]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.9-3.2.9-201203012153.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9-3.2.9-201203012153.patch
CommitLineData
ebd0c7c2
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index dfa6fc6..0095943 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -5,6 +5,7 @@
6 *.cis
7 *.cpio
8 *.csp
9+*.dbg
10 *.dsp
11 *.dvi
12 *.elf
13@@ -14,6 +15,7 @@
14 *.gcov
15 *.gen.S
16 *.gif
17+*.gmo
18 *.grep
19 *.grp
20 *.gz
21@@ -48,9 +50,11 @@
22 *.tab.h
23 *.tex
24 *.ver
25+*.vim
26 *.xml
27 *.xz
28 *_MODULES
29+*_reg_safe.h
30 *_vga16.c
31 *~
32 \#*#
33@@ -70,6 +74,7 @@ Kerntypes
34 Module.markers
35 Module.symvers
36 PENDING
37+PERF*
38 SCCS
39 System.map*
40 TAGS
41@@ -93,19 +98,24 @@ bounds.h
42 bsetup
43 btfixupprep
44 build
45+builtin-policy.h
46 bvmlinux
47 bzImage*
48 capability_names.h
49 capflags.c
50 classlist.h*
51+clut_vga16.c
52+common-cmds.h
53 comp*.log
54 compile.h*
55 conf
56 config
57 config-*
58 config_data.h*
59+config.c
60 config.mak
61 config.mak.autogen
62+config.tmp
63 conmakehash
64 consolemap_deftbl.c*
65 cpustr.h
66@@ -119,6 +129,7 @@ dslm
67 elf2ecoff
68 elfconfig.h*
69 evergreen_reg_safe.h
70+exception_policy.conf
71 fixdep
72 flask.h
73 fore200e_mkfirm
74@@ -126,12 +137,15 @@ fore200e_pca_fw.c*
75 gconf
76 gconf.glade.h
77 gen-devlist
78+gen-kdb_cmds.c
79 gen_crc32table
80 gen_init_cpio
81 generated
82 genheaders
83 genksyms
84 *_gray256.c
85+hash
86+hid-example
87 hpet_example
88 hugepage-mmap
89 hugepage-shm
90@@ -146,7 +160,7 @@ int32.c
91 int4.c
92 int8.c
93 kallsyms
94-kconfig
95+kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99@@ -154,7 +168,6 @@ kxgettext
100 lkc_defs.h
101 lex.c
102 lex.*.c
103-linux
104 logo_*.c
105 logo_*_clut224.c
106 logo_*_mono.c
107@@ -166,14 +179,15 @@ machtypes.h
108 map
109 map_hugetlb
110 maui_boot.h
111-media
112 mconf
113+mdp
114 miboot*
115 mk_elfconfig
116 mkboot
117 mkbugboot
118 mkcpustr
119 mkdep
120+mkpiggy
121 mkprep
122 mkregtable
123 mktables
124@@ -209,6 +223,7 @@ r300_reg_safe.h
125 r420_reg_safe.h
126 r600_reg_safe.h
127 recordmcount
128+regdb.c
129 relocs
130 rlim_names.h
131 rn50_reg_safe.h
132@@ -219,6 +234,7 @@ setup
133 setup.bin
134 setup.elf
135 sImage
136+slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140@@ -229,6 +245,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144+user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148@@ -246,7 +263,9 @@ vmlinux
149 vmlinux-*
150 vmlinux.aout
151 vmlinux.bin.all
152+vmlinux.bin.bz2
153 vmlinux.lds
154+vmlinux.relocs
155 vmlinuz
156 voffset.h
157 vsyscall.lds
158@@ -254,9 +273,11 @@ vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162+utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168+zconf.lex.c
169 zoffset.h
170diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
171index 81c287f..d456d02 100644
172--- a/Documentation/kernel-parameters.txt
173+++ b/Documentation/kernel-parameters.txt
174@@ -1935,6 +1935,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
175 the specified number of seconds. This is to be used if
176 your oopses keep scrolling off the screen.
177
178+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
179+ virtualization environments that don't cope well with the
180+ expand down segment used by UDEREF on X86-32 or the frequent
181+ page table updates on X86-64.
182+
183+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
184+
185 pcbit= [HW,ISDN]
186
187 pcd. [PARIDE]
188diff --git a/Makefile b/Makefile
189index 5f1739b..1831396 100644
190--- a/Makefile
191+++ b/Makefile
192@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
193
194 HOSTCC = gcc
195 HOSTCXX = g++
196-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
197-HOSTCXXFLAGS = -O2
198+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
199+HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
200+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
201
202 # Decide whether to build built-in, modular, or both.
203 # Normally, just do built-in.
204@@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
205 # Rules shared between *config targets and build targets
206
207 # Basic helpers built in scripts/
208-PHONY += scripts_basic
209-scripts_basic:
210+PHONY += scripts_basic gcc-plugins
211+scripts_basic: gcc-plugins
212 $(Q)$(MAKE) $(build)=scripts/basic
213 $(Q)rm -f .tmp_quiet_recordmcount
214
215@@ -564,6 +565,48 @@ else
216 KBUILD_CFLAGS += -O2
217 endif
218
219+ifndef DISABLE_PAX_PLUGINS
220+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
221+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
222+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
223+endif
224+ifdef CONFIG_PAX_MEMORY_STACKLEAK
225+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
226+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
227+endif
228+ifdef CONFIG_KALLOCSTAT_PLUGIN
229+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
230+endif
231+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
232+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
233+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
234+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
235+endif
236+ifdef CONFIG_CHECKER_PLUGIN
237+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
238+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
239+endif
240+endif
241+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS) $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS)
242+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
243+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
244+ifeq ($(KBUILD_EXTMOD),)
245+gcc-plugins:
246+ $(Q)$(MAKE) $(build)=tools/gcc
247+else
248+gcc-plugins: ;
249+endif
250+else
251+gcc-plugins:
252+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
253+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
254+else
255+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
256+endif
257+ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
258+endif
259+endif
260+
261 include $(srctree)/arch/$(SRCARCH)/Makefile
262
263 ifneq ($(CONFIG_FRAME_WARN),0)
264@@ -708,7 +751,7 @@ export mod_strip_cmd
265
266
267 ifeq ($(KBUILD_EXTMOD),)
268-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
269+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
270
271 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
272 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
273@@ -932,6 +975,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
274
275 # The actual objects are generated when descending,
276 # make sure no implicit rule kicks in
277+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
278+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
279 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
280
281 # Handle descending into subdirectories listed in $(vmlinux-dirs)
282@@ -941,7 +986,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
283 # Error messages still appears in the original language
284
285 PHONY += $(vmlinux-dirs)
286-$(vmlinux-dirs): prepare scripts
287+$(vmlinux-dirs): gcc-plugins prepare scripts
288 $(Q)$(MAKE) $(build)=$@
289
290 # Store (new) KERNELRELASE string in include/config/kernel.release
291@@ -985,6 +1030,7 @@ prepare0: archprepare FORCE
292 $(Q)$(MAKE) $(build)=.
293
294 # All the preparing..
295+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
296 prepare: prepare0
297
298 # Generate some files
299@@ -1086,6 +1132,8 @@ all: modules
300 # using awk while concatenating to the final file.
301
302 PHONY += modules
303+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
304+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
305 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
306 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
307 @$(kecho) ' Building modules, stage 2.';
308@@ -1101,7 +1149,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
309
310 # Target to prepare building external modules
311 PHONY += modules_prepare
312-modules_prepare: prepare scripts
313+modules_prepare: gcc-plugins prepare scripts
314
315 # Target to install modules
316 PHONY += modules_install
317@@ -1198,6 +1246,7 @@ distclean: mrproper
318 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
319 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
320 -o -name '.*.rej' \
321+ -o -name '.*.rej' -o -name '*.so' \
322 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
323 -type f -print | xargs rm -f
324
325@@ -1358,6 +1407,8 @@ PHONY += $(module-dirs) modules
326 $(module-dirs): crmodverdir $(objtree)/Module.symvers
327 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
328
329+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
330+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
331 modules: $(module-dirs)
332 @$(kecho) ' Building modules, stage 2.';
333 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
334@@ -1484,17 +1535,21 @@ else
335 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
336 endif
337
338-%.s: %.c prepare scripts FORCE
339+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
340+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
341+%.s: %.c gcc-plugins prepare scripts FORCE
342 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
343 %.i: %.c prepare scripts FORCE
344 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
345-%.o: %.c prepare scripts FORCE
346+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
347+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
348+%.o: %.c gcc-plugins prepare scripts FORCE
349 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
350 %.lst: %.c prepare scripts FORCE
351 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
352-%.s: %.S prepare scripts FORCE
353+%.s: %.S gcc-plugins prepare scripts FORCE
354 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
355-%.o: %.S prepare scripts FORCE
356+%.o: %.S gcc-plugins prepare scripts FORCE
357 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
358 %.symtypes: %.c prepare scripts FORCE
359 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
360@@ -1504,11 +1559,15 @@ endif
361 $(cmd_crmodverdir)
362 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
363 $(build)=$(build-dir)
364-%/: prepare scripts FORCE
365+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
366+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
367+%/: gcc-plugins prepare scripts FORCE
368 $(cmd_crmodverdir)
369 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
370 $(build)=$(build-dir)
371-%.ko: prepare scripts FORCE
372+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
373+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
374+%.ko: gcc-plugins prepare scripts FORCE
375 $(cmd_crmodverdir)
376 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
377 $(build)=$(build-dir) $(@:.ko=.o)
378diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
379index 640f909..48b6597 100644
380--- a/arch/alpha/include/asm/atomic.h
381+++ b/arch/alpha/include/asm/atomic.h
382@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
383 #define atomic_dec(v) atomic_sub(1,(v))
384 #define atomic64_dec(v) atomic64_sub(1,(v))
385
386+#define atomic64_read_unchecked(v) atomic64_read(v)
387+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
388+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
389+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
390+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
391+#define atomic64_inc_unchecked(v) atomic64_inc(v)
392+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
393+#define atomic64_dec_unchecked(v) atomic64_dec(v)
394+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
395+
396 #define smp_mb__before_atomic_dec() smp_mb()
397 #define smp_mb__after_atomic_dec() smp_mb()
398 #define smp_mb__before_atomic_inc() smp_mb()
399diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
400index da5449e..7418343 100644
401--- a/arch/alpha/include/asm/elf.h
402+++ b/arch/alpha/include/asm/elf.h
403@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
404
405 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
406
407+#ifdef CONFIG_PAX_ASLR
408+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
409+
410+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
411+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
412+#endif
413+
414 /* $0 is set by ld.so to a pointer to a function which might be
415 registered using atexit. This provides a mean for the dynamic
416 linker to call DT_FINI functions for shared libraries that have
417diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
418index de98a73..bd4f1f8 100644
419--- a/arch/alpha/include/asm/pgtable.h
420+++ b/arch/alpha/include/asm/pgtable.h
421@@ -101,6 +101,17 @@ struct vm_area_struct;
422 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
423 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
424 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
425+
426+#ifdef CONFIG_PAX_PAGEEXEC
427+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
428+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
429+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
430+#else
431+# define PAGE_SHARED_NOEXEC PAGE_SHARED
432+# define PAGE_COPY_NOEXEC PAGE_COPY
433+# define PAGE_READONLY_NOEXEC PAGE_READONLY
434+#endif
435+
436 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
437
438 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
439diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
440index 2fd00b7..cfd5069 100644
441--- a/arch/alpha/kernel/module.c
442+++ b/arch/alpha/kernel/module.c
443@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
444
445 /* The small sections were sorted to the end of the segment.
446 The following should definitely cover them. */
447- gp = (u64)me->module_core + me->core_size - 0x8000;
448+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
449 got = sechdrs[me->arch.gotsecindex].sh_addr;
450
451 for (i = 0; i < n; i++) {
452diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
453index 01e8715..be0e80f 100644
454--- a/arch/alpha/kernel/osf_sys.c
455+++ b/arch/alpha/kernel/osf_sys.c
456@@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
457 /* At this point: (!vma || addr < vma->vm_end). */
458 if (limit - len < addr)
459 return -ENOMEM;
460- if (!vma || addr + len <= vma->vm_start)
461+ if (check_heap_stack_gap(vma, addr, len))
462 return addr;
463 addr = vma->vm_end;
464 vma = vma->vm_next;
465@@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
466 merely specific addresses, but regions of memory -- perhaps
467 this feature should be incorporated into all ports? */
468
469+#ifdef CONFIG_PAX_RANDMMAP
470+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
471+#endif
472+
473 if (addr) {
474 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
475 if (addr != (unsigned long) -ENOMEM)
476@@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
477 }
478
479 /* Next, try allocating at TASK_UNMAPPED_BASE. */
480- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
481- len, limit);
482+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
483+
484 if (addr != (unsigned long) -ENOMEM)
485 return addr;
486
487diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
488index fadd5f8..904e73a 100644
489--- a/arch/alpha/mm/fault.c
490+++ b/arch/alpha/mm/fault.c
491@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
492 __reload_thread(pcb);
493 }
494
495+#ifdef CONFIG_PAX_PAGEEXEC
496+/*
497+ * PaX: decide what to do with offenders (regs->pc = fault address)
498+ *
499+ * returns 1 when task should be killed
500+ * 2 when patched PLT trampoline was detected
501+ * 3 when unpatched PLT trampoline was detected
502+ */
503+static int pax_handle_fetch_fault(struct pt_regs *regs)
504+{
505+
506+#ifdef CONFIG_PAX_EMUPLT
507+ int err;
508+
509+ do { /* PaX: patched PLT emulation #1 */
510+ unsigned int ldah, ldq, jmp;
511+
512+ err = get_user(ldah, (unsigned int *)regs->pc);
513+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
514+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
515+
516+ if (err)
517+ break;
518+
519+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
520+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
521+ jmp == 0x6BFB0000U)
522+ {
523+ unsigned long r27, addr;
524+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
525+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
526+
527+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
528+ err = get_user(r27, (unsigned long *)addr);
529+ if (err)
530+ break;
531+
532+ regs->r27 = r27;
533+ regs->pc = r27;
534+ return 2;
535+ }
536+ } while (0);
537+
538+ do { /* PaX: patched PLT emulation #2 */
539+ unsigned int ldah, lda, br;
540+
541+ err = get_user(ldah, (unsigned int *)regs->pc);
542+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
543+ err |= get_user(br, (unsigned int *)(regs->pc+8));
544+
545+ if (err)
546+ break;
547+
548+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
549+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
550+ (br & 0xFFE00000U) == 0xC3E00000U)
551+ {
552+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
553+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
554+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
555+
556+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
557+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
558+ return 2;
559+ }
560+ } while (0);
561+
562+ do { /* PaX: unpatched PLT emulation */
563+ unsigned int br;
564+
565+ err = get_user(br, (unsigned int *)regs->pc);
566+
567+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
568+ unsigned int br2, ldq, nop, jmp;
569+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
570+
571+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
572+ err = get_user(br2, (unsigned int *)addr);
573+ err |= get_user(ldq, (unsigned int *)(addr+4));
574+ err |= get_user(nop, (unsigned int *)(addr+8));
575+ err |= get_user(jmp, (unsigned int *)(addr+12));
576+ err |= get_user(resolver, (unsigned long *)(addr+16));
577+
578+ if (err)
579+ break;
580+
581+ if (br2 == 0xC3600000U &&
582+ ldq == 0xA77B000CU &&
583+ nop == 0x47FF041FU &&
584+ jmp == 0x6B7B0000U)
585+ {
586+ regs->r28 = regs->pc+4;
587+ regs->r27 = addr+16;
588+ regs->pc = resolver;
589+ return 3;
590+ }
591+ }
592+ } while (0);
593+#endif
594+
595+ return 1;
596+}
597+
598+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
599+{
600+ unsigned long i;
601+
602+ printk(KERN_ERR "PAX: bytes at PC: ");
603+ for (i = 0; i < 5; i++) {
604+ unsigned int c;
605+ if (get_user(c, (unsigned int *)pc+i))
606+ printk(KERN_CONT "???????? ");
607+ else
608+ printk(KERN_CONT "%08x ", c);
609+ }
610+ printk("\n");
611+}
612+#endif
613
614 /*
615 * This routine handles page faults. It determines the address,
616@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
617 good_area:
618 si_code = SEGV_ACCERR;
619 if (cause < 0) {
620- if (!(vma->vm_flags & VM_EXEC))
621+ if (!(vma->vm_flags & VM_EXEC)) {
622+
623+#ifdef CONFIG_PAX_PAGEEXEC
624+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
625+ goto bad_area;
626+
627+ up_read(&mm->mmap_sem);
628+ switch (pax_handle_fetch_fault(regs)) {
629+
630+#ifdef CONFIG_PAX_EMUPLT
631+ case 2:
632+ case 3:
633+ return;
634+#endif
635+
636+ }
637+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
638+ do_group_exit(SIGKILL);
639+#else
640 goto bad_area;
641+#endif
642+
643+ }
644 } else if (!cause) {
645 /* Allow reads even for write-only mappings */
646 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
647diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
648index 86976d0..6610950 100644
649--- a/arch/arm/include/asm/atomic.h
650+++ b/arch/arm/include/asm/atomic.h
651@@ -15,6 +15,10 @@
652 #include <linux/types.h>
653 #include <asm/system.h>
654
655+#ifdef CONFIG_GENERIC_ATOMIC64
656+#include <asm-generic/atomic64.h>
657+#endif
658+
659 #define ATOMIC_INIT(i) { (i) }
660
661 #ifdef __KERNEL__
662@@ -239,6 +243,14 @@ typedef struct {
663 u64 __aligned(8) counter;
664 } atomic64_t;
665
666+#ifdef CONFIG_PAX_REFCOUNT
667+typedef struct {
668+ u64 __aligned(8) counter;
669+} atomic64_unchecked_t;
670+#else
671+typedef atomic64_t atomic64_unchecked_t;
672+#endif
673+
674 #define ATOMIC64_INIT(i) { (i) }
675
676 static inline u64 atomic64_read(atomic64_t *v)
677@@ -459,6 +471,16 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
678 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
679 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
680
681+#define atomic64_read_unchecked(v) atomic64_read(v)
682+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
683+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
684+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
685+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
686+#define atomic64_inc_unchecked(v) atomic64_inc(v)
687+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
688+#define atomic64_dec_unchecked(v) atomic64_dec(v)
689+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
690+
691 #endif /* !CONFIG_GENERIC_ATOMIC64 */
692 #endif
693 #endif
694diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
695index 0e9ce8d..6ef1e03 100644
696--- a/arch/arm/include/asm/elf.h
697+++ b/arch/arm/include/asm/elf.h
698@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
699 the loader. We need to make sure that it is out of the way of the program
700 that it will "exec", and that there is sufficient room for the brk. */
701
702-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
703+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
704+
705+#ifdef CONFIG_PAX_ASLR
706+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
707+
708+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
709+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
710+#endif
711
712 /* When the program starts, a1 contains a pointer to a function to be
713 registered with atexit, as per the SVR4 ABI. A value of 0 means we
714@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
715 extern void elf_set_personality(const struct elf32_hdr *);
716 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
717
718-struct mm_struct;
719-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
720-#define arch_randomize_brk arch_randomize_brk
721-
722 extern int vectors_user_mapping(void);
723 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
724 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
725diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
726index e51b1e8..32a3113 100644
727--- a/arch/arm/include/asm/kmap_types.h
728+++ b/arch/arm/include/asm/kmap_types.h
729@@ -21,6 +21,7 @@ enum km_type {
730 KM_L1_CACHE,
731 KM_L2_CACHE,
732 KM_KDB,
733+ KM_CLEARPAGE,
734 KM_TYPE_NR
735 };
736
737diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
738index b293616..96310e5 100644
739--- a/arch/arm/include/asm/uaccess.h
740+++ b/arch/arm/include/asm/uaccess.h
741@@ -22,6 +22,8 @@
742 #define VERIFY_READ 0
743 #define VERIFY_WRITE 1
744
745+extern void check_object_size(const void *ptr, unsigned long n, bool to);
746+
747 /*
748 * The exception table consists of pairs of addresses: the first is the
749 * address of an instruction that is allowed to fault, and the second is
750@@ -387,8 +389,23 @@ do { \
751
752
753 #ifdef CONFIG_MMU
754-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
755-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
756+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
757+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
758+
759+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
760+{
761+ if (!__builtin_constant_p(n))
762+ check_object_size(to, n, false);
763+ return ___copy_from_user(to, from, n);
764+}
765+
766+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
767+{
768+ if (!__builtin_constant_p(n))
769+ check_object_size(from, n, true);
770+ return ___copy_to_user(to, from, n);
771+}
772+
773 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
774 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
775 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
776@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
777
778 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
779 {
780+ if ((long)n < 0)
781+ return n;
782+
783 if (access_ok(VERIFY_READ, from, n))
784 n = __copy_from_user(to, from, n);
785 else /* security hole - plug it */
786@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
787
788 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
789 {
790+ if ((long)n < 0)
791+ return n;
792+
793 if (access_ok(VERIFY_WRITE, to, n))
794 n = __copy_to_user(to, from, n);
795 return n;
796diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
797index 5b0bce6..becd81c 100644
798--- a/arch/arm/kernel/armksyms.c
799+++ b/arch/arm/kernel/armksyms.c
800@@ -95,8 +95,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
801 #ifdef CONFIG_MMU
802 EXPORT_SYMBOL(copy_page);
803
804-EXPORT_SYMBOL(__copy_from_user);
805-EXPORT_SYMBOL(__copy_to_user);
806+EXPORT_SYMBOL(___copy_from_user);
807+EXPORT_SYMBOL(___copy_to_user);
808 EXPORT_SYMBOL(__clear_user);
809
810 EXPORT_SYMBOL(__get_user_1);
811diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
812index 3d0c6fb..3dcae52 100644
813--- a/arch/arm/kernel/process.c
814+++ b/arch/arm/kernel/process.c
815@@ -28,7 +28,6 @@
816 #include <linux/tick.h>
817 #include <linux/utsname.h>
818 #include <linux/uaccess.h>
819-#include <linux/random.h>
820 #include <linux/hw_breakpoint.h>
821 #include <linux/cpuidle.h>
822
823@@ -484,12 +483,6 @@ unsigned long get_wchan(struct task_struct *p)
824 return 0;
825 }
826
827-unsigned long arch_randomize_brk(struct mm_struct *mm)
828-{
829- unsigned long range_end = mm->brk + 0x02000000;
830- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
831-}
832-
833 #ifdef CONFIG_MMU
834 /*
835 * The vectors page is always readable from user space for the
836diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
837index 99a5727..a3d5bb1 100644
838--- a/arch/arm/kernel/traps.c
839+++ b/arch/arm/kernel/traps.c
840@@ -259,6 +259,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
841
842 static DEFINE_RAW_SPINLOCK(die_lock);
843
844+extern void gr_handle_kernel_exploit(void);
845+
846 /*
847 * This function is protected against re-entrancy.
848 */
849@@ -288,6 +290,9 @@ void die(const char *str, struct pt_regs *regs, int err)
850 panic("Fatal exception in interrupt");
851 if (panic_on_oops)
852 panic("Fatal exception");
853+
854+ gr_handle_kernel_exploit();
855+
856 if (ret != NOTIFY_STOP)
857 do_exit(SIGSEGV);
858 }
859diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
860index 66a477a..bee61d3 100644
861--- a/arch/arm/lib/copy_from_user.S
862+++ b/arch/arm/lib/copy_from_user.S
863@@ -16,7 +16,7 @@
864 /*
865 * Prototype:
866 *
867- * size_t __copy_from_user(void *to, const void *from, size_t n)
868+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
869 *
870 * Purpose:
871 *
872@@ -84,11 +84,11 @@
873
874 .text
875
876-ENTRY(__copy_from_user)
877+ENTRY(___copy_from_user)
878
879 #include "copy_template.S"
880
881-ENDPROC(__copy_from_user)
882+ENDPROC(___copy_from_user)
883
884 .pushsection .fixup,"ax"
885 .align 0
886diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
887index d066df6..df28194 100644
888--- a/arch/arm/lib/copy_to_user.S
889+++ b/arch/arm/lib/copy_to_user.S
890@@ -16,7 +16,7 @@
891 /*
892 * Prototype:
893 *
894- * size_t __copy_to_user(void *to, const void *from, size_t n)
895+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
896 *
897 * Purpose:
898 *
899@@ -88,11 +88,11 @@
900 .text
901
902 ENTRY(__copy_to_user_std)
903-WEAK(__copy_to_user)
904+WEAK(___copy_to_user)
905
906 #include "copy_template.S"
907
908-ENDPROC(__copy_to_user)
909+ENDPROC(___copy_to_user)
910 ENDPROC(__copy_to_user_std)
911
912 .pushsection .fixup,"ax"
913diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
914index d0ece2a..5ae2f39 100644
915--- a/arch/arm/lib/uaccess.S
916+++ b/arch/arm/lib/uaccess.S
917@@ -20,7 +20,7 @@
918
919 #define PAGE_SHIFT 12
920
921-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
922+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
923 * Purpose : copy a block to user memory from kernel memory
924 * Params : to - user memory
925 * : from - kernel memory
926@@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May fault
927 sub r2, r2, ip
928 b .Lc2u_dest_aligned
929
930-ENTRY(__copy_to_user)
931+ENTRY(___copy_to_user)
932 stmfd sp!, {r2, r4 - r7, lr}
933 cmp r2, #4
934 blt .Lc2u_not_enough
935@@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May fault
936 ldrgtb r3, [r1], #0
937 USER( T(strgtb) r3, [r0], #1) @ May fault
938 b .Lc2u_finished
939-ENDPROC(__copy_to_user)
940+ENDPROC(___copy_to_user)
941
942 .pushsection .fixup,"ax"
943 .align 0
944 9001: ldmfd sp!, {r0, r4 - r7, pc}
945 .popsection
946
947-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
948+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
949 * Purpose : copy a block from user memory to kernel memory
950 * Params : to - kernel memory
951 * : from - user memory
952@@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May fault
953 sub r2, r2, ip
954 b .Lcfu_dest_aligned
955
956-ENTRY(__copy_from_user)
957+ENTRY(___copy_from_user)
958 stmfd sp!, {r0, r2, r4 - r7, lr}
959 cmp r2, #4
960 blt .Lcfu_not_enough
961@@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May fault
962 USER( T(ldrgtb) r3, [r1], #1) @ May fault
963 strgtb r3, [r0], #1
964 b .Lcfu_finished
965-ENDPROC(__copy_from_user)
966+ENDPROC(___copy_from_user)
967
968 .pushsection .fixup,"ax"
969 .align 0
970diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
971index 025f742..8432b08 100644
972--- a/arch/arm/lib/uaccess_with_memcpy.c
973+++ b/arch/arm/lib/uaccess_with_memcpy.c
974@@ -104,7 +104,7 @@ out:
975 }
976
977 unsigned long
978-__copy_to_user(void __user *to, const void *from, unsigned long n)
979+___copy_to_user(void __user *to, const void *from, unsigned long n)
980 {
981 /*
982 * This test is stubbed out of the main function above to keep
983diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
984index 2b2d51c..0127490 100644
985--- a/arch/arm/mach-ux500/mbox-db5500.c
986+++ b/arch/arm/mach-ux500/mbox-db5500.c
987@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
988 return sprintf(buf, "0x%X\n", mbox_value);
989 }
990
991-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
992+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
993
994 static int mbox_show(struct seq_file *s, void *data)
995 {
996diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
997index aa33949..b242a2f 100644
998--- a/arch/arm/mm/fault.c
999+++ b/arch/arm/mm/fault.c
1000@@ -183,6 +183,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1001 }
1002 #endif
1003
1004+#ifdef CONFIG_PAX_PAGEEXEC
1005+ if (fsr & FSR_LNX_PF) {
1006+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1007+ do_group_exit(SIGKILL);
1008+ }
1009+#endif
1010+
1011 tsk->thread.address = addr;
1012 tsk->thread.error_code = fsr;
1013 tsk->thread.trap_no = 14;
1014@@ -384,6 +391,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1015 }
1016 #endif /* CONFIG_MMU */
1017
1018+#ifdef CONFIG_PAX_PAGEEXEC
1019+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1020+{
1021+ long i;
1022+
1023+ printk(KERN_ERR "PAX: bytes at PC: ");
1024+ for (i = 0; i < 20; i++) {
1025+ unsigned char c;
1026+ if (get_user(c, (__force unsigned char __user *)pc+i))
1027+ printk(KERN_CONT "?? ");
1028+ else
1029+ printk(KERN_CONT "%02x ", c);
1030+ }
1031+ printk("\n");
1032+
1033+ printk(KERN_ERR "PAX: bytes at SP-4: ");
1034+ for (i = -1; i < 20; i++) {
1035+ unsigned long c;
1036+ if (get_user(c, (__force unsigned long __user *)sp+i))
1037+ printk(KERN_CONT "???????? ");
1038+ else
1039+ printk(KERN_CONT "%08lx ", c);
1040+ }
1041+ printk("\n");
1042+}
1043+#endif
1044+
1045 /*
1046 * First Level Translation Fault Handler
1047 *
1048diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1049index 44b628e..623ee2a 100644
1050--- a/arch/arm/mm/mmap.c
1051+++ b/arch/arm/mm/mmap.c
1052@@ -54,6 +54,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1053 if (len > TASK_SIZE)
1054 return -ENOMEM;
1055
1056+#ifdef CONFIG_PAX_RANDMMAP
1057+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1058+#endif
1059+
1060 if (addr) {
1061 if (do_align)
1062 addr = COLOUR_ALIGN(addr, pgoff);
1063@@ -61,15 +65,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1064 addr = PAGE_ALIGN(addr);
1065
1066 vma = find_vma(mm, addr);
1067- if (TASK_SIZE - len >= addr &&
1068- (!vma || addr + len <= vma->vm_start))
1069+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1070 return addr;
1071 }
1072 if (len > mm->cached_hole_size) {
1073- start_addr = addr = mm->free_area_cache;
1074+ start_addr = addr = mm->free_area_cache;
1075 } else {
1076- start_addr = addr = TASK_UNMAPPED_BASE;
1077- mm->cached_hole_size = 0;
1078+ start_addr = addr = mm->mmap_base;
1079+ mm->cached_hole_size = 0;
1080 }
1081 /* 8 bits of randomness in 20 address space bits */
1082 if ((current->flags & PF_RANDOMIZE) &&
1083@@ -89,14 +92,14 @@ full_search:
1084 * Start a new search - just in case we missed
1085 * some holes.
1086 */
1087- if (start_addr != TASK_UNMAPPED_BASE) {
1088- start_addr = addr = TASK_UNMAPPED_BASE;
1089+ if (start_addr != mm->mmap_base) {
1090+ start_addr = addr = mm->mmap_base;
1091 mm->cached_hole_size = 0;
1092 goto full_search;
1093 }
1094 return -ENOMEM;
1095 }
1096- if (!vma || addr + len <= vma->vm_start) {
1097+ if (check_heap_stack_gap(vma, addr, len)) {
1098 /*
1099 * Remember the place where we stopped the search:
1100 */
1101diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1102index 3b3159b..425ea94 100644
1103--- a/arch/avr32/include/asm/elf.h
1104+++ b/arch/avr32/include/asm/elf.h
1105@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1106 the loader. We need to make sure that it is out of the way of the program
1107 that it will "exec", and that there is sufficient room for the brk. */
1108
1109-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1110+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1111
1112+#ifdef CONFIG_PAX_ASLR
1113+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1114+
1115+#define PAX_DELTA_MMAP_LEN 15
1116+#define PAX_DELTA_STACK_LEN 15
1117+#endif
1118
1119 /* This yields a mask that user programs can use to figure out what
1120 instruction set this CPU supports. This could be done in user space,
1121diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1122index b7f5c68..556135c 100644
1123--- a/arch/avr32/include/asm/kmap_types.h
1124+++ b/arch/avr32/include/asm/kmap_types.h
1125@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1126 D(11) KM_IRQ1,
1127 D(12) KM_SOFTIRQ0,
1128 D(13) KM_SOFTIRQ1,
1129-D(14) KM_TYPE_NR
1130+D(14) KM_CLEARPAGE,
1131+D(15) KM_TYPE_NR
1132 };
1133
1134 #undef D
1135diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1136index f7040a1..db9f300 100644
1137--- a/arch/avr32/mm/fault.c
1138+++ b/arch/avr32/mm/fault.c
1139@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1140
1141 int exception_trace = 1;
1142
1143+#ifdef CONFIG_PAX_PAGEEXEC
1144+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1145+{
1146+ unsigned long i;
1147+
1148+ printk(KERN_ERR "PAX: bytes at PC: ");
1149+ for (i = 0; i < 20; i++) {
1150+ unsigned char c;
1151+ if (get_user(c, (unsigned char *)pc+i))
1152+ printk(KERN_CONT "???????? ");
1153+ else
1154+ printk(KERN_CONT "%02x ", c);
1155+ }
1156+ printk("\n");
1157+}
1158+#endif
1159+
1160 /*
1161 * This routine handles page faults. It determines the address and the
1162 * problem, and then passes it off to one of the appropriate routines.
1163@@ -156,6 +173,16 @@ bad_area:
1164 up_read(&mm->mmap_sem);
1165
1166 if (user_mode(regs)) {
1167+
1168+#ifdef CONFIG_PAX_PAGEEXEC
1169+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1170+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1171+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1172+ do_group_exit(SIGKILL);
1173+ }
1174+ }
1175+#endif
1176+
1177 if (exception_trace && printk_ratelimit())
1178 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1179 "sp %08lx ecr %lu\n",
1180diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
1181index 0d8a7d6..d0c9ff5 100644
1182--- a/arch/frv/include/asm/atomic.h
1183+++ b/arch/frv/include/asm/atomic.h
1184@@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
1185 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
1186 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
1187
1188+#define atomic64_read_unchecked(v) atomic64_read(v)
1189+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1190+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1191+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1192+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1193+#define atomic64_inc_unchecked(v) atomic64_inc(v)
1194+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1195+#define atomic64_dec_unchecked(v) atomic64_dec(v)
1196+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1197+
1198 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
1199 {
1200 int c, old;
1201diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1202index f8e16b2..c73ff79 100644
1203--- a/arch/frv/include/asm/kmap_types.h
1204+++ b/arch/frv/include/asm/kmap_types.h
1205@@ -23,6 +23,7 @@ enum km_type {
1206 KM_IRQ1,
1207 KM_SOFTIRQ0,
1208 KM_SOFTIRQ1,
1209+ KM_CLEARPAGE,
1210 KM_TYPE_NR
1211 };
1212
1213diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1214index 385fd30..6c3d97e 100644
1215--- a/arch/frv/mm/elf-fdpic.c
1216+++ b/arch/frv/mm/elf-fdpic.c
1217@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1218 if (addr) {
1219 addr = PAGE_ALIGN(addr);
1220 vma = find_vma(current->mm, addr);
1221- if (TASK_SIZE - len >= addr &&
1222- (!vma || addr + len <= vma->vm_start))
1223+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1224 goto success;
1225 }
1226
1227@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1228 for (; vma; vma = vma->vm_next) {
1229 if (addr > limit)
1230 break;
1231- if (addr + len <= vma->vm_start)
1232+ if (check_heap_stack_gap(vma, addr, len))
1233 goto success;
1234 addr = vma->vm_end;
1235 }
1236@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1237 for (; vma; vma = vma->vm_next) {
1238 if (addr > limit)
1239 break;
1240- if (addr + len <= vma->vm_start)
1241+ if (check_heap_stack_gap(vma, addr, len))
1242 goto success;
1243 addr = vma->vm_end;
1244 }
1245diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
1246index 3fad89e..3047da5 100644
1247--- a/arch/ia64/include/asm/atomic.h
1248+++ b/arch/ia64/include/asm/atomic.h
1249@@ -209,6 +209,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
1250 #define atomic64_inc(v) atomic64_add(1, (v))
1251 #define atomic64_dec(v) atomic64_sub(1, (v))
1252
1253+#define atomic64_read_unchecked(v) atomic64_read(v)
1254+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1255+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1256+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1257+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1258+#define atomic64_inc_unchecked(v) atomic64_inc(v)
1259+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1260+#define atomic64_dec_unchecked(v) atomic64_dec(v)
1261+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1262+
1263 /* Atomic operations are already serializing */
1264 #define smp_mb__before_atomic_dec() barrier()
1265 #define smp_mb__after_atomic_dec() barrier()
1266diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1267index b5298eb..67c6e62 100644
1268--- a/arch/ia64/include/asm/elf.h
1269+++ b/arch/ia64/include/asm/elf.h
1270@@ -42,6 +42,13 @@
1271 */
1272 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1273
1274+#ifdef CONFIG_PAX_ASLR
1275+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1276+
1277+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1278+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1279+#endif
1280+
1281 #define PT_IA_64_UNWIND 0x70000001
1282
1283 /* IA-64 relocations: */
1284diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1285index 1a97af3..7529d31 100644
1286--- a/arch/ia64/include/asm/pgtable.h
1287+++ b/arch/ia64/include/asm/pgtable.h
1288@@ -12,7 +12,7 @@
1289 * David Mosberger-Tang <davidm@hpl.hp.com>
1290 */
1291
1292-
1293+#include <linux/const.h>
1294 #include <asm/mman.h>
1295 #include <asm/page.h>
1296 #include <asm/processor.h>
1297@@ -143,6 +143,17 @@
1298 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1299 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1300 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1301+
1302+#ifdef CONFIG_PAX_PAGEEXEC
1303+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1304+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1305+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1306+#else
1307+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1308+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1309+# define PAGE_COPY_NOEXEC PAGE_COPY
1310+#endif
1311+
1312 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1313 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1314 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1315diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1316index b77768d..e0795eb 100644
1317--- a/arch/ia64/include/asm/spinlock.h
1318+++ b/arch/ia64/include/asm/spinlock.h
1319@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
1320 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1321
1322 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1323- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1324+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1325 }
1326
1327 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
1328diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1329index 449c8c0..432a3d2 100644
1330--- a/arch/ia64/include/asm/uaccess.h
1331+++ b/arch/ia64/include/asm/uaccess.h
1332@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1333 const void *__cu_from = (from); \
1334 long __cu_len = (n); \
1335 \
1336- if (__access_ok(__cu_to, __cu_len, get_fs())) \
1337+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1338 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1339 __cu_len; \
1340 })
1341@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1342 long __cu_len = (n); \
1343 \
1344 __chk_user_ptr(__cu_from); \
1345- if (__access_ok(__cu_from, __cu_len, get_fs())) \
1346+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1347 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1348 __cu_len; \
1349 })
1350diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1351index 24603be..948052d 100644
1352--- a/arch/ia64/kernel/module.c
1353+++ b/arch/ia64/kernel/module.c
1354@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
1355 void
1356 module_free (struct module *mod, void *module_region)
1357 {
1358- if (mod && mod->arch.init_unw_table &&
1359- module_region == mod->module_init) {
1360+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1361 unw_remove_unwind_table(mod->arch.init_unw_table);
1362 mod->arch.init_unw_table = NULL;
1363 }
1364@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1365 }
1366
1367 static inline int
1368+in_init_rx (const struct module *mod, uint64_t addr)
1369+{
1370+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1371+}
1372+
1373+static inline int
1374+in_init_rw (const struct module *mod, uint64_t addr)
1375+{
1376+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1377+}
1378+
1379+static inline int
1380 in_init (const struct module *mod, uint64_t addr)
1381 {
1382- return addr - (uint64_t) mod->module_init < mod->init_size;
1383+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1384+}
1385+
1386+static inline int
1387+in_core_rx (const struct module *mod, uint64_t addr)
1388+{
1389+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1390+}
1391+
1392+static inline int
1393+in_core_rw (const struct module *mod, uint64_t addr)
1394+{
1395+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1396 }
1397
1398 static inline int
1399 in_core (const struct module *mod, uint64_t addr)
1400 {
1401- return addr - (uint64_t) mod->module_core < mod->core_size;
1402+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1403 }
1404
1405 static inline int
1406@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1407 break;
1408
1409 case RV_BDREL:
1410- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1411+ if (in_init_rx(mod, val))
1412+ val -= (uint64_t) mod->module_init_rx;
1413+ else if (in_init_rw(mod, val))
1414+ val -= (uint64_t) mod->module_init_rw;
1415+ else if (in_core_rx(mod, val))
1416+ val -= (uint64_t) mod->module_core_rx;
1417+ else if (in_core_rw(mod, val))
1418+ val -= (uint64_t) mod->module_core_rw;
1419 break;
1420
1421 case RV_LTV:
1422@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1423 * addresses have been selected...
1424 */
1425 uint64_t gp;
1426- if (mod->core_size > MAX_LTOFF)
1427+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1428 /*
1429 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1430 * at the end of the module.
1431 */
1432- gp = mod->core_size - MAX_LTOFF / 2;
1433+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1434 else
1435- gp = mod->core_size / 2;
1436- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1437+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1438+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1439 mod->arch.gp = gp;
1440 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1441 }
1442diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1443index 609d500..7dde2a8 100644
1444--- a/arch/ia64/kernel/sys_ia64.c
1445+++ b/arch/ia64/kernel/sys_ia64.c
1446@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1447 if (REGION_NUMBER(addr) == RGN_HPAGE)
1448 addr = 0;
1449 #endif
1450+
1451+#ifdef CONFIG_PAX_RANDMMAP
1452+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1453+ addr = mm->free_area_cache;
1454+ else
1455+#endif
1456+
1457 if (!addr)
1458 addr = mm->free_area_cache;
1459
1460@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1461 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1462 /* At this point: (!vma || addr < vma->vm_end). */
1463 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1464- if (start_addr != TASK_UNMAPPED_BASE) {
1465+ if (start_addr != mm->mmap_base) {
1466 /* Start a new search --- just in case we missed some holes. */
1467- addr = TASK_UNMAPPED_BASE;
1468+ addr = mm->mmap_base;
1469 goto full_search;
1470 }
1471 return -ENOMEM;
1472 }
1473- if (!vma || addr + len <= vma->vm_start) {
1474+ if (check_heap_stack_gap(vma, addr, len)) {
1475 /* Remember the address where we stopped this search: */
1476 mm->free_area_cache = addr + len;
1477 return addr;
1478diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1479index 53c0ba0..2accdde 100644
1480--- a/arch/ia64/kernel/vmlinux.lds.S
1481+++ b/arch/ia64/kernel/vmlinux.lds.S
1482@@ -199,7 +199,7 @@ SECTIONS {
1483 /* Per-cpu data: */
1484 . = ALIGN(PERCPU_PAGE_SIZE);
1485 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
1486- __phys_per_cpu_start = __per_cpu_load;
1487+ __phys_per_cpu_start = per_cpu_load;
1488 /*
1489 * ensure percpu data fits
1490 * into percpu page size
1491diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1492index 20b3593..1ce77f0 100644
1493--- a/arch/ia64/mm/fault.c
1494+++ b/arch/ia64/mm/fault.c
1495@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
1496 return pte_present(pte);
1497 }
1498
1499+#ifdef CONFIG_PAX_PAGEEXEC
1500+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1501+{
1502+ unsigned long i;
1503+
1504+ printk(KERN_ERR "PAX: bytes at PC: ");
1505+ for (i = 0; i < 8; i++) {
1506+ unsigned int c;
1507+ if (get_user(c, (unsigned int *)pc+i))
1508+ printk(KERN_CONT "???????? ");
1509+ else
1510+ printk(KERN_CONT "%08x ", c);
1511+ }
1512+ printk("\n");
1513+}
1514+#endif
1515+
1516 void __kprobes
1517 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1518 {
1519@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1520 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1521 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1522
1523- if ((vma->vm_flags & mask) != mask)
1524+ if ((vma->vm_flags & mask) != mask) {
1525+
1526+#ifdef CONFIG_PAX_PAGEEXEC
1527+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1528+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1529+ goto bad_area;
1530+
1531+ up_read(&mm->mmap_sem);
1532+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1533+ do_group_exit(SIGKILL);
1534+ }
1535+#endif
1536+
1537 goto bad_area;
1538
1539+ }
1540+
1541 /*
1542 * If for any reason at all we couldn't handle the fault, make
1543 * sure we exit gracefully rather than endlessly redo the
1544diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1545index 5ca674b..e0e1b70 100644
1546--- a/arch/ia64/mm/hugetlbpage.c
1547+++ b/arch/ia64/mm/hugetlbpage.c
1548@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1549 /* At this point: (!vmm || addr < vmm->vm_end). */
1550 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1551 return -ENOMEM;
1552- if (!vmm || (addr + len) <= vmm->vm_start)
1553+ if (check_heap_stack_gap(vmm, addr, len))
1554 return addr;
1555 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1556 }
1557diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1558index 00cb0e2..2ad8024 100644
1559--- a/arch/ia64/mm/init.c
1560+++ b/arch/ia64/mm/init.c
1561@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1562 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1563 vma->vm_end = vma->vm_start + PAGE_SIZE;
1564 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1565+
1566+#ifdef CONFIG_PAX_PAGEEXEC
1567+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1568+ vma->vm_flags &= ~VM_EXEC;
1569+
1570+#ifdef CONFIG_PAX_MPROTECT
1571+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1572+ vma->vm_flags &= ~VM_MAYEXEC;
1573+#endif
1574+
1575+ }
1576+#endif
1577+
1578 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1579 down_write(&current->mm->mmap_sem);
1580 if (insert_vm_struct(current->mm, vma)) {
1581diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1582index 82abd15..d95ae5d 100644
1583--- a/arch/m32r/lib/usercopy.c
1584+++ b/arch/m32r/lib/usercopy.c
1585@@ -14,6 +14,9 @@
1586 unsigned long
1587 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1588 {
1589+ if ((long)n < 0)
1590+ return n;
1591+
1592 prefetch(from);
1593 if (access_ok(VERIFY_WRITE, to, n))
1594 __copy_user(to,from,n);
1595@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1596 unsigned long
1597 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1598 {
1599+ if ((long)n < 0)
1600+ return n;
1601+
1602 prefetchw(to);
1603 if (access_ok(VERIFY_READ, from, n))
1604 __copy_user_zeroing(to,from,n);
1605diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
1606index 1d93f81..67794d0 100644
1607--- a/arch/mips/include/asm/atomic.h
1608+++ b/arch/mips/include/asm/atomic.h
1609@@ -21,6 +21,10 @@
1610 #include <asm/war.h>
1611 #include <asm/system.h>
1612
1613+#ifdef CONFIG_GENERIC_ATOMIC64
1614+#include <asm-generic/atomic64.h>
1615+#endif
1616+
1617 #define ATOMIC_INIT(i) { (i) }
1618
1619 /*
1620@@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
1621 */
1622 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
1623
1624+#define atomic64_read_unchecked(v) atomic64_read(v)
1625+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1626+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1627+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1628+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1629+#define atomic64_inc_unchecked(v) atomic64_inc(v)
1630+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1631+#define atomic64_dec_unchecked(v) atomic64_dec(v)
1632+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1633+
1634 #endif /* CONFIG_64BIT */
1635
1636 /*
1637diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
1638index 455c0ac..ad65fbe 100644
1639--- a/arch/mips/include/asm/elf.h
1640+++ b/arch/mips/include/asm/elf.h
1641@@ -372,13 +372,16 @@ extern const char *__elf_platform;
1642 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1643 #endif
1644
1645+#ifdef CONFIG_PAX_ASLR
1646+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1647+
1648+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1649+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1650+#endif
1651+
1652 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1653 struct linux_binprm;
1654 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1655 int uses_interp);
1656
1657-struct mm_struct;
1658-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1659-#define arch_randomize_brk arch_randomize_brk
1660-
1661 #endif /* _ASM_ELF_H */
1662diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
1663index e59cd1a..8e329d6 100644
1664--- a/arch/mips/include/asm/page.h
1665+++ b/arch/mips/include/asm/page.h
1666@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
1667 #ifdef CONFIG_CPU_MIPS32
1668 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1669 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1670- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1671+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1672 #else
1673 typedef struct { unsigned long long pte; } pte_t;
1674 #define pte_val(x) ((x).pte)
1675diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
1676index 6018c80..7c37203 100644
1677--- a/arch/mips/include/asm/system.h
1678+++ b/arch/mips/include/asm/system.h
1679@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1680 */
1681 #define __ARCH_WANT_UNLOCKED_CTXSW
1682
1683-extern unsigned long arch_align_stack(unsigned long sp);
1684+#define arch_align_stack(x) ((x) & ~0xfUL)
1685
1686 #endif /* _ASM_SYSTEM_H */
1687diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
1688index 9fdd8bc..4bd7f1a 100644
1689--- a/arch/mips/kernel/binfmt_elfn32.c
1690+++ b/arch/mips/kernel/binfmt_elfn32.c
1691@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1692 #undef ELF_ET_DYN_BASE
1693 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1694
1695+#ifdef CONFIG_PAX_ASLR
1696+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1697+
1698+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1699+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1700+#endif
1701+
1702 #include <asm/processor.h>
1703 #include <linux/module.h>
1704 #include <linux/elfcore.h>
1705diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
1706index ff44823..97f8906 100644
1707--- a/arch/mips/kernel/binfmt_elfo32.c
1708+++ b/arch/mips/kernel/binfmt_elfo32.c
1709@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1710 #undef ELF_ET_DYN_BASE
1711 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1712
1713+#ifdef CONFIG_PAX_ASLR
1714+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1715+
1716+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1717+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1718+#endif
1719+
1720 #include <asm/processor.h>
1721
1722 /*
1723diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
1724index c47f96e..661d418 100644
1725--- a/arch/mips/kernel/process.c
1726+++ b/arch/mips/kernel/process.c
1727@@ -481,15 +481,3 @@ unsigned long get_wchan(struct task_struct *task)
1728 out:
1729 return pc;
1730 }
1731-
1732-/*
1733- * Don't forget that the stack pointer must be aligned on a 8 bytes
1734- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1735- */
1736-unsigned long arch_align_stack(unsigned long sp)
1737-{
1738- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1739- sp -= get_random_int() & ~PAGE_MASK;
1740-
1741- return sp & ALMASK;
1742-}
1743diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
1744index 937cf33..adb39bb 100644
1745--- a/arch/mips/mm/fault.c
1746+++ b/arch/mips/mm/fault.c
1747@@ -28,6 +28,23 @@
1748 #include <asm/highmem.h> /* For VMALLOC_END */
1749 #include <linux/kdebug.h>
1750
1751+#ifdef CONFIG_PAX_PAGEEXEC
1752+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1753+{
1754+ unsigned long i;
1755+
1756+ printk(KERN_ERR "PAX: bytes at PC: ");
1757+ for (i = 0; i < 5; i++) {
1758+ unsigned int c;
1759+ if (get_user(c, (unsigned int *)pc+i))
1760+ printk(KERN_CONT "???????? ");
1761+ else
1762+ printk(KERN_CONT "%08x ", c);
1763+ }
1764+ printk("\n");
1765+}
1766+#endif
1767+
1768 /*
1769 * This routine handles page faults. It determines the address,
1770 * and the problem, and then passes it off to one of the appropriate
1771diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
1772index 302d779..7d35bf8 100644
1773--- a/arch/mips/mm/mmap.c
1774+++ b/arch/mips/mm/mmap.c
1775@@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1776 do_color_align = 1;
1777
1778 /* requesting a specific address */
1779+
1780+#ifdef CONFIG_PAX_RANDMMAP
1781+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1782+#endif
1783+
1784 if (addr) {
1785 if (do_color_align)
1786 addr = COLOUR_ALIGN(addr, pgoff);
1787@@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1788 addr = PAGE_ALIGN(addr);
1789
1790 vma = find_vma(mm, addr);
1791- if (TASK_SIZE - len >= addr &&
1792- (!vma || addr + len <= vma->vm_start))
1793+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1794 return addr;
1795 }
1796
1797@@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1798 /* At this point: (!vma || addr < vma->vm_end). */
1799 if (TASK_SIZE - len < addr)
1800 return -ENOMEM;
1801- if (!vma || addr + len <= vma->vm_start)
1802+ if (check_heap_stack_gap(vmm, addr, len))
1803 return addr;
1804 addr = vma->vm_end;
1805 if (do_color_align)
1806@@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1807 /* make sure it can fit in the remaining address space */
1808 if (likely(addr > len)) {
1809 vma = find_vma(mm, addr - len);
1810- if (!vma || addr <= vma->vm_start) {
1811+ if (check_heap_stack_gap(vmm, addr - len, len))
1812 /* cache the address as a hint for next time */
1813 return mm->free_area_cache = addr - len;
1814 }
1815@@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1816 * return with success:
1817 */
1818 vma = find_vma(mm, addr);
1819- if (likely(!vma || addr + len <= vma->vm_start)) {
1820+ if (check_heap_stack_gap(vmm, addr, len)) {
1821 /* cache the address as a hint for next time */
1822 return mm->free_area_cache = addr;
1823 }
1824@@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1825 mm->unmap_area = arch_unmap_area_topdown;
1826 }
1827 }
1828-
1829-static inline unsigned long brk_rnd(void)
1830-{
1831- unsigned long rnd = get_random_int();
1832-
1833- rnd = rnd << PAGE_SHIFT;
1834- /* 8MB for 32bit, 256MB for 64bit */
1835- if (TASK_IS_32BIT_ADDR)
1836- rnd = rnd & 0x7ffffful;
1837- else
1838- rnd = rnd & 0xffffffful;
1839-
1840- return rnd;
1841-}
1842-
1843-unsigned long arch_randomize_brk(struct mm_struct *mm)
1844-{
1845- unsigned long base = mm->brk;
1846- unsigned long ret;
1847-
1848- ret = PAGE_ALIGN(base + brk_rnd());
1849-
1850- if (ret < mm->brk)
1851- return mm->brk;
1852-
1853- return ret;
1854-}
1855diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
1856index 4054b31..a10c105 100644
1857--- a/arch/parisc/include/asm/atomic.h
1858+++ b/arch/parisc/include/asm/atomic.h
1859@@ -335,6 +335,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
1860
1861 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
1862
1863+#define atomic64_read_unchecked(v) atomic64_read(v)
1864+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1865+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1866+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1867+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1868+#define atomic64_inc_unchecked(v) atomic64_inc(v)
1869+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1870+#define atomic64_dec_unchecked(v) atomic64_dec(v)
1871+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1872+
1873 #endif /* !CONFIG_64BIT */
1874
1875
1876diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
1877index 19f6cb1..6c78cf2 100644
1878--- a/arch/parisc/include/asm/elf.h
1879+++ b/arch/parisc/include/asm/elf.h
1880@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
1881
1882 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1883
1884+#ifdef CONFIG_PAX_ASLR
1885+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1886+
1887+#define PAX_DELTA_MMAP_LEN 16
1888+#define PAX_DELTA_STACK_LEN 16
1889+#endif
1890+
1891 /* This yields a mask that user programs can use to figure out what
1892 instruction set this CPU supports. This could be done in user space,
1893 but it's not easy, and we've already done it here. */
1894diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
1895index 22dadeb..f6c2be4 100644
1896--- a/arch/parisc/include/asm/pgtable.h
1897+++ b/arch/parisc/include/asm/pgtable.h
1898@@ -210,6 +210,17 @@ struct vm_area_struct;
1899 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1900 #define PAGE_COPY PAGE_EXECREAD
1901 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1902+
1903+#ifdef CONFIG_PAX_PAGEEXEC
1904+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1905+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1906+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1907+#else
1908+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1909+# define PAGE_COPY_NOEXEC PAGE_COPY
1910+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1911+#endif
1912+
1913 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1914 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1915 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1916diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
1917index 5e34ccf..672bc9c 100644
1918--- a/arch/parisc/kernel/module.c
1919+++ b/arch/parisc/kernel/module.c
1920@@ -98,16 +98,38 @@
1921
1922 /* three functions to determine where in the module core
1923 * or init pieces the location is */
1924+static inline int in_init_rx(struct module *me, void *loc)
1925+{
1926+ return (loc >= me->module_init_rx &&
1927+ loc < (me->module_init_rx + me->init_size_rx));
1928+}
1929+
1930+static inline int in_init_rw(struct module *me, void *loc)
1931+{
1932+ return (loc >= me->module_init_rw &&
1933+ loc < (me->module_init_rw + me->init_size_rw));
1934+}
1935+
1936 static inline int in_init(struct module *me, void *loc)
1937 {
1938- return (loc >= me->module_init &&
1939- loc <= (me->module_init + me->init_size));
1940+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1941+}
1942+
1943+static inline int in_core_rx(struct module *me, void *loc)
1944+{
1945+ return (loc >= me->module_core_rx &&
1946+ loc < (me->module_core_rx + me->core_size_rx));
1947+}
1948+
1949+static inline int in_core_rw(struct module *me, void *loc)
1950+{
1951+ return (loc >= me->module_core_rw &&
1952+ loc < (me->module_core_rw + me->core_size_rw));
1953 }
1954
1955 static inline int in_core(struct module *me, void *loc)
1956 {
1957- return (loc >= me->module_core &&
1958- loc <= (me->module_core + me->core_size));
1959+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1960 }
1961
1962 static inline int in_local(struct module *me, void *loc)
1963@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
1964 }
1965
1966 /* align things a bit */
1967- me->core_size = ALIGN(me->core_size, 16);
1968- me->arch.got_offset = me->core_size;
1969- me->core_size += gots * sizeof(struct got_entry);
1970+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1971+ me->arch.got_offset = me->core_size_rw;
1972+ me->core_size_rw += gots * sizeof(struct got_entry);
1973
1974- me->core_size = ALIGN(me->core_size, 16);
1975- me->arch.fdesc_offset = me->core_size;
1976- me->core_size += fdescs * sizeof(Elf_Fdesc);
1977+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1978+ me->arch.fdesc_offset = me->core_size_rw;
1979+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1980
1981 me->arch.got_max = gots;
1982 me->arch.fdesc_max = fdescs;
1983@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1984
1985 BUG_ON(value == 0);
1986
1987- got = me->module_core + me->arch.got_offset;
1988+ got = me->module_core_rw + me->arch.got_offset;
1989 for (i = 0; got[i].addr; i++)
1990 if (got[i].addr == value)
1991 goto out;
1992@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1993 #ifdef CONFIG_64BIT
1994 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1995 {
1996- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1997+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1998
1999 if (!value) {
2000 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
2001@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2002
2003 /* Create new one */
2004 fdesc->addr = value;
2005- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2006+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2007 return (Elf_Addr)fdesc;
2008 }
2009 #endif /* CONFIG_64BIT */
2010@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
2011
2012 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
2013 end = table + sechdrs[me->arch.unwind_section].sh_size;
2014- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2015+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2016
2017 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
2018 me->arch.unwind_section, table, end, gp);
2019diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
2020index c9b9322..02d8940 100644
2021--- a/arch/parisc/kernel/sys_parisc.c
2022+++ b/arch/parisc/kernel/sys_parisc.c
2023@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
2024 /* At this point: (!vma || addr < vma->vm_end). */
2025 if (TASK_SIZE - len < addr)
2026 return -ENOMEM;
2027- if (!vma || addr + len <= vma->vm_start)
2028+ if (check_heap_stack_gap(vma, addr, len))
2029 return addr;
2030 addr = vma->vm_end;
2031 }
2032@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
2033 /* At this point: (!vma || addr < vma->vm_end). */
2034 if (TASK_SIZE - len < addr)
2035 return -ENOMEM;
2036- if (!vma || addr + len <= vma->vm_start)
2037+ if (check_heap_stack_gap(vma, addr, len))
2038 return addr;
2039 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
2040 if (addr < vma->vm_end) /* handle wraparound */
2041@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2042 if (flags & MAP_FIXED)
2043 return addr;
2044 if (!addr)
2045- addr = TASK_UNMAPPED_BASE;
2046+ addr = current->mm->mmap_base;
2047
2048 if (filp) {
2049 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
2050diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
2051index f19e660..414fe24 100644
2052--- a/arch/parisc/kernel/traps.c
2053+++ b/arch/parisc/kernel/traps.c
2054@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
2055
2056 down_read(&current->mm->mmap_sem);
2057 vma = find_vma(current->mm,regs->iaoq[0]);
2058- if (vma && (regs->iaoq[0] >= vma->vm_start)
2059- && (vma->vm_flags & VM_EXEC)) {
2060-
2061+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
2062 fault_address = regs->iaoq[0];
2063 fault_space = regs->iasq[0];
2064
2065diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
2066index 18162ce..94de376 100644
2067--- a/arch/parisc/mm/fault.c
2068+++ b/arch/parisc/mm/fault.c
2069@@ -15,6 +15,7 @@
2070 #include <linux/sched.h>
2071 #include <linux/interrupt.h>
2072 #include <linux/module.h>
2073+#include <linux/unistd.h>
2074
2075 #include <asm/uaccess.h>
2076 #include <asm/traps.h>
2077@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
2078 static unsigned long
2079 parisc_acctyp(unsigned long code, unsigned int inst)
2080 {
2081- if (code == 6 || code == 16)
2082+ if (code == 6 || code == 7 || code == 16)
2083 return VM_EXEC;
2084
2085 switch (inst & 0xf0000000) {
2086@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
2087 }
2088 #endif
2089
2090+#ifdef CONFIG_PAX_PAGEEXEC
2091+/*
2092+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
2093+ *
2094+ * returns 1 when task should be killed
2095+ * 2 when rt_sigreturn trampoline was detected
2096+ * 3 when unpatched PLT trampoline was detected
2097+ */
2098+static int pax_handle_fetch_fault(struct pt_regs *regs)
2099+{
2100+
2101+#ifdef CONFIG_PAX_EMUPLT
2102+ int err;
2103+
2104+ do { /* PaX: unpatched PLT emulation */
2105+ unsigned int bl, depwi;
2106+
2107+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
2108+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
2109+
2110+ if (err)
2111+ break;
2112+
2113+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
2114+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
2115+
2116+ err = get_user(ldw, (unsigned int *)addr);
2117+ err |= get_user(bv, (unsigned int *)(addr+4));
2118+ err |= get_user(ldw2, (unsigned int *)(addr+8));
2119+
2120+ if (err)
2121+ break;
2122+
2123+ if (ldw == 0x0E801096U &&
2124+ bv == 0xEAC0C000U &&
2125+ ldw2 == 0x0E881095U)
2126+ {
2127+ unsigned int resolver, map;
2128+
2129+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
2130+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
2131+ if (err)
2132+ break;
2133+
2134+ regs->gr[20] = instruction_pointer(regs)+8;
2135+ regs->gr[21] = map;
2136+ regs->gr[22] = resolver;
2137+ regs->iaoq[0] = resolver | 3UL;
2138+ regs->iaoq[1] = regs->iaoq[0] + 4;
2139+ return 3;
2140+ }
2141+ }
2142+ } while (0);
2143+#endif
2144+
2145+#ifdef CONFIG_PAX_EMUTRAMP
2146+
2147+#ifndef CONFIG_PAX_EMUSIGRT
2148+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
2149+ return 1;
2150+#endif
2151+
2152+ do { /* PaX: rt_sigreturn emulation */
2153+ unsigned int ldi1, ldi2, bel, nop;
2154+
2155+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2156+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2157+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2158+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2159+
2160+ if (err)
2161+ break;
2162+
2163+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2164+ ldi2 == 0x3414015AU &&
2165+ bel == 0xE4008200U &&
2166+ nop == 0x08000240U)
2167+ {
2168+ regs->gr[25] = (ldi1 & 2) >> 1;
2169+ regs->gr[20] = __NR_rt_sigreturn;
2170+ regs->gr[31] = regs->iaoq[1] + 16;
2171+ regs->sr[0] = regs->iasq[1];
2172+ regs->iaoq[0] = 0x100UL;
2173+ regs->iaoq[1] = regs->iaoq[0] + 4;
2174+ regs->iasq[0] = regs->sr[2];
2175+ regs->iasq[1] = regs->sr[2];
2176+ return 2;
2177+ }
2178+ } while (0);
2179+#endif
2180+
2181+ return 1;
2182+}
2183+
2184+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2185+{
2186+ unsigned long i;
2187+
2188+ printk(KERN_ERR "PAX: bytes at PC: ");
2189+ for (i = 0; i < 5; i++) {
2190+ unsigned int c;
2191+ if (get_user(c, (unsigned int *)pc+i))
2192+ printk(KERN_CONT "???????? ");
2193+ else
2194+ printk(KERN_CONT "%08x ", c);
2195+ }
2196+ printk("\n");
2197+}
2198+#endif
2199+
2200 int fixup_exception(struct pt_regs *regs)
2201 {
2202 const struct exception_table_entry *fix;
2203@@ -192,8 +303,33 @@ good_area:
2204
2205 acc_type = parisc_acctyp(code,regs->iir);
2206
2207- if ((vma->vm_flags & acc_type) != acc_type)
2208+ if ((vma->vm_flags & acc_type) != acc_type) {
2209+
2210+#ifdef CONFIG_PAX_PAGEEXEC
2211+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2212+ (address & ~3UL) == instruction_pointer(regs))
2213+ {
2214+ up_read(&mm->mmap_sem);
2215+ switch (pax_handle_fetch_fault(regs)) {
2216+
2217+#ifdef CONFIG_PAX_EMUPLT
2218+ case 3:
2219+ return;
2220+#endif
2221+
2222+#ifdef CONFIG_PAX_EMUTRAMP
2223+ case 2:
2224+ return;
2225+#endif
2226+
2227+ }
2228+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2229+ do_group_exit(SIGKILL);
2230+ }
2231+#endif
2232+
2233 goto bad_area;
2234+ }
2235
2236 /*
2237 * If for any reason at all we couldn't handle the fault, make
2238diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
2239index 02e41b5..ec6e26c 100644
2240--- a/arch/powerpc/include/asm/atomic.h
2241+++ b/arch/powerpc/include/asm/atomic.h
2242@@ -469,6 +469,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2243
2244 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
2245
2246+#define atomic64_read_unchecked(v) atomic64_read(v)
2247+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2248+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2249+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2250+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2251+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2252+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2253+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2254+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2255+
2256 #endif /* __powerpc64__ */
2257
2258 #endif /* __KERNEL__ */
2259diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2260index 3bf9cca..e7457d0 100644
2261--- a/arch/powerpc/include/asm/elf.h
2262+++ b/arch/powerpc/include/asm/elf.h
2263@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2264 the loader. We need to make sure that it is out of the way of the program
2265 that it will "exec", and that there is sufficient room for the brk. */
2266
2267-extern unsigned long randomize_et_dyn(unsigned long base);
2268-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2269+#define ELF_ET_DYN_BASE (0x20000000)
2270+
2271+#ifdef CONFIG_PAX_ASLR
2272+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2273+
2274+#ifdef __powerpc64__
2275+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
2276+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
2277+#else
2278+#define PAX_DELTA_MMAP_LEN 15
2279+#define PAX_DELTA_STACK_LEN 15
2280+#endif
2281+#endif
2282
2283 /*
2284 * Our registers are always unsigned longs, whether we're a 32 bit
2285@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2286 (0x7ff >> (PAGE_SHIFT - 12)) : \
2287 (0x3ffff >> (PAGE_SHIFT - 12)))
2288
2289-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2290-#define arch_randomize_brk arch_randomize_brk
2291-
2292 #endif /* __KERNEL__ */
2293
2294 /*
2295diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2296index bca8fdc..61e9580 100644
2297--- a/arch/powerpc/include/asm/kmap_types.h
2298+++ b/arch/powerpc/include/asm/kmap_types.h
2299@@ -27,6 +27,7 @@ enum km_type {
2300 KM_PPC_SYNC_PAGE,
2301 KM_PPC_SYNC_ICACHE,
2302 KM_KDB,
2303+ KM_CLEARPAGE,
2304 KM_TYPE_NR
2305 };
2306
2307diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
2308index d4a7f64..451de1c 100644
2309--- a/arch/powerpc/include/asm/mman.h
2310+++ b/arch/powerpc/include/asm/mman.h
2311@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
2312 }
2313 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
2314
2315-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
2316+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
2317 {
2318 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
2319 }
2320diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2321index dd9c4fd..a2ced87 100644
2322--- a/arch/powerpc/include/asm/page.h
2323+++ b/arch/powerpc/include/asm/page.h
2324@@ -141,8 +141,9 @@ extern phys_addr_t kernstart_addr;
2325 * and needs to be executable. This means the whole heap ends
2326 * up being executable.
2327 */
2328-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2329- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2330+#define VM_DATA_DEFAULT_FLAGS32 \
2331+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2332+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2333
2334 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2335 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2336@@ -170,6 +171,9 @@ extern phys_addr_t kernstart_addr;
2337 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2338 #endif
2339
2340+#define ktla_ktva(addr) (addr)
2341+#define ktva_ktla(addr) (addr)
2342+
2343 /*
2344 * Use the top bit of the higher-level page table entries to indicate whether
2345 * the entries we point to contain hugepages. This works because we know that
2346diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2347index fb40ede..d3ce956 100644
2348--- a/arch/powerpc/include/asm/page_64.h
2349+++ b/arch/powerpc/include/asm/page_64.h
2350@@ -144,15 +144,18 @@ do { \
2351 * stack by default, so in the absence of a PT_GNU_STACK program header
2352 * we turn execute permission off.
2353 */
2354-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2355- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2356+#define VM_STACK_DEFAULT_FLAGS32 \
2357+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2358+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2359
2360 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2361 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2362
2363+#ifndef CONFIG_PAX_PAGEEXEC
2364 #define VM_STACK_DEFAULT_FLAGS \
2365 (is_32bit_task() ? \
2366 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2367+#endif
2368
2369 #include <asm-generic/getorder.h>
2370
2371diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2372index 88b0bd9..e32bc67 100644
2373--- a/arch/powerpc/include/asm/pgtable.h
2374+++ b/arch/powerpc/include/asm/pgtable.h
2375@@ -2,6 +2,7 @@
2376 #define _ASM_POWERPC_PGTABLE_H
2377 #ifdef __KERNEL__
2378
2379+#include <linux/const.h>
2380 #ifndef __ASSEMBLY__
2381 #include <asm/processor.h> /* For TASK_SIZE */
2382 #include <asm/mmu.h>
2383diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2384index 4aad413..85d86bf 100644
2385--- a/arch/powerpc/include/asm/pte-hash32.h
2386+++ b/arch/powerpc/include/asm/pte-hash32.h
2387@@ -21,6 +21,7 @@
2388 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2389 #define _PAGE_USER 0x004 /* usermode access allowed */
2390 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2391+#define _PAGE_EXEC _PAGE_GUARDED
2392 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2393 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2394 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2395diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2396index 559da19..7e5835c 100644
2397--- a/arch/powerpc/include/asm/reg.h
2398+++ b/arch/powerpc/include/asm/reg.h
2399@@ -212,6 +212,7 @@
2400 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2401 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2402 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2403+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2404 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2405 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2406 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2407diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2408index e30a13d..2b7d994 100644
2409--- a/arch/powerpc/include/asm/system.h
2410+++ b/arch/powerpc/include/asm/system.h
2411@@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2412 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2413 #endif
2414
2415-extern unsigned long arch_align_stack(unsigned long sp);
2416+#define arch_align_stack(x) ((x) & ~0xfUL)
2417
2418 /* Used in very early kernel initialization. */
2419 extern unsigned long reloc_offset(void);
2420diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2421index bd0fb84..a42a14b 100644
2422--- a/arch/powerpc/include/asm/uaccess.h
2423+++ b/arch/powerpc/include/asm/uaccess.h
2424@@ -13,6 +13,8 @@
2425 #define VERIFY_READ 0
2426 #define VERIFY_WRITE 1
2427
2428+extern void check_object_size(const void *ptr, unsigned long n, bool to);
2429+
2430 /*
2431 * The fs value determines whether argument validity checking should be
2432 * performed or not. If get_fs() == USER_DS, checking is performed, with
2433@@ -327,52 +329,6 @@ do { \
2434 extern unsigned long __copy_tofrom_user(void __user *to,
2435 const void __user *from, unsigned long size);
2436
2437-#ifndef __powerpc64__
2438-
2439-static inline unsigned long copy_from_user(void *to,
2440- const void __user *from, unsigned long n)
2441-{
2442- unsigned long over;
2443-
2444- if (access_ok(VERIFY_READ, from, n))
2445- return __copy_tofrom_user((__force void __user *)to, from, n);
2446- if ((unsigned long)from < TASK_SIZE) {
2447- over = (unsigned long)from + n - TASK_SIZE;
2448- return __copy_tofrom_user((__force void __user *)to, from,
2449- n - over) + over;
2450- }
2451- return n;
2452-}
2453-
2454-static inline unsigned long copy_to_user(void __user *to,
2455- const void *from, unsigned long n)
2456-{
2457- unsigned long over;
2458-
2459- if (access_ok(VERIFY_WRITE, to, n))
2460- return __copy_tofrom_user(to, (__force void __user *)from, n);
2461- if ((unsigned long)to < TASK_SIZE) {
2462- over = (unsigned long)to + n - TASK_SIZE;
2463- return __copy_tofrom_user(to, (__force void __user *)from,
2464- n - over) + over;
2465- }
2466- return n;
2467-}
2468-
2469-#else /* __powerpc64__ */
2470-
2471-#define __copy_in_user(to, from, size) \
2472- __copy_tofrom_user((to), (from), (size))
2473-
2474-extern unsigned long copy_from_user(void *to, const void __user *from,
2475- unsigned long n);
2476-extern unsigned long copy_to_user(void __user *to, const void *from,
2477- unsigned long n);
2478-extern unsigned long copy_in_user(void __user *to, const void __user *from,
2479- unsigned long n);
2480-
2481-#endif /* __powerpc64__ */
2482-
2483 static inline unsigned long __copy_from_user_inatomic(void *to,
2484 const void __user *from, unsigned long n)
2485 {
2486@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
2487 if (ret == 0)
2488 return 0;
2489 }
2490+
2491+ if (!__builtin_constant_p(n))
2492+ check_object_size(to, n, false);
2493+
2494 return __copy_tofrom_user((__force void __user *)to, from, n);
2495 }
2496
2497@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
2498 if (ret == 0)
2499 return 0;
2500 }
2501+
2502+ if (!__builtin_constant_p(n))
2503+ check_object_size(from, n, true);
2504+
2505 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2506 }
2507
2508@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
2509 return __copy_to_user_inatomic(to, from, size);
2510 }
2511
2512+#ifndef __powerpc64__
2513+
2514+static inline unsigned long __must_check copy_from_user(void *to,
2515+ const void __user *from, unsigned long n)
2516+{
2517+ unsigned long over;
2518+
2519+ if ((long)n < 0)
2520+ return n;
2521+
2522+ if (access_ok(VERIFY_READ, from, n)) {
2523+ if (!__builtin_constant_p(n))
2524+ check_object_size(to, n, false);
2525+ return __copy_tofrom_user((__force void __user *)to, from, n);
2526+ }
2527+ if ((unsigned long)from < TASK_SIZE) {
2528+ over = (unsigned long)from + n - TASK_SIZE;
2529+ if (!__builtin_constant_p(n - over))
2530+ check_object_size(to, n - over, false);
2531+ return __copy_tofrom_user((__force void __user *)to, from,
2532+ n - over) + over;
2533+ }
2534+ return n;
2535+}
2536+
2537+static inline unsigned long __must_check copy_to_user(void __user *to,
2538+ const void *from, unsigned long n)
2539+{
2540+ unsigned long over;
2541+
2542+ if ((long)n < 0)
2543+ return n;
2544+
2545+ if (access_ok(VERIFY_WRITE, to, n)) {
2546+ if (!__builtin_constant_p(n))
2547+ check_object_size(from, n, true);
2548+ return __copy_tofrom_user(to, (__force void __user *)from, n);
2549+ }
2550+ if ((unsigned long)to < TASK_SIZE) {
2551+ over = (unsigned long)to + n - TASK_SIZE;
2552+ if (!__builtin_constant_p(n))
2553+ check_object_size(from, n - over, true);
2554+ return __copy_tofrom_user(to, (__force void __user *)from,
2555+ n - over) + over;
2556+ }
2557+ return n;
2558+}
2559+
2560+#else /* __powerpc64__ */
2561+
2562+#define __copy_in_user(to, from, size) \
2563+ __copy_tofrom_user((to), (from), (size))
2564+
2565+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2566+{
2567+ if ((long)n < 0 || n > INT_MAX)
2568+ return n;
2569+
2570+ if (!__builtin_constant_p(n))
2571+ check_object_size(to, n, false);
2572+
2573+ if (likely(access_ok(VERIFY_READ, from, n)))
2574+ n = __copy_from_user(to, from, n);
2575+ else
2576+ memset(to, 0, n);
2577+ return n;
2578+}
2579+
2580+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2581+{
2582+ if ((long)n < 0 || n > INT_MAX)
2583+ return n;
2584+
2585+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
2586+ if (!__builtin_constant_p(n))
2587+ check_object_size(from, n, true);
2588+ n = __copy_to_user(to, from, n);
2589+ }
2590+ return n;
2591+}
2592+
2593+extern unsigned long copy_in_user(void __user *to, const void __user *from,
2594+ unsigned long n);
2595+
2596+#endif /* __powerpc64__ */
2597+
2598 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2599
2600 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2601diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
2602index 429983c..7af363b 100644
2603--- a/arch/powerpc/kernel/exceptions-64e.S
2604+++ b/arch/powerpc/kernel/exceptions-64e.S
2605@@ -587,6 +587,7 @@ storage_fault_common:
2606 std r14,_DAR(r1)
2607 std r15,_DSISR(r1)
2608 addi r3,r1,STACK_FRAME_OVERHEAD
2609+ bl .save_nvgprs
2610 mr r4,r14
2611 mr r5,r15
2612 ld r14,PACA_EXGEN+EX_R14(r13)
2613@@ -596,8 +597,7 @@ storage_fault_common:
2614 cmpdi r3,0
2615 bne- 1f
2616 b .ret_from_except_lite
2617-1: bl .save_nvgprs
2618- mr r5,r3
2619+1: mr r5,r3
2620 addi r3,r1,STACK_FRAME_OVERHEAD
2621 ld r4,_DAR(r1)
2622 bl .bad_page_fault
2623diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
2624index cf9c69b..ebc9640 100644
2625--- a/arch/powerpc/kernel/exceptions-64s.S
2626+++ b/arch/powerpc/kernel/exceptions-64s.S
2627@@ -1004,10 +1004,10 @@ handle_page_fault:
2628 11: ld r4,_DAR(r1)
2629 ld r5,_DSISR(r1)
2630 addi r3,r1,STACK_FRAME_OVERHEAD
2631+ bl .save_nvgprs
2632 bl .do_page_fault
2633 cmpdi r3,0
2634 beq+ 13f
2635- bl .save_nvgprs
2636 mr r5,r3
2637 addi r3,r1,STACK_FRAME_OVERHEAD
2638 lwz r4,_DAR(r1)
2639diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
2640index 0b6d796..d760ddb 100644
2641--- a/arch/powerpc/kernel/module_32.c
2642+++ b/arch/powerpc/kernel/module_32.c
2643@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
2644 me->arch.core_plt_section = i;
2645 }
2646 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2647- printk("Module doesn't contain .plt or .init.plt sections.\n");
2648+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2649 return -ENOEXEC;
2650 }
2651
2652@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
2653
2654 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2655 /* Init, or core PLT? */
2656- if (location >= mod->module_core
2657- && location < mod->module_core + mod->core_size)
2658+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2659+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2660 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2661- else
2662+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2663+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2664 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2665+ else {
2666+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2667+ return ~0UL;
2668+ }
2669
2670 /* Find this entry, or if that fails, the next avail. entry */
2671 while (entry->jump[0]) {
2672diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
2673index 6457574..08b28d3 100644
2674--- a/arch/powerpc/kernel/process.c
2675+++ b/arch/powerpc/kernel/process.c
2676@@ -660,8 +660,8 @@ void show_regs(struct pt_regs * regs)
2677 * Lookup NIP late so we have the best change of getting the
2678 * above info out without failing
2679 */
2680- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2681- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2682+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2683+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2684 #endif
2685 show_stack(current, (unsigned long *) regs->gpr[1]);
2686 if (!user_mode(regs))
2687@@ -1165,10 +1165,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2688 newsp = stack[0];
2689 ip = stack[STACK_FRAME_LR_SAVE];
2690 if (!firstframe || ip != lr) {
2691- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2692+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2693 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2694 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2695- printk(" (%pS)",
2696+ printk(" (%pA)",
2697 (void *)current->ret_stack[curr_frame].ret);
2698 curr_frame--;
2699 }
2700@@ -1188,7 +1188,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2701 struct pt_regs *regs = (struct pt_regs *)
2702 (sp + STACK_FRAME_OVERHEAD);
2703 lr = regs->link;
2704- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2705+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2706 regs->trap, (void *)regs->nip, (void *)lr);
2707 firstframe = 1;
2708 }
2709@@ -1263,58 +1263,3 @@ void thread_info_cache_init(void)
2710 }
2711
2712 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2713-
2714-unsigned long arch_align_stack(unsigned long sp)
2715-{
2716- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2717- sp -= get_random_int() & ~PAGE_MASK;
2718- return sp & ~0xf;
2719-}
2720-
2721-static inline unsigned long brk_rnd(void)
2722-{
2723- unsigned long rnd = 0;
2724-
2725- /* 8MB for 32bit, 1GB for 64bit */
2726- if (is_32bit_task())
2727- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2728- else
2729- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2730-
2731- return rnd << PAGE_SHIFT;
2732-}
2733-
2734-unsigned long arch_randomize_brk(struct mm_struct *mm)
2735-{
2736- unsigned long base = mm->brk;
2737- unsigned long ret;
2738-
2739-#ifdef CONFIG_PPC_STD_MMU_64
2740- /*
2741- * If we are using 1TB segments and we are allowed to randomise
2742- * the heap, we can put it above 1TB so it is backed by a 1TB
2743- * segment. Otherwise the heap will be in the bottom 1TB
2744- * which always uses 256MB segments and this may result in a
2745- * performance penalty.
2746- */
2747- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2748- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2749-#endif
2750-
2751- ret = PAGE_ALIGN(base + brk_rnd());
2752-
2753- if (ret < mm->brk)
2754- return mm->brk;
2755-
2756- return ret;
2757-}
2758-
2759-unsigned long randomize_et_dyn(unsigned long base)
2760-{
2761- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2762-
2763- if (ret < base)
2764- return base;
2765-
2766- return ret;
2767-}
2768diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
2769index 836a5a1..27289a3 100644
2770--- a/arch/powerpc/kernel/signal_32.c
2771+++ b/arch/powerpc/kernel/signal_32.c
2772@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
2773 /* Save user registers on the stack */
2774 frame = &rt_sf->uc.uc_mcontext;
2775 addr = frame;
2776- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2777+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2778 if (save_user_regs(regs, frame, 0, 1))
2779 goto badframe;
2780 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2781diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
2782index a50b5ec..547078a 100644
2783--- a/arch/powerpc/kernel/signal_64.c
2784+++ b/arch/powerpc/kernel/signal_64.c
2785@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
2786 current->thread.fpscr.val = 0;
2787
2788 /* Set up to return from userspace. */
2789- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2790+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2791 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2792 } else {
2793 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2794diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
2795index 5459d14..10f8070 100644
2796--- a/arch/powerpc/kernel/traps.c
2797+++ b/arch/powerpc/kernel/traps.c
2798@@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2799 static inline void pmac_backlight_unblank(void) { }
2800 #endif
2801
2802+extern void gr_handle_kernel_exploit(void);
2803+
2804 int die(const char *str, struct pt_regs *regs, long err)
2805 {
2806 static struct {
2807@@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs *regs, long err)
2808 if (panic_on_oops)
2809 panic("Fatal exception");
2810
2811+ gr_handle_kernel_exploit();
2812+
2813 oops_exit();
2814 do_exit(err);
2815
2816diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
2817index 7d14bb6..1305601 100644
2818--- a/arch/powerpc/kernel/vdso.c
2819+++ b/arch/powerpc/kernel/vdso.c
2820@@ -35,6 +35,7 @@
2821 #include <asm/firmware.h>
2822 #include <asm/vdso.h>
2823 #include <asm/vdso_datapage.h>
2824+#include <asm/mman.h>
2825
2826 #include "setup.h"
2827
2828@@ -219,7 +220,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2829 vdso_base = VDSO32_MBASE;
2830 #endif
2831
2832- current->mm->context.vdso_base = 0;
2833+ current->mm->context.vdso_base = ~0UL;
2834
2835 /* vDSO has a problem and was disabled, just don't "enable" it for the
2836 * process
2837@@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2838 vdso_base = get_unmapped_area(NULL, vdso_base,
2839 (vdso_pages << PAGE_SHIFT) +
2840 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2841- 0, 0);
2842+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2843 if (IS_ERR_VALUE(vdso_base)) {
2844 rc = vdso_base;
2845 goto fail_mmapsem;
2846diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
2847index 5eea6f3..5d10396 100644
2848--- a/arch/powerpc/lib/usercopy_64.c
2849+++ b/arch/powerpc/lib/usercopy_64.c
2850@@ -9,22 +9,6 @@
2851 #include <linux/module.h>
2852 #include <asm/uaccess.h>
2853
2854-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2855-{
2856- if (likely(access_ok(VERIFY_READ, from, n)))
2857- n = __copy_from_user(to, from, n);
2858- else
2859- memset(to, 0, n);
2860- return n;
2861-}
2862-
2863-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2864-{
2865- if (likely(access_ok(VERIFY_WRITE, to, n)))
2866- n = __copy_to_user(to, from, n);
2867- return n;
2868-}
2869-
2870 unsigned long copy_in_user(void __user *to, const void __user *from,
2871 unsigned long n)
2872 {
2873@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
2874 return n;
2875 }
2876
2877-EXPORT_SYMBOL(copy_from_user);
2878-EXPORT_SYMBOL(copy_to_user);
2879 EXPORT_SYMBOL(copy_in_user);
2880
2881diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
2882index 5efe8c9..db9ceef 100644
2883--- a/arch/powerpc/mm/fault.c
2884+++ b/arch/powerpc/mm/fault.c
2885@@ -32,6 +32,10 @@
2886 #include <linux/perf_event.h>
2887 #include <linux/magic.h>
2888 #include <linux/ratelimit.h>
2889+#include <linux/slab.h>
2890+#include <linux/pagemap.h>
2891+#include <linux/compiler.h>
2892+#include <linux/unistd.h>
2893
2894 #include <asm/firmware.h>
2895 #include <asm/page.h>
2896@@ -43,6 +47,7 @@
2897 #include <asm/tlbflush.h>
2898 #include <asm/siginfo.h>
2899 #include <mm/mmu_decl.h>
2900+#include <asm/ptrace.h>
2901
2902 #ifdef CONFIG_KPROBES
2903 static inline int notify_page_fault(struct pt_regs *regs)
2904@@ -66,6 +71,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
2905 }
2906 #endif
2907
2908+#ifdef CONFIG_PAX_PAGEEXEC
2909+/*
2910+ * PaX: decide what to do with offenders (regs->nip = fault address)
2911+ *
2912+ * returns 1 when task should be killed
2913+ */
2914+static int pax_handle_fetch_fault(struct pt_regs *regs)
2915+{
2916+ return 1;
2917+}
2918+
2919+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2920+{
2921+ unsigned long i;
2922+
2923+ printk(KERN_ERR "PAX: bytes at PC: ");
2924+ for (i = 0; i < 5; i++) {
2925+ unsigned int c;
2926+ if (get_user(c, (unsigned int __user *)pc+i))
2927+ printk(KERN_CONT "???????? ");
2928+ else
2929+ printk(KERN_CONT "%08x ", c);
2930+ }
2931+ printk("\n");
2932+}
2933+#endif
2934+
2935 /*
2936 * Check whether the instruction at regs->nip is a store using
2937 * an update addressing form which will update r1.
2938@@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
2939 * indicate errors in DSISR but can validly be set in SRR1.
2940 */
2941 if (trap == 0x400)
2942- error_code &= 0x48200000;
2943+ error_code &= 0x58200000;
2944 else
2945 is_write = error_code & DSISR_ISSTORE;
2946 #else
2947@@ -259,7 +291,7 @@ good_area:
2948 * "undefined". Of those that can be set, this is the only
2949 * one which seems bad.
2950 */
2951- if (error_code & 0x10000000)
2952+ if (error_code & DSISR_GUARDED)
2953 /* Guarded storage error. */
2954 goto bad_area;
2955 #endif /* CONFIG_8xx */
2956@@ -274,7 +306,7 @@ good_area:
2957 * processors use the same I/D cache coherency mechanism
2958 * as embedded.
2959 */
2960- if (error_code & DSISR_PROTFAULT)
2961+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2962 goto bad_area;
2963 #endif /* CONFIG_PPC_STD_MMU */
2964
2965@@ -343,6 +375,23 @@ bad_area:
2966 bad_area_nosemaphore:
2967 /* User mode accesses cause a SIGSEGV */
2968 if (user_mode(regs)) {
2969+
2970+#ifdef CONFIG_PAX_PAGEEXEC
2971+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2972+#ifdef CONFIG_PPC_STD_MMU
2973+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2974+#else
2975+ if (is_exec && regs->nip == address) {
2976+#endif
2977+ switch (pax_handle_fetch_fault(regs)) {
2978+ }
2979+
2980+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2981+ do_group_exit(SIGKILL);
2982+ }
2983+ }
2984+#endif
2985+
2986 _exception(SIGSEGV, regs, code, address);
2987 return 0;
2988 }
2989diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
2990index 5a783d8..c23e14b 100644
2991--- a/arch/powerpc/mm/mmap_64.c
2992+++ b/arch/powerpc/mm/mmap_64.c
2993@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2994 */
2995 if (mmap_is_legacy()) {
2996 mm->mmap_base = TASK_UNMAPPED_BASE;
2997+
2998+#ifdef CONFIG_PAX_RANDMMAP
2999+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3000+ mm->mmap_base += mm->delta_mmap;
3001+#endif
3002+
3003 mm->get_unmapped_area = arch_get_unmapped_area;
3004 mm->unmap_area = arch_unmap_area;
3005 } else {
3006 mm->mmap_base = mmap_base();
3007+
3008+#ifdef CONFIG_PAX_RANDMMAP
3009+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3010+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3011+#endif
3012+
3013 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3014 mm->unmap_area = arch_unmap_area_topdown;
3015 }
3016diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
3017index 73709f7..6b90313 100644
3018--- a/arch/powerpc/mm/slice.c
3019+++ b/arch/powerpc/mm/slice.c
3020@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
3021 if ((mm->task_size - len) < addr)
3022 return 0;
3023 vma = find_vma(mm, addr);
3024- return (!vma || (addr + len) <= vma->vm_start);
3025+ return check_heap_stack_gap(vma, addr, len);
3026 }
3027
3028 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3029@@ -256,7 +256,7 @@ full_search:
3030 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3031 continue;
3032 }
3033- if (!vma || addr + len <= vma->vm_start) {
3034+ if (check_heap_stack_gap(vma, addr, len)) {
3035 /*
3036 * Remember the place where we stopped the search:
3037 */
3038@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3039 }
3040 }
3041
3042- addr = mm->mmap_base;
3043- while (addr > len) {
3044+ if (mm->mmap_base < len)
3045+ addr = -ENOMEM;
3046+ else
3047+ addr = mm->mmap_base - len;
3048+
3049+ while (!IS_ERR_VALUE(addr)) {
3050 /* Go down by chunk size */
3051- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3052+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
3053
3054 /* Check for hit with different page size */
3055 mask = slice_range_to_mask(addr, len);
3056@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3057 * return with success:
3058 */
3059 vma = find_vma(mm, addr);
3060- if (!vma || (addr + len) <= vma->vm_start) {
3061+ if (check_heap_stack_gap(vma, addr, len)) {
3062 /* remember the address as a hint for next time */
3063 if (use_cache)
3064 mm->free_area_cache = addr;
3065@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3066 mm->cached_hole_size = vma->vm_start - addr;
3067
3068 /* try just below the current vma->vm_start */
3069- addr = vma->vm_start;
3070+ addr = skip_heap_stack_gap(vma, len);
3071 }
3072
3073 /*
3074@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
3075 if (fixed && addr > (mm->task_size - len))
3076 return -EINVAL;
3077
3078+#ifdef CONFIG_PAX_RANDMMAP
3079+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3080+ addr = 0;
3081+#endif
3082+
3083 /* If hint, make sure it matches our alignment restrictions */
3084 if (!fixed && addr) {
3085 addr = _ALIGN_UP(addr, 1ul << pshift);
3086diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
3087index 8517d2a..d2738d4 100644
3088--- a/arch/s390/include/asm/atomic.h
3089+++ b/arch/s390/include/asm/atomic.h
3090@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
3091 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
3092 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3093
3094+#define atomic64_read_unchecked(v) atomic64_read(v)
3095+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3096+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3097+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3098+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3099+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3100+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3101+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3102+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3103+
3104 #define smp_mb__before_atomic_dec() smp_mb()
3105 #define smp_mb__after_atomic_dec() smp_mb()
3106 #define smp_mb__before_atomic_inc() smp_mb()
3107diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
3108index 547f1a6..0b22b53 100644
3109--- a/arch/s390/include/asm/elf.h
3110+++ b/arch/s390/include/asm/elf.h
3111@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
3112 the loader. We need to make sure that it is out of the way of the program
3113 that it will "exec", and that there is sufficient room for the brk. */
3114
3115-extern unsigned long randomize_et_dyn(unsigned long base);
3116-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
3117+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3118+
3119+#ifdef CONFIG_PAX_ASLR
3120+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3121+
3122+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
3123+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
3124+#endif
3125
3126 /* This yields a mask that user programs can use to figure out what
3127 instruction set this CPU supports. */
3128@@ -211,7 +217,4 @@ struct linux_binprm;
3129 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
3130 int arch_setup_additional_pages(struct linux_binprm *, int);
3131
3132-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3133-#define arch_randomize_brk arch_randomize_brk
3134-
3135 #endif
3136diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
3137index ef573c1..75a1ce6 100644
3138--- a/arch/s390/include/asm/system.h
3139+++ b/arch/s390/include/asm/system.h
3140@@ -262,7 +262,7 @@ extern void (*_machine_restart)(char *command);
3141 extern void (*_machine_halt)(void);
3142 extern void (*_machine_power_off)(void);
3143
3144-extern unsigned long arch_align_stack(unsigned long sp);
3145+#define arch_align_stack(x) ((x) & ~0xfUL)
3146
3147 static inline int tprot(unsigned long addr)
3148 {
3149diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
3150index 2b23885..e136e31 100644
3151--- a/arch/s390/include/asm/uaccess.h
3152+++ b/arch/s390/include/asm/uaccess.h
3153@@ -235,6 +235,10 @@ static inline unsigned long __must_check
3154 copy_to_user(void __user *to, const void *from, unsigned long n)
3155 {
3156 might_fault();
3157+
3158+ if ((long)n < 0)
3159+ return n;
3160+
3161 if (access_ok(VERIFY_WRITE, to, n))
3162 n = __copy_to_user(to, from, n);
3163 return n;
3164@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
3165 static inline unsigned long __must_check
3166 __copy_from_user(void *to, const void __user *from, unsigned long n)
3167 {
3168+ if ((long)n < 0)
3169+ return n;
3170+
3171 if (__builtin_constant_p(n) && (n <= 256))
3172 return uaccess.copy_from_user_small(n, from, to);
3173 else
3174@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
3175 unsigned int sz = __compiletime_object_size(to);
3176
3177 might_fault();
3178+
3179+ if ((long)n < 0)
3180+ return n;
3181+
3182 if (unlikely(sz != -1 && sz < n)) {
3183 copy_from_user_overflow();
3184 return n;
3185diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
3186index dfcb343..eda788a 100644
3187--- a/arch/s390/kernel/module.c
3188+++ b/arch/s390/kernel/module.c
3189@@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
3190
3191 /* Increase core size by size of got & plt and set start
3192 offsets for got and plt. */
3193- me->core_size = ALIGN(me->core_size, 4);
3194- me->arch.got_offset = me->core_size;
3195- me->core_size += me->arch.got_size;
3196- me->arch.plt_offset = me->core_size;
3197- me->core_size += me->arch.plt_size;
3198+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
3199+ me->arch.got_offset = me->core_size_rw;
3200+ me->core_size_rw += me->arch.got_size;
3201+ me->arch.plt_offset = me->core_size_rx;
3202+ me->core_size_rx += me->arch.plt_size;
3203 return 0;
3204 }
3205
3206@@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3207 if (info->got_initialized == 0) {
3208 Elf_Addr *gotent;
3209
3210- gotent = me->module_core + me->arch.got_offset +
3211+ gotent = me->module_core_rw + me->arch.got_offset +
3212 info->got_offset;
3213 *gotent = val;
3214 info->got_initialized = 1;
3215@@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3216 else if (r_type == R_390_GOTENT ||
3217 r_type == R_390_GOTPLTENT)
3218 *(unsigned int *) loc =
3219- (val + (Elf_Addr) me->module_core - loc) >> 1;
3220+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3221 else if (r_type == R_390_GOT64 ||
3222 r_type == R_390_GOTPLT64)
3223 *(unsigned long *) loc = val;
3224@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3225 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3226 if (info->plt_initialized == 0) {
3227 unsigned int *ip;
3228- ip = me->module_core + me->arch.plt_offset +
3229+ ip = me->module_core_rx + me->arch.plt_offset +
3230 info->plt_offset;
3231 #ifndef CONFIG_64BIT
3232 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3233@@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3234 val - loc + 0xffffUL < 0x1ffffeUL) ||
3235 (r_type == R_390_PLT32DBL &&
3236 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3237- val = (Elf_Addr) me->module_core +
3238+ val = (Elf_Addr) me->module_core_rx +
3239 me->arch.plt_offset +
3240 info->plt_offset;
3241 val += rela->r_addend - loc;
3242@@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3243 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3244 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3245 val = val + rela->r_addend -
3246- ((Elf_Addr) me->module_core + me->arch.got_offset);
3247+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3248 if (r_type == R_390_GOTOFF16)
3249 *(unsigned short *) loc = val;
3250 else if (r_type == R_390_GOTOFF32)
3251@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3252 break;
3253 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3254 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3255- val = (Elf_Addr) me->module_core + me->arch.got_offset +
3256+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3257 rela->r_addend - loc;
3258 if (r_type == R_390_GOTPC)
3259 *(unsigned int *) loc = val;
3260diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
3261index 9451b21..ed8956f 100644
3262--- a/arch/s390/kernel/process.c
3263+++ b/arch/s390/kernel/process.c
3264@@ -321,39 +321,3 @@ unsigned long get_wchan(struct task_struct *p)
3265 }
3266 return 0;
3267 }
3268-
3269-unsigned long arch_align_stack(unsigned long sp)
3270-{
3271- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3272- sp -= get_random_int() & ~PAGE_MASK;
3273- return sp & ~0xf;
3274-}
3275-
3276-static inline unsigned long brk_rnd(void)
3277-{
3278- /* 8MB for 32bit, 1GB for 64bit */
3279- if (is_32bit_task())
3280- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
3281- else
3282- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
3283-}
3284-
3285-unsigned long arch_randomize_brk(struct mm_struct *mm)
3286-{
3287- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
3288-
3289- if (ret < mm->brk)
3290- return mm->brk;
3291- return ret;
3292-}
3293-
3294-unsigned long randomize_et_dyn(unsigned long base)
3295-{
3296- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3297-
3298- if (!(current->flags & PF_RANDOMIZE))
3299- return base;
3300- if (ret < base)
3301- return base;
3302- return ret;
3303-}
3304diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
3305index f09c748..cf9ec1d 100644
3306--- a/arch/s390/mm/mmap.c
3307+++ b/arch/s390/mm/mmap.c
3308@@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3309 */
3310 if (mmap_is_legacy()) {
3311 mm->mmap_base = TASK_UNMAPPED_BASE;
3312+
3313+#ifdef CONFIG_PAX_RANDMMAP
3314+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3315+ mm->mmap_base += mm->delta_mmap;
3316+#endif
3317+
3318 mm->get_unmapped_area = arch_get_unmapped_area;
3319 mm->unmap_area = arch_unmap_area;
3320 } else {
3321 mm->mmap_base = mmap_base();
3322+
3323+#ifdef CONFIG_PAX_RANDMMAP
3324+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3325+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3326+#endif
3327+
3328 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3329 mm->unmap_area = arch_unmap_area_topdown;
3330 }
3331@@ -167,10 +179,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3332 */
3333 if (mmap_is_legacy()) {
3334 mm->mmap_base = TASK_UNMAPPED_BASE;
3335+
3336+#ifdef CONFIG_PAX_RANDMMAP
3337+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3338+ mm->mmap_base += mm->delta_mmap;
3339+#endif
3340+
3341 mm->get_unmapped_area = s390_get_unmapped_area;
3342 mm->unmap_area = arch_unmap_area;
3343 } else {
3344 mm->mmap_base = mmap_base();
3345+
3346+#ifdef CONFIG_PAX_RANDMMAP
3347+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3348+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3349+#endif
3350+
3351 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3352 mm->unmap_area = arch_unmap_area_topdown;
3353 }
3354diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
3355index 589d5c7..669e274 100644
3356--- a/arch/score/include/asm/system.h
3357+++ b/arch/score/include/asm/system.h
3358@@ -17,7 +17,7 @@ do { \
3359 #define finish_arch_switch(prev) do {} while (0)
3360
3361 typedef void (*vi_handler_t)(void);
3362-extern unsigned long arch_align_stack(unsigned long sp);
3363+#define arch_align_stack(x) (x)
3364
3365 #define mb() barrier()
3366 #define rmb() barrier()
3367diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
3368index 25d0803..d6c8e36 100644
3369--- a/arch/score/kernel/process.c
3370+++ b/arch/score/kernel/process.c
3371@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
3372
3373 return task_pt_regs(task)->cp0_epc;
3374 }
3375-
3376-unsigned long arch_align_stack(unsigned long sp)
3377-{
3378- return sp;
3379-}
3380diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
3381index afeb710..d1d1289 100644
3382--- a/arch/sh/mm/mmap.c
3383+++ b/arch/sh/mm/mmap.c
3384@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3385 addr = PAGE_ALIGN(addr);
3386
3387 vma = find_vma(mm, addr);
3388- if (TASK_SIZE - len >= addr &&
3389- (!vma || addr + len <= vma->vm_start))
3390+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3391 return addr;
3392 }
3393
3394@@ -106,7 +105,7 @@ full_search:
3395 }
3396 return -ENOMEM;
3397 }
3398- if (likely(!vma || addr + len <= vma->vm_start)) {
3399+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3400 /*
3401 * Remember the place where we stopped the search:
3402 */
3403@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3404 addr = PAGE_ALIGN(addr);
3405
3406 vma = find_vma(mm, addr);
3407- if (TASK_SIZE - len >= addr &&
3408- (!vma || addr + len <= vma->vm_start))
3409+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3410 return addr;
3411 }
3412
3413@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3414 /* make sure it can fit in the remaining address space */
3415 if (likely(addr > len)) {
3416 vma = find_vma(mm, addr-len);
3417- if (!vma || addr <= vma->vm_start) {
3418+ if (check_heap_stack_gap(vma, addr - len, len)) {
3419 /* remember the address as a hint for next time */
3420 return (mm->free_area_cache = addr-len);
3421 }
3422@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3423 if (unlikely(mm->mmap_base < len))
3424 goto bottomup;
3425
3426- addr = mm->mmap_base-len;
3427- if (do_colour_align)
3428- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3429+ addr = mm->mmap_base - len;
3430
3431 do {
3432+ if (do_colour_align)
3433+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3434 /*
3435 * Lookup failure means no vma is above this address,
3436 * else if new region fits below vma->vm_start,
3437 * return with success:
3438 */
3439 vma = find_vma(mm, addr);
3440- if (likely(!vma || addr+len <= vma->vm_start)) {
3441+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3442 /* remember the address as a hint for next time */
3443 return (mm->free_area_cache = addr);
3444 }
3445@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3446 mm->cached_hole_size = vma->vm_start - addr;
3447
3448 /* try just below the current vma->vm_start */
3449- addr = vma->vm_start-len;
3450- if (do_colour_align)
3451- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3452- } while (likely(len < vma->vm_start));
3453+ addr = skip_heap_stack_gap(vma, len);
3454+ } while (!IS_ERR_VALUE(addr));
3455
3456 bottomup:
3457 /*
3458diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
3459index f92602e..27060b2 100644
3460--- a/arch/sparc/Kconfig
3461+++ b/arch/sparc/Kconfig
3462@@ -31,6 +31,7 @@ config SPARC
3463
3464 config SPARC32
3465 def_bool !64BIT
3466+ select GENERIC_ATOMIC64
3467
3468 config SPARC64
3469 def_bool 64BIT
3470diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
3471index ad1fb5d..fc5315b 100644
3472--- a/arch/sparc/Makefile
3473+++ b/arch/sparc/Makefile
3474@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
3475 # Export what is needed by arch/sparc/boot/Makefile
3476 export VMLINUX_INIT VMLINUX_MAIN
3477 VMLINUX_INIT := $(head-y) $(init-y)
3478-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
3479+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
3480 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
3481 VMLINUX_MAIN += $(drivers-y) $(net-y)
3482
3483diff --git a/arch/sparc/include/asm/atomic.h b/arch/sparc/include/asm/atomic.h
3484index 8ff83d8..4a459c2 100644
3485--- a/arch/sparc/include/asm/atomic.h
3486+++ b/arch/sparc/include/asm/atomic.h
3487@@ -4,5 +4,6 @@
3488 #include <asm/atomic_64.h>
3489 #else
3490 #include <asm/atomic_32.h>
3491+#include <asm-generic/atomic64.h>
3492 #endif
3493 #endif
3494diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
3495index 9f421df..b81fc12 100644
3496--- a/arch/sparc/include/asm/atomic_64.h
3497+++ b/arch/sparc/include/asm/atomic_64.h
3498@@ -14,18 +14,40 @@
3499 #define ATOMIC64_INIT(i) { (i) }
3500
3501 #define atomic_read(v) (*(volatile int *)&(v)->counter)
3502+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3503+{
3504+ return v->counter;
3505+}
3506 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
3507+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3508+{
3509+ return v->counter;
3510+}
3511
3512 #define atomic_set(v, i) (((v)->counter) = i)
3513+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3514+{
3515+ v->counter = i;
3516+}
3517 #define atomic64_set(v, i) (((v)->counter) = i)
3518+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3519+{
3520+ v->counter = i;
3521+}
3522
3523 extern void atomic_add(int, atomic_t *);
3524+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3525 extern void atomic64_add(long, atomic64_t *);
3526+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3527 extern void atomic_sub(int, atomic_t *);
3528+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3529 extern void atomic64_sub(long, atomic64_t *);
3530+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3531
3532 extern int atomic_add_ret(int, atomic_t *);
3533+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3534 extern long atomic64_add_ret(long, atomic64_t *);
3535+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3536 extern int atomic_sub_ret(int, atomic_t *);
3537 extern long atomic64_sub_ret(long, atomic64_t *);
3538
3539@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3540 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3541
3542 #define atomic_inc_return(v) atomic_add_ret(1, v)
3543+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3544+{
3545+ return atomic_add_ret_unchecked(1, v);
3546+}
3547 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3548+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3549+{
3550+ return atomic64_add_ret_unchecked(1, v);
3551+}
3552
3553 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3554 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3555
3556 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3557+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3558+{
3559+ return atomic_add_ret_unchecked(i, v);
3560+}
3561 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3562+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3563+{
3564+ return atomic64_add_ret_unchecked(i, v);
3565+}
3566
3567 /*
3568 * atomic_inc_and_test - increment and test
3569@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3570 * other cases.
3571 */
3572 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3573+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
3574+{
3575+ return atomic_inc_return_unchecked(v) == 0;
3576+}
3577 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3578
3579 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3580@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3581 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3582
3583 #define atomic_inc(v) atomic_add(1, v)
3584+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3585+{
3586+ atomic_add_unchecked(1, v);
3587+}
3588 #define atomic64_inc(v) atomic64_add(1, v)
3589+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3590+{
3591+ atomic64_add_unchecked(1, v);
3592+}
3593
3594 #define atomic_dec(v) atomic_sub(1, v)
3595+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3596+{
3597+ atomic_sub_unchecked(1, v);
3598+}
3599 #define atomic64_dec(v) atomic64_sub(1, v)
3600+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3601+{
3602+ atomic64_sub_unchecked(1, v);
3603+}
3604
3605 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3606 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3607
3608 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3609+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
3610+{
3611+ return cmpxchg(&v->counter, old, new);
3612+}
3613 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3614+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3615+{
3616+ return xchg(&v->counter, new);
3617+}
3618
3619 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3620 {
3621- int c, old;
3622+ int c, old, new;
3623 c = atomic_read(v);
3624 for (;;) {
3625- if (unlikely(c == (u)))
3626+ if (unlikely(c == u))
3627 break;
3628- old = atomic_cmpxchg((v), c, c + (a));
3629+
3630+ asm volatile("addcc %2, %0, %0\n"
3631+
3632+#ifdef CONFIG_PAX_REFCOUNT
3633+ "tvs %%icc, 6\n"
3634+#endif
3635+
3636+ : "=r" (new)
3637+ : "0" (c), "ir" (a)
3638+ : "cc");
3639+
3640+ old = atomic_cmpxchg(v, c, new);
3641 if (likely(old == c))
3642 break;
3643 c = old;
3644@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3645 #define atomic64_cmpxchg(v, o, n) \
3646 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3647 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3648+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3649+{
3650+ return xchg(&v->counter, new);
3651+}
3652
3653 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3654 {
3655- long c, old;
3656+ long c, old, new;
3657 c = atomic64_read(v);
3658 for (;;) {
3659- if (unlikely(c == (u)))
3660+ if (unlikely(c == u))
3661 break;
3662- old = atomic64_cmpxchg((v), c, c + (a));
3663+
3664+ asm volatile("addcc %2, %0, %0\n"
3665+
3666+#ifdef CONFIG_PAX_REFCOUNT
3667+ "tvs %%xcc, 6\n"
3668+#endif
3669+
3670+ : "=r" (new)
3671+ : "0" (c), "ir" (a)
3672+ : "cc");
3673+
3674+ old = atomic64_cmpxchg(v, c, new);
3675 if (likely(old == c))
3676 break;
3677 c = old;
3678 }
3679- return c != (u);
3680+ return c != u;
3681 }
3682
3683 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3684diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
3685index 69358b5..17b4745 100644
3686--- a/arch/sparc/include/asm/cache.h
3687+++ b/arch/sparc/include/asm/cache.h
3688@@ -10,7 +10,7 @@
3689 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3690
3691 #define L1_CACHE_SHIFT 5
3692-#define L1_CACHE_BYTES 32
3693+#define L1_CACHE_BYTES 32UL
3694
3695 #ifdef CONFIG_SPARC32
3696 #define SMP_CACHE_BYTES_SHIFT 5
3697diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
3698index 4269ca6..e3da77f 100644
3699--- a/arch/sparc/include/asm/elf_32.h
3700+++ b/arch/sparc/include/asm/elf_32.h
3701@@ -114,6 +114,13 @@ typedef struct {
3702
3703 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3704
3705+#ifdef CONFIG_PAX_ASLR
3706+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3707+
3708+#define PAX_DELTA_MMAP_LEN 16
3709+#define PAX_DELTA_STACK_LEN 16
3710+#endif
3711+
3712 /* This yields a mask that user programs can use to figure out what
3713 instruction set this cpu supports. This can NOT be done in userspace
3714 on Sparc. */
3715diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
3716index 7df8b7f..4946269 100644
3717--- a/arch/sparc/include/asm/elf_64.h
3718+++ b/arch/sparc/include/asm/elf_64.h
3719@@ -180,6 +180,13 @@ typedef struct {
3720 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3721 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3722
3723+#ifdef CONFIG_PAX_ASLR
3724+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3725+
3726+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3727+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3728+#endif
3729+
3730 extern unsigned long sparc64_elf_hwcap;
3731 #define ELF_HWCAP sparc64_elf_hwcap
3732
3733diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
3734index 156707b..aefa786 100644
3735--- a/arch/sparc/include/asm/page_32.h
3736+++ b/arch/sparc/include/asm/page_32.h
3737@@ -8,6 +8,8 @@
3738 #ifndef _SPARC_PAGE_H
3739 #define _SPARC_PAGE_H
3740
3741+#include <linux/const.h>
3742+
3743 #define PAGE_SHIFT 12
3744
3745 #ifndef __ASSEMBLY__
3746diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
3747index a790cc6..091ed94 100644
3748--- a/arch/sparc/include/asm/pgtable_32.h
3749+++ b/arch/sparc/include/asm/pgtable_32.h
3750@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3751 BTFIXUPDEF_INT(page_none)
3752 BTFIXUPDEF_INT(page_copy)
3753 BTFIXUPDEF_INT(page_readonly)
3754+
3755+#ifdef CONFIG_PAX_PAGEEXEC
3756+BTFIXUPDEF_INT(page_shared_noexec)
3757+BTFIXUPDEF_INT(page_copy_noexec)
3758+BTFIXUPDEF_INT(page_readonly_noexec)
3759+#endif
3760+
3761 BTFIXUPDEF_INT(page_kernel)
3762
3763 #define PMD_SHIFT SUN4C_PMD_SHIFT
3764@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3765 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3766 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3767
3768+#ifdef CONFIG_PAX_PAGEEXEC
3769+extern pgprot_t PAGE_SHARED_NOEXEC;
3770+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3771+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3772+#else
3773+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3774+# define PAGE_COPY_NOEXEC PAGE_COPY
3775+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3776+#endif
3777+
3778 extern unsigned long page_kernel;
3779
3780 #ifdef MODULE
3781diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
3782index f6ae2b2..b03ffc7 100644
3783--- a/arch/sparc/include/asm/pgtsrmmu.h
3784+++ b/arch/sparc/include/asm/pgtsrmmu.h
3785@@ -115,6 +115,13 @@
3786 SRMMU_EXEC | SRMMU_REF)
3787 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3788 SRMMU_EXEC | SRMMU_REF)
3789+
3790+#ifdef CONFIG_PAX_PAGEEXEC
3791+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3792+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3793+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3794+#endif
3795+
3796 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3797 SRMMU_DIRTY | SRMMU_REF)
3798
3799diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
3800index 9689176..63c18ea 100644
3801--- a/arch/sparc/include/asm/spinlock_64.h
3802+++ b/arch/sparc/include/asm/spinlock_64.h
3803@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
3804
3805 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3806
3807-static void inline arch_read_lock(arch_rwlock_t *lock)
3808+static inline void arch_read_lock(arch_rwlock_t *lock)
3809 {
3810 unsigned long tmp1, tmp2;
3811
3812 __asm__ __volatile__ (
3813 "1: ldsw [%2], %0\n"
3814 " brlz,pn %0, 2f\n"
3815-"4: add %0, 1, %1\n"
3816+"4: addcc %0, 1, %1\n"
3817+
3818+#ifdef CONFIG_PAX_REFCOUNT
3819+" tvs %%icc, 6\n"
3820+#endif
3821+
3822 " cas [%2], %0, %1\n"
3823 " cmp %0, %1\n"
3824 " bne,pn %%icc, 1b\n"
3825@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
3826 " .previous"
3827 : "=&r" (tmp1), "=&r" (tmp2)
3828 : "r" (lock)
3829- : "memory");
3830+ : "memory", "cc");
3831 }
3832
3833-static int inline arch_read_trylock(arch_rwlock_t *lock)
3834+static inline int arch_read_trylock(arch_rwlock_t *lock)
3835 {
3836 int tmp1, tmp2;
3837
3838@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3839 "1: ldsw [%2], %0\n"
3840 " brlz,a,pn %0, 2f\n"
3841 " mov 0, %0\n"
3842-" add %0, 1, %1\n"
3843+" addcc %0, 1, %1\n"
3844+
3845+#ifdef CONFIG_PAX_REFCOUNT
3846+" tvs %%icc, 6\n"
3847+#endif
3848+
3849 " cas [%2], %0, %1\n"
3850 " cmp %0, %1\n"
3851 " bne,pn %%icc, 1b\n"
3852@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3853 return tmp1;
3854 }
3855
3856-static void inline arch_read_unlock(arch_rwlock_t *lock)
3857+static inline void arch_read_unlock(arch_rwlock_t *lock)
3858 {
3859 unsigned long tmp1, tmp2;
3860
3861 __asm__ __volatile__(
3862 "1: lduw [%2], %0\n"
3863-" sub %0, 1, %1\n"
3864+" subcc %0, 1, %1\n"
3865+
3866+#ifdef CONFIG_PAX_REFCOUNT
3867+" tvs %%icc, 6\n"
3868+#endif
3869+
3870 " cas [%2], %0, %1\n"
3871 " cmp %0, %1\n"
3872 " bne,pn %%xcc, 1b\n"
3873@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
3874 : "memory");
3875 }
3876
3877-static void inline arch_write_lock(arch_rwlock_t *lock)
3878+static inline void arch_write_lock(arch_rwlock_t *lock)
3879 {
3880 unsigned long mask, tmp1, tmp2;
3881
3882@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
3883 : "memory");
3884 }
3885
3886-static void inline arch_write_unlock(arch_rwlock_t *lock)
3887+static inline void arch_write_unlock(arch_rwlock_t *lock)
3888 {
3889 __asm__ __volatile__(
3890 " stw %%g0, [%0]"
3891@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
3892 : "memory");
3893 }
3894
3895-static int inline arch_write_trylock(arch_rwlock_t *lock)
3896+static inline int arch_write_trylock(arch_rwlock_t *lock)
3897 {
3898 unsigned long mask, tmp1, tmp2, result;
3899
3900diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
3901index fa57532..e1a4c53 100644
3902--- a/arch/sparc/include/asm/thread_info_32.h
3903+++ b/arch/sparc/include/asm/thread_info_32.h
3904@@ -50,6 +50,8 @@ struct thread_info {
3905 unsigned long w_saved;
3906
3907 struct restart_block restart_block;
3908+
3909+ unsigned long lowest_stack;
3910 };
3911
3912 /*
3913diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
3914index 60d86be..952dea1 100644
3915--- a/arch/sparc/include/asm/thread_info_64.h
3916+++ b/arch/sparc/include/asm/thread_info_64.h
3917@@ -63,6 +63,8 @@ struct thread_info {
3918 struct pt_regs *kern_una_regs;
3919 unsigned int kern_una_insn;
3920
3921+ unsigned long lowest_stack;
3922+
3923 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3924 };
3925
3926diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
3927index e88fbe5..96b0ce5 100644
3928--- a/arch/sparc/include/asm/uaccess.h
3929+++ b/arch/sparc/include/asm/uaccess.h
3930@@ -1,5 +1,13 @@
3931 #ifndef ___ASM_SPARC_UACCESS_H
3932 #define ___ASM_SPARC_UACCESS_H
3933+
3934+#ifdef __KERNEL__
3935+#ifndef __ASSEMBLY__
3936+#include <linux/types.h>
3937+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3938+#endif
3939+#endif
3940+
3941 #if defined(__sparc__) && defined(__arch64__)
3942 #include <asm/uaccess_64.h>
3943 #else
3944diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
3945index 8303ac4..07f333d 100644
3946--- a/arch/sparc/include/asm/uaccess_32.h
3947+++ b/arch/sparc/include/asm/uaccess_32.h
3948@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
3949
3950 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3951 {
3952- if (n && __access_ok((unsigned long) to, n))
3953+ if ((long)n < 0)
3954+ return n;
3955+
3956+ if (n && __access_ok((unsigned long) to, n)) {
3957+ if (!__builtin_constant_p(n))
3958+ check_object_size(from, n, true);
3959 return __copy_user(to, (__force void __user *) from, n);
3960- else
3961+ } else
3962 return n;
3963 }
3964
3965 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3966 {
3967+ if ((long)n < 0)
3968+ return n;
3969+
3970+ if (!__builtin_constant_p(n))
3971+ check_object_size(from, n, true);
3972+
3973 return __copy_user(to, (__force void __user *) from, n);
3974 }
3975
3976 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3977 {
3978- if (n && __access_ok((unsigned long) from, n))
3979+ if ((long)n < 0)
3980+ return n;
3981+
3982+ if (n && __access_ok((unsigned long) from, n)) {
3983+ if (!__builtin_constant_p(n))
3984+ check_object_size(to, n, false);
3985 return __copy_user((__force void __user *) to, from, n);
3986- else
3987+ } else
3988 return n;
3989 }
3990
3991 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3992 {
3993+ if ((long)n < 0)
3994+ return n;
3995+
3996 return __copy_user((__force void __user *) to, from, n);
3997 }
3998
3999diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
4000index 3e1449f..5293a0e 100644
4001--- a/arch/sparc/include/asm/uaccess_64.h
4002+++ b/arch/sparc/include/asm/uaccess_64.h
4003@@ -10,6 +10,7 @@
4004 #include <linux/compiler.h>
4005 #include <linux/string.h>
4006 #include <linux/thread_info.h>
4007+#include <linux/kernel.h>
4008 #include <asm/asi.h>
4009 #include <asm/system.h>
4010 #include <asm/spitfire.h>
4011@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
4012 static inline unsigned long __must_check
4013 copy_from_user(void *to, const void __user *from, unsigned long size)
4014 {
4015- unsigned long ret = ___copy_from_user(to, from, size);
4016+ unsigned long ret;
4017
4018+ if ((long)size < 0 || size > INT_MAX)
4019+ return size;
4020+
4021+ if (!__builtin_constant_p(size))
4022+ check_object_size(to, size, false);
4023+
4024+ ret = ___copy_from_user(to, from, size);
4025 if (unlikely(ret))
4026 ret = copy_from_user_fixup(to, from, size);
4027
4028@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
4029 static inline unsigned long __must_check
4030 copy_to_user(void __user *to, const void *from, unsigned long size)
4031 {
4032- unsigned long ret = ___copy_to_user(to, from, size);
4033+ unsigned long ret;
4034
4035+ if ((long)size < 0 || size > INT_MAX)
4036+ return size;
4037+
4038+ if (!__builtin_constant_p(size))
4039+ check_object_size(from, size, true);
4040+
4041+ ret = ___copy_to_user(to, from, size);
4042 if (unlikely(ret))
4043 ret = copy_to_user_fixup(to, from, size);
4044 return ret;
4045diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
4046index cb85458..e063f17 100644
4047--- a/arch/sparc/kernel/Makefile
4048+++ b/arch/sparc/kernel/Makefile
4049@@ -3,7 +3,7 @@
4050 #
4051
4052 asflags-y := -ansi
4053-ccflags-y := -Werror
4054+#ccflags-y := -Werror
4055
4056 extra-y := head_$(BITS).o
4057 extra-y += init_task.o
4058diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
4059index f793742..4d880af 100644
4060--- a/arch/sparc/kernel/process_32.c
4061+++ b/arch/sparc/kernel/process_32.c
4062@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
4063 rw->ins[4], rw->ins[5],
4064 rw->ins[6],
4065 rw->ins[7]);
4066- printk("%pS\n", (void *) rw->ins[7]);
4067+ printk("%pA\n", (void *) rw->ins[7]);
4068 rw = (struct reg_window32 *) rw->ins[6];
4069 }
4070 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4071@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
4072
4073 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4074 r->psr, r->pc, r->npc, r->y, print_tainted());
4075- printk("PC: <%pS>\n", (void *) r->pc);
4076+ printk("PC: <%pA>\n", (void *) r->pc);
4077 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4078 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4079 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4080 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4081 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4082 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4083- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4084+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4085
4086 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4087 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4088@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4089 rw = (struct reg_window32 *) fp;
4090 pc = rw->ins[7];
4091 printk("[%08lx : ", pc);
4092- printk("%pS ] ", (void *) pc);
4093+ printk("%pA ] ", (void *) pc);
4094 fp = rw->ins[6];
4095 } while (++count < 16);
4096 printk("\n");
4097diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
4098index 3739a06..48b2ff0 100644
4099--- a/arch/sparc/kernel/process_64.c
4100+++ b/arch/sparc/kernel/process_64.c
4101@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
4102 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4103 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4104 if (regs->tstate & TSTATE_PRIV)
4105- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4106+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4107 }
4108
4109 void show_regs(struct pt_regs *regs)
4110 {
4111 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
4112 regs->tpc, regs->tnpc, regs->y, print_tainted());
4113- printk("TPC: <%pS>\n", (void *) regs->tpc);
4114+ printk("TPC: <%pA>\n", (void *) regs->tpc);
4115 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
4116 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
4117 regs->u_regs[3]);
4118@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
4119 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
4120 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
4121 regs->u_regs[15]);
4122- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
4123+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
4124 show_regwindow(regs);
4125 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
4126 }
4127@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void)
4128 ((tp && tp->task) ? tp->task->pid : -1));
4129
4130 if (gp->tstate & TSTATE_PRIV) {
4131- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
4132+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
4133 (void *) gp->tpc,
4134 (void *) gp->o7,
4135 (void *) gp->i7,
4136diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
4137index 42b282f..28ce9f2 100644
4138--- a/arch/sparc/kernel/sys_sparc_32.c
4139+++ b/arch/sparc/kernel/sys_sparc_32.c
4140@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4141 if (ARCH_SUN4C && len > 0x20000000)
4142 return -ENOMEM;
4143 if (!addr)
4144- addr = TASK_UNMAPPED_BASE;
4145+ addr = current->mm->mmap_base;
4146
4147 if (flags & MAP_SHARED)
4148 addr = COLOUR_ALIGN(addr);
4149@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4150 }
4151 if (TASK_SIZE - PAGE_SIZE - len < addr)
4152 return -ENOMEM;
4153- if (!vmm || addr + len <= vmm->vm_start)
4154+ if (check_heap_stack_gap(vmm, addr, len))
4155 return addr;
4156 addr = vmm->vm_end;
4157 if (flags & MAP_SHARED)
4158diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
4159index 441521a..b767073 100644
4160--- a/arch/sparc/kernel/sys_sparc_64.c
4161+++ b/arch/sparc/kernel/sys_sparc_64.c
4162@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4163 /* We do not accept a shared mapping if it would violate
4164 * cache aliasing constraints.
4165 */
4166- if ((flags & MAP_SHARED) &&
4167+ if ((filp || (flags & MAP_SHARED)) &&
4168 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4169 return -EINVAL;
4170 return addr;
4171@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4172 if (filp || (flags & MAP_SHARED))
4173 do_color_align = 1;
4174
4175+#ifdef CONFIG_PAX_RANDMMAP
4176+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4177+#endif
4178+
4179 if (addr) {
4180 if (do_color_align)
4181 addr = COLOUR_ALIGN(addr, pgoff);
4182@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4183 addr = PAGE_ALIGN(addr);
4184
4185 vma = find_vma(mm, addr);
4186- if (task_size - len >= addr &&
4187- (!vma || addr + len <= vma->vm_start))
4188+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4189 return addr;
4190 }
4191
4192 if (len > mm->cached_hole_size) {
4193- start_addr = addr = mm->free_area_cache;
4194+ start_addr = addr = mm->free_area_cache;
4195 } else {
4196- start_addr = addr = TASK_UNMAPPED_BASE;
4197+ start_addr = addr = mm->mmap_base;
4198 mm->cached_hole_size = 0;
4199 }
4200
4201@@ -174,14 +177,14 @@ full_search:
4202 vma = find_vma(mm, VA_EXCLUDE_END);
4203 }
4204 if (unlikely(task_size < addr)) {
4205- if (start_addr != TASK_UNMAPPED_BASE) {
4206- start_addr = addr = TASK_UNMAPPED_BASE;
4207+ if (start_addr != mm->mmap_base) {
4208+ start_addr = addr = mm->mmap_base;
4209 mm->cached_hole_size = 0;
4210 goto full_search;
4211 }
4212 return -ENOMEM;
4213 }
4214- if (likely(!vma || addr + len <= vma->vm_start)) {
4215+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4216 /*
4217 * Remember the place where we stopped the search:
4218 */
4219@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4220 /* We do not accept a shared mapping if it would violate
4221 * cache aliasing constraints.
4222 */
4223- if ((flags & MAP_SHARED) &&
4224+ if ((filp || (flags & MAP_SHARED)) &&
4225 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4226 return -EINVAL;
4227 return addr;
4228@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4229 addr = PAGE_ALIGN(addr);
4230
4231 vma = find_vma(mm, addr);
4232- if (task_size - len >= addr &&
4233- (!vma || addr + len <= vma->vm_start))
4234+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4235 return addr;
4236 }
4237
4238@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4239 /* make sure it can fit in the remaining address space */
4240 if (likely(addr > len)) {
4241 vma = find_vma(mm, addr-len);
4242- if (!vma || addr <= vma->vm_start) {
4243+ if (check_heap_stack_gap(vma, addr - len, len)) {
4244 /* remember the address as a hint for next time */
4245 return (mm->free_area_cache = addr-len);
4246 }
4247@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4248 if (unlikely(mm->mmap_base < len))
4249 goto bottomup;
4250
4251- addr = mm->mmap_base-len;
4252- if (do_color_align)
4253- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4254+ addr = mm->mmap_base - len;
4255
4256 do {
4257+ if (do_color_align)
4258+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4259 /*
4260 * Lookup failure means no vma is above this address,
4261 * else if new region fits below vma->vm_start,
4262 * return with success:
4263 */
4264 vma = find_vma(mm, addr);
4265- if (likely(!vma || addr+len <= vma->vm_start)) {
4266+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4267 /* remember the address as a hint for next time */
4268 return (mm->free_area_cache = addr);
4269 }
4270@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4271 mm->cached_hole_size = vma->vm_start - addr;
4272
4273 /* try just below the current vma->vm_start */
4274- addr = vma->vm_start-len;
4275- if (do_color_align)
4276- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4277- } while (likely(len < vma->vm_start));
4278+ addr = skip_heap_stack_gap(vma, len);
4279+ } while (!IS_ERR_VALUE(addr));
4280
4281 bottomup:
4282 /*
4283@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4284 gap == RLIM_INFINITY ||
4285 sysctl_legacy_va_layout) {
4286 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4287+
4288+#ifdef CONFIG_PAX_RANDMMAP
4289+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4290+ mm->mmap_base += mm->delta_mmap;
4291+#endif
4292+
4293 mm->get_unmapped_area = arch_get_unmapped_area;
4294 mm->unmap_area = arch_unmap_area;
4295 } else {
4296@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4297 gap = (task_size / 6 * 5);
4298
4299 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4300+
4301+#ifdef CONFIG_PAX_RANDMMAP
4302+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4303+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4304+#endif
4305+
4306 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4307 mm->unmap_area = arch_unmap_area_topdown;
4308 }
4309diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
4310index 591f20c..0f1b925 100644
4311--- a/arch/sparc/kernel/traps_32.c
4312+++ b/arch/sparc/kernel/traps_32.c
4313@@ -45,6 +45,8 @@ static void instruction_dump(unsigned long *pc)
4314 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4315 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4316
4317+extern void gr_handle_kernel_exploit(void);
4318+
4319 void die_if_kernel(char *str, struct pt_regs *regs)
4320 {
4321 static int die_counter;
4322@@ -77,15 +79,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4323 count++ < 30 &&
4324 (((unsigned long) rw) >= PAGE_OFFSET) &&
4325 !(((unsigned long) rw) & 0x7)) {
4326- printk("Caller[%08lx]: %pS\n", rw->ins[7],
4327+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
4328 (void *) rw->ins[7]);
4329 rw = (struct reg_window32 *)rw->ins[6];
4330 }
4331 }
4332 printk("Instruction DUMP:");
4333 instruction_dump ((unsigned long *) regs->pc);
4334- if(regs->psr & PSR_PS)
4335+ if(regs->psr & PSR_PS) {
4336+ gr_handle_kernel_exploit();
4337 do_exit(SIGKILL);
4338+ }
4339 do_exit(SIGSEGV);
4340 }
4341
4342diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
4343index 0cbdaa4..438e4c9 100644
4344--- a/arch/sparc/kernel/traps_64.c
4345+++ b/arch/sparc/kernel/traps_64.c
4346@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
4347 i + 1,
4348 p->trapstack[i].tstate, p->trapstack[i].tpc,
4349 p->trapstack[i].tnpc, p->trapstack[i].tt);
4350- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4351+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4352 }
4353 }
4354
4355@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
4356
4357 lvl -= 0x100;
4358 if (regs->tstate & TSTATE_PRIV) {
4359+
4360+#ifdef CONFIG_PAX_REFCOUNT
4361+ if (lvl == 6)
4362+ pax_report_refcount_overflow(regs);
4363+#endif
4364+
4365 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4366 die_if_kernel(buffer, regs);
4367 }
4368@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
4369 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4370 {
4371 char buffer[32];
4372-
4373+
4374 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4375 0, lvl, SIGTRAP) == NOTIFY_STOP)
4376 return;
4377
4378+#ifdef CONFIG_PAX_REFCOUNT
4379+ if (lvl == 6)
4380+ pax_report_refcount_overflow(regs);
4381+#endif
4382+
4383 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4384
4385 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4386@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
4387 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4388 printk("%s" "ERROR(%d): ",
4389 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4390- printk("TPC<%pS>\n", (void *) regs->tpc);
4391+ printk("TPC<%pA>\n", (void *) regs->tpc);
4392 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4393 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4394 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4395@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4396 smp_processor_id(),
4397 (type & 0x1) ? 'I' : 'D',
4398 regs->tpc);
4399- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4400+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4401 panic("Irrecoverable Cheetah+ parity error.");
4402 }
4403
4404@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4405 smp_processor_id(),
4406 (type & 0x1) ? 'I' : 'D',
4407 regs->tpc);
4408- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4409+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4410 }
4411
4412 struct sun4v_error_entry {
4413@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
4414
4415 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4416 regs->tpc, tl);
4417- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4418+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4419 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4420- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4421+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4422 (void *) regs->u_regs[UREG_I7]);
4423 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4424 "pte[%lx] error[%lx]\n",
4425@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
4426
4427 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4428 regs->tpc, tl);
4429- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4430+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4431 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4432- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4433+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4434 (void *) regs->u_regs[UREG_I7]);
4435 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4436 "pte[%lx] error[%lx]\n",
4437@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4438 fp = (unsigned long)sf->fp + STACK_BIAS;
4439 }
4440
4441- printk(" [%016lx] %pS\n", pc, (void *) pc);
4442+ printk(" [%016lx] %pA\n", pc, (void *) pc);
4443 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4444 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
4445 int index = tsk->curr_ret_stack;
4446 if (tsk->ret_stack && index >= graph) {
4447 pc = tsk->ret_stack[index - graph].ret;
4448- printk(" [%016lx] %pS\n", pc, (void *) pc);
4449+ printk(" [%016lx] %pA\n", pc, (void *) pc);
4450 graph++;
4451 }
4452 }
4453@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
4454 return (struct reg_window *) (fp + STACK_BIAS);
4455 }
4456
4457+extern void gr_handle_kernel_exploit(void);
4458+
4459 void die_if_kernel(char *str, struct pt_regs *regs)
4460 {
4461 static int die_counter;
4462@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4463 while (rw &&
4464 count++ < 30 &&
4465 kstack_valid(tp, (unsigned long) rw)) {
4466- printk("Caller[%016lx]: %pS\n", rw->ins[7],
4467+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
4468 (void *) rw->ins[7]);
4469
4470 rw = kernel_stack_up(rw);
4471@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4472 }
4473 user_instruction_dump ((unsigned int __user *) regs->tpc);
4474 }
4475- if (regs->tstate & TSTATE_PRIV)
4476+ if (regs->tstate & TSTATE_PRIV) {
4477+ gr_handle_kernel_exploit();
4478 do_exit(SIGKILL);
4479+ }
4480 do_exit(SIGSEGV);
4481 }
4482 EXPORT_SYMBOL(die_if_kernel);
4483diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
4484index 76e4ac1..78f8bb1 100644
4485--- a/arch/sparc/kernel/unaligned_64.c
4486+++ b/arch/sparc/kernel/unaligned_64.c
4487@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
4488 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
4489
4490 if (__ratelimit(&ratelimit)) {
4491- printk("Kernel unaligned access at TPC[%lx] %pS\n",
4492+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
4493 regs->tpc, (void *) regs->tpc);
4494 }
4495 }
4496diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
4497index a3fc437..fea9957 100644
4498--- a/arch/sparc/lib/Makefile
4499+++ b/arch/sparc/lib/Makefile
4500@@ -2,7 +2,7 @@
4501 #
4502
4503 asflags-y := -ansi -DST_DIV0=0x02
4504-ccflags-y := -Werror
4505+#ccflags-y := -Werror
4506
4507 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4508 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4509diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
4510index 59186e0..f747d7a 100644
4511--- a/arch/sparc/lib/atomic_64.S
4512+++ b/arch/sparc/lib/atomic_64.S
4513@@ -18,7 +18,12 @@
4514 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4515 BACKOFF_SETUP(%o2)
4516 1: lduw [%o1], %g1
4517- add %g1, %o0, %g7
4518+ addcc %g1, %o0, %g7
4519+
4520+#ifdef CONFIG_PAX_REFCOUNT
4521+ tvs %icc, 6
4522+#endif
4523+
4524 cas [%o1], %g1, %g7
4525 cmp %g1, %g7
4526 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4527@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4528 2: BACKOFF_SPIN(%o2, %o3, 1b)
4529 .size atomic_add, .-atomic_add
4530
4531+ .globl atomic_add_unchecked
4532+ .type atomic_add_unchecked,#function
4533+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4534+ BACKOFF_SETUP(%o2)
4535+1: lduw [%o1], %g1
4536+ add %g1, %o0, %g7
4537+ cas [%o1], %g1, %g7
4538+ cmp %g1, %g7
4539+ bne,pn %icc, 2f
4540+ nop
4541+ retl
4542+ nop
4543+2: BACKOFF_SPIN(%o2, %o3, 1b)
4544+ .size atomic_add_unchecked, .-atomic_add_unchecked
4545+
4546 .globl atomic_sub
4547 .type atomic_sub,#function
4548 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4549 BACKOFF_SETUP(%o2)
4550 1: lduw [%o1], %g1
4551- sub %g1, %o0, %g7
4552+ subcc %g1, %o0, %g7
4553+
4554+#ifdef CONFIG_PAX_REFCOUNT
4555+ tvs %icc, 6
4556+#endif
4557+
4558 cas [%o1], %g1, %g7
4559 cmp %g1, %g7
4560 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4561@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4562 2: BACKOFF_SPIN(%o2, %o3, 1b)
4563 .size atomic_sub, .-atomic_sub
4564
4565+ .globl atomic_sub_unchecked
4566+ .type atomic_sub_unchecked,#function
4567+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4568+ BACKOFF_SETUP(%o2)
4569+1: lduw [%o1], %g1
4570+ sub %g1, %o0, %g7
4571+ cas [%o1], %g1, %g7
4572+ cmp %g1, %g7
4573+ bne,pn %icc, 2f
4574+ nop
4575+ retl
4576+ nop
4577+2: BACKOFF_SPIN(%o2, %o3, 1b)
4578+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
4579+
4580 .globl atomic_add_ret
4581 .type atomic_add_ret,#function
4582 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4583 BACKOFF_SETUP(%o2)
4584 1: lduw [%o1], %g1
4585- add %g1, %o0, %g7
4586+ addcc %g1, %o0, %g7
4587+
4588+#ifdef CONFIG_PAX_REFCOUNT
4589+ tvs %icc, 6
4590+#endif
4591+
4592 cas [%o1], %g1, %g7
4593 cmp %g1, %g7
4594 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4595@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4596 2: BACKOFF_SPIN(%o2, %o3, 1b)
4597 .size atomic_add_ret, .-atomic_add_ret
4598
4599+ .globl atomic_add_ret_unchecked
4600+ .type atomic_add_ret_unchecked,#function
4601+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4602+ BACKOFF_SETUP(%o2)
4603+1: lduw [%o1], %g1
4604+ addcc %g1, %o0, %g7
4605+ cas [%o1], %g1, %g7
4606+ cmp %g1, %g7
4607+ bne,pn %icc, 2f
4608+ add %g7, %o0, %g7
4609+ sra %g7, 0, %o0
4610+ retl
4611+ nop
4612+2: BACKOFF_SPIN(%o2, %o3, 1b)
4613+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4614+
4615 .globl atomic_sub_ret
4616 .type atomic_sub_ret,#function
4617 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4618 BACKOFF_SETUP(%o2)
4619 1: lduw [%o1], %g1
4620- sub %g1, %o0, %g7
4621+ subcc %g1, %o0, %g7
4622+
4623+#ifdef CONFIG_PAX_REFCOUNT
4624+ tvs %icc, 6
4625+#endif
4626+
4627 cas [%o1], %g1, %g7
4628 cmp %g1, %g7
4629 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4630@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4631 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4632 BACKOFF_SETUP(%o2)
4633 1: ldx [%o1], %g1
4634- add %g1, %o0, %g7
4635+ addcc %g1, %o0, %g7
4636+
4637+#ifdef CONFIG_PAX_REFCOUNT
4638+ tvs %xcc, 6
4639+#endif
4640+
4641 casx [%o1], %g1, %g7
4642 cmp %g1, %g7
4643 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4644@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4645 2: BACKOFF_SPIN(%o2, %o3, 1b)
4646 .size atomic64_add, .-atomic64_add
4647
4648+ .globl atomic64_add_unchecked
4649+ .type atomic64_add_unchecked,#function
4650+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4651+ BACKOFF_SETUP(%o2)
4652+1: ldx [%o1], %g1
4653+ addcc %g1, %o0, %g7
4654+ casx [%o1], %g1, %g7
4655+ cmp %g1, %g7
4656+ bne,pn %xcc, 2f
4657+ nop
4658+ retl
4659+ nop
4660+2: BACKOFF_SPIN(%o2, %o3, 1b)
4661+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
4662+
4663 .globl atomic64_sub
4664 .type atomic64_sub,#function
4665 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4666 BACKOFF_SETUP(%o2)
4667 1: ldx [%o1], %g1
4668- sub %g1, %o0, %g7
4669+ subcc %g1, %o0, %g7
4670+
4671+#ifdef CONFIG_PAX_REFCOUNT
4672+ tvs %xcc, 6
4673+#endif
4674+
4675 casx [%o1], %g1, %g7
4676 cmp %g1, %g7
4677 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4678@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4679 2: BACKOFF_SPIN(%o2, %o3, 1b)
4680 .size atomic64_sub, .-atomic64_sub
4681
4682+ .globl atomic64_sub_unchecked
4683+ .type atomic64_sub_unchecked,#function
4684+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4685+ BACKOFF_SETUP(%o2)
4686+1: ldx [%o1], %g1
4687+ subcc %g1, %o0, %g7
4688+ casx [%o1], %g1, %g7
4689+ cmp %g1, %g7
4690+ bne,pn %xcc, 2f
4691+ nop
4692+ retl
4693+ nop
4694+2: BACKOFF_SPIN(%o2, %o3, 1b)
4695+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4696+
4697 .globl atomic64_add_ret
4698 .type atomic64_add_ret,#function
4699 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4700 BACKOFF_SETUP(%o2)
4701 1: ldx [%o1], %g1
4702- add %g1, %o0, %g7
4703+ addcc %g1, %o0, %g7
4704+
4705+#ifdef CONFIG_PAX_REFCOUNT
4706+ tvs %xcc, 6
4707+#endif
4708+
4709 casx [%o1], %g1, %g7
4710 cmp %g1, %g7
4711 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4712@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4713 2: BACKOFF_SPIN(%o2, %o3, 1b)
4714 .size atomic64_add_ret, .-atomic64_add_ret
4715
4716+ .globl atomic64_add_ret_unchecked
4717+ .type atomic64_add_ret_unchecked,#function
4718+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4719+ BACKOFF_SETUP(%o2)
4720+1: ldx [%o1], %g1
4721+ addcc %g1, %o0, %g7
4722+ casx [%o1], %g1, %g7
4723+ cmp %g1, %g7
4724+ bne,pn %xcc, 2f
4725+ add %g7, %o0, %g7
4726+ mov %g7, %o0
4727+ retl
4728+ nop
4729+2: BACKOFF_SPIN(%o2, %o3, 1b)
4730+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4731+
4732 .globl atomic64_sub_ret
4733 .type atomic64_sub_ret,#function
4734 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4735 BACKOFF_SETUP(%o2)
4736 1: ldx [%o1], %g1
4737- sub %g1, %o0, %g7
4738+ subcc %g1, %o0, %g7
4739+
4740+#ifdef CONFIG_PAX_REFCOUNT
4741+ tvs %xcc, 6
4742+#endif
4743+
4744 casx [%o1], %g1, %g7
4745 cmp %g1, %g7
4746 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4747diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
4748index 1b30bb3..b4a16c7 100644
4749--- a/arch/sparc/lib/ksyms.c
4750+++ b/arch/sparc/lib/ksyms.c
4751@@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4752
4753 /* Atomic counter implementation. */
4754 EXPORT_SYMBOL(atomic_add);
4755+EXPORT_SYMBOL(atomic_add_unchecked);
4756 EXPORT_SYMBOL(atomic_add_ret);
4757+EXPORT_SYMBOL(atomic_add_ret_unchecked);
4758 EXPORT_SYMBOL(atomic_sub);
4759+EXPORT_SYMBOL(atomic_sub_unchecked);
4760 EXPORT_SYMBOL(atomic_sub_ret);
4761 EXPORT_SYMBOL(atomic64_add);
4762+EXPORT_SYMBOL(atomic64_add_unchecked);
4763 EXPORT_SYMBOL(atomic64_add_ret);
4764+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4765 EXPORT_SYMBOL(atomic64_sub);
4766+EXPORT_SYMBOL(atomic64_sub_unchecked);
4767 EXPORT_SYMBOL(atomic64_sub_ret);
4768
4769 /* Atomic bit operations. */
4770diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
4771index 301421c..e2535d1 100644
4772--- a/arch/sparc/mm/Makefile
4773+++ b/arch/sparc/mm/Makefile
4774@@ -2,7 +2,7 @@
4775 #
4776
4777 asflags-y := -ansi
4778-ccflags-y := -Werror
4779+#ccflags-y := -Werror
4780
4781 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
4782 obj-y += fault_$(BITS).o
4783diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
4784index 8023fd7..c8e89e9 100644
4785--- a/arch/sparc/mm/fault_32.c
4786+++ b/arch/sparc/mm/fault_32.c
4787@@ -21,6 +21,9 @@
4788 #include <linux/perf_event.h>
4789 #include <linux/interrupt.h>
4790 #include <linux/kdebug.h>
4791+#include <linux/slab.h>
4792+#include <linux/pagemap.h>
4793+#include <linux/compiler.h>
4794
4795 #include <asm/system.h>
4796 #include <asm/page.h>
4797@@ -208,6 +211,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
4798 return safe_compute_effective_address(regs, insn);
4799 }
4800
4801+#ifdef CONFIG_PAX_PAGEEXEC
4802+#ifdef CONFIG_PAX_DLRESOLVE
4803+static void pax_emuplt_close(struct vm_area_struct *vma)
4804+{
4805+ vma->vm_mm->call_dl_resolve = 0UL;
4806+}
4807+
4808+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4809+{
4810+ unsigned int *kaddr;
4811+
4812+ vmf->page = alloc_page(GFP_HIGHUSER);
4813+ if (!vmf->page)
4814+ return VM_FAULT_OOM;
4815+
4816+ kaddr = kmap(vmf->page);
4817+ memset(kaddr, 0, PAGE_SIZE);
4818+ kaddr[0] = 0x9DE3BFA8U; /* save */
4819+ flush_dcache_page(vmf->page);
4820+ kunmap(vmf->page);
4821+ return VM_FAULT_MAJOR;
4822+}
4823+
4824+static const struct vm_operations_struct pax_vm_ops = {
4825+ .close = pax_emuplt_close,
4826+ .fault = pax_emuplt_fault
4827+};
4828+
4829+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4830+{
4831+ int ret;
4832+
4833+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4834+ vma->vm_mm = current->mm;
4835+ vma->vm_start = addr;
4836+ vma->vm_end = addr + PAGE_SIZE;
4837+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4838+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4839+ vma->vm_ops = &pax_vm_ops;
4840+
4841+ ret = insert_vm_struct(current->mm, vma);
4842+ if (ret)
4843+ return ret;
4844+
4845+ ++current->mm->total_vm;
4846+ return 0;
4847+}
4848+#endif
4849+
4850+/*
4851+ * PaX: decide what to do with offenders (regs->pc = fault address)
4852+ *
4853+ * returns 1 when task should be killed
4854+ * 2 when patched PLT trampoline was detected
4855+ * 3 when unpatched PLT trampoline was detected
4856+ */
4857+static int pax_handle_fetch_fault(struct pt_regs *regs)
4858+{
4859+
4860+#ifdef CONFIG_PAX_EMUPLT
4861+ int err;
4862+
4863+ do { /* PaX: patched PLT emulation #1 */
4864+ unsigned int sethi1, sethi2, jmpl;
4865+
4866+ err = get_user(sethi1, (unsigned int *)regs->pc);
4867+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4868+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4869+
4870+ if (err)
4871+ break;
4872+
4873+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4874+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4875+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4876+ {
4877+ unsigned int addr;
4878+
4879+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4880+ addr = regs->u_regs[UREG_G1];
4881+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4882+ regs->pc = addr;
4883+ regs->npc = addr+4;
4884+ return 2;
4885+ }
4886+ } while (0);
4887+
4888+ { /* PaX: patched PLT emulation #2 */
4889+ unsigned int ba;
4890+
4891+ err = get_user(ba, (unsigned int *)regs->pc);
4892+
4893+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4894+ unsigned int addr;
4895+
4896+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4897+ regs->pc = addr;
4898+ regs->npc = addr+4;
4899+ return 2;
4900+ }
4901+ }
4902+
4903+ do { /* PaX: patched PLT emulation #3 */
4904+ unsigned int sethi, jmpl, nop;
4905+
4906+ err = get_user(sethi, (unsigned int *)regs->pc);
4907+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4908+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4909+
4910+ if (err)
4911+ break;
4912+
4913+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4914+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4915+ nop == 0x01000000U)
4916+ {
4917+ unsigned int addr;
4918+
4919+ addr = (sethi & 0x003FFFFFU) << 10;
4920+ regs->u_regs[UREG_G1] = addr;
4921+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4922+ regs->pc = addr;
4923+ regs->npc = addr+4;
4924+ return 2;
4925+ }
4926+ } while (0);
4927+
4928+ do { /* PaX: unpatched PLT emulation step 1 */
4929+ unsigned int sethi, ba, nop;
4930+
4931+ err = get_user(sethi, (unsigned int *)regs->pc);
4932+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
4933+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4934+
4935+ if (err)
4936+ break;
4937+
4938+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4939+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4940+ nop == 0x01000000U)
4941+ {
4942+ unsigned int addr, save, call;
4943+
4944+ if ((ba & 0xFFC00000U) == 0x30800000U)
4945+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4946+ else
4947+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4948+
4949+ err = get_user(save, (unsigned int *)addr);
4950+ err |= get_user(call, (unsigned int *)(addr+4));
4951+ err |= get_user(nop, (unsigned int *)(addr+8));
4952+ if (err)
4953+ break;
4954+
4955+#ifdef CONFIG_PAX_DLRESOLVE
4956+ if (save == 0x9DE3BFA8U &&
4957+ (call & 0xC0000000U) == 0x40000000U &&
4958+ nop == 0x01000000U)
4959+ {
4960+ struct vm_area_struct *vma;
4961+ unsigned long call_dl_resolve;
4962+
4963+ down_read(&current->mm->mmap_sem);
4964+ call_dl_resolve = current->mm->call_dl_resolve;
4965+ up_read(&current->mm->mmap_sem);
4966+ if (likely(call_dl_resolve))
4967+ goto emulate;
4968+
4969+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4970+
4971+ down_write(&current->mm->mmap_sem);
4972+ if (current->mm->call_dl_resolve) {
4973+ call_dl_resolve = current->mm->call_dl_resolve;
4974+ up_write(&current->mm->mmap_sem);
4975+ if (vma)
4976+ kmem_cache_free(vm_area_cachep, vma);
4977+ goto emulate;
4978+ }
4979+
4980+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4981+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4982+ up_write(&current->mm->mmap_sem);
4983+ if (vma)
4984+ kmem_cache_free(vm_area_cachep, vma);
4985+ return 1;
4986+ }
4987+
4988+ if (pax_insert_vma(vma, call_dl_resolve)) {
4989+ up_write(&current->mm->mmap_sem);
4990+ kmem_cache_free(vm_area_cachep, vma);
4991+ return 1;
4992+ }
4993+
4994+ current->mm->call_dl_resolve = call_dl_resolve;
4995+ up_write(&current->mm->mmap_sem);
4996+
4997+emulate:
4998+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4999+ regs->pc = call_dl_resolve;
5000+ regs->npc = addr+4;
5001+ return 3;
5002+ }
5003+#endif
5004+
5005+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5006+ if ((save & 0xFFC00000U) == 0x05000000U &&
5007+ (call & 0xFFFFE000U) == 0x85C0A000U &&
5008+ nop == 0x01000000U)
5009+ {
5010+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5011+ regs->u_regs[UREG_G2] = addr + 4;
5012+ addr = (save & 0x003FFFFFU) << 10;
5013+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5014+ regs->pc = addr;
5015+ regs->npc = addr+4;
5016+ return 3;
5017+ }
5018+ }
5019+ } while (0);
5020+
5021+ do { /* PaX: unpatched PLT emulation step 2 */
5022+ unsigned int save, call, nop;
5023+
5024+ err = get_user(save, (unsigned int *)(regs->pc-4));
5025+ err |= get_user(call, (unsigned int *)regs->pc);
5026+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
5027+ if (err)
5028+ break;
5029+
5030+ if (save == 0x9DE3BFA8U &&
5031+ (call & 0xC0000000U) == 0x40000000U &&
5032+ nop == 0x01000000U)
5033+ {
5034+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
5035+
5036+ regs->u_regs[UREG_RETPC] = regs->pc;
5037+ regs->pc = dl_resolve;
5038+ regs->npc = dl_resolve+4;
5039+ return 3;
5040+ }
5041+ } while (0);
5042+#endif
5043+
5044+ return 1;
5045+}
5046+
5047+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5048+{
5049+ unsigned long i;
5050+
5051+ printk(KERN_ERR "PAX: bytes at PC: ");
5052+ for (i = 0; i < 8; i++) {
5053+ unsigned int c;
5054+ if (get_user(c, (unsigned int *)pc+i))
5055+ printk(KERN_CONT "???????? ");
5056+ else
5057+ printk(KERN_CONT "%08x ", c);
5058+ }
5059+ printk("\n");
5060+}
5061+#endif
5062+
5063 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
5064 int text_fault)
5065 {
5066@@ -280,6 +545,24 @@ good_area:
5067 if(!(vma->vm_flags & VM_WRITE))
5068 goto bad_area;
5069 } else {
5070+
5071+#ifdef CONFIG_PAX_PAGEEXEC
5072+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
5073+ up_read(&mm->mmap_sem);
5074+ switch (pax_handle_fetch_fault(regs)) {
5075+
5076+#ifdef CONFIG_PAX_EMUPLT
5077+ case 2:
5078+ case 3:
5079+ return;
5080+#endif
5081+
5082+ }
5083+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
5084+ do_group_exit(SIGKILL);
5085+ }
5086+#endif
5087+
5088 /* Allow reads even for write-only mappings */
5089 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
5090 goto bad_area;
5091diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
5092index 504c062..6fcb9c6 100644
5093--- a/arch/sparc/mm/fault_64.c
5094+++ b/arch/sparc/mm/fault_64.c
5095@@ -21,6 +21,9 @@
5096 #include <linux/kprobes.h>
5097 #include <linux/kdebug.h>
5098 #include <linux/percpu.h>
5099+#include <linux/slab.h>
5100+#include <linux/pagemap.h>
5101+#include <linux/compiler.h>
5102
5103 #include <asm/page.h>
5104 #include <asm/pgtable.h>
5105@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
5106 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
5107 regs->tpc);
5108 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
5109- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
5110+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
5111 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
5112 dump_stack();
5113 unhandled_fault(regs->tpc, current, regs);
5114@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
5115 show_regs(regs);
5116 }
5117
5118+#ifdef CONFIG_PAX_PAGEEXEC
5119+#ifdef CONFIG_PAX_DLRESOLVE
5120+static void pax_emuplt_close(struct vm_area_struct *vma)
5121+{
5122+ vma->vm_mm->call_dl_resolve = 0UL;
5123+}
5124+
5125+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5126+{
5127+ unsigned int *kaddr;
5128+
5129+ vmf->page = alloc_page(GFP_HIGHUSER);
5130+ if (!vmf->page)
5131+ return VM_FAULT_OOM;
5132+
5133+ kaddr = kmap(vmf->page);
5134+ memset(kaddr, 0, PAGE_SIZE);
5135+ kaddr[0] = 0x9DE3BFA8U; /* save */
5136+ flush_dcache_page(vmf->page);
5137+ kunmap(vmf->page);
5138+ return VM_FAULT_MAJOR;
5139+}
5140+
5141+static const struct vm_operations_struct pax_vm_ops = {
5142+ .close = pax_emuplt_close,
5143+ .fault = pax_emuplt_fault
5144+};
5145+
5146+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5147+{
5148+ int ret;
5149+
5150+ INIT_LIST_HEAD(&vma->anon_vma_chain);
5151+ vma->vm_mm = current->mm;
5152+ vma->vm_start = addr;
5153+ vma->vm_end = addr + PAGE_SIZE;
5154+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5155+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5156+ vma->vm_ops = &pax_vm_ops;
5157+
5158+ ret = insert_vm_struct(current->mm, vma);
5159+ if (ret)
5160+ return ret;
5161+
5162+ ++current->mm->total_vm;
5163+ return 0;
5164+}
5165+#endif
5166+
5167+/*
5168+ * PaX: decide what to do with offenders (regs->tpc = fault address)
5169+ *
5170+ * returns 1 when task should be killed
5171+ * 2 when patched PLT trampoline was detected
5172+ * 3 when unpatched PLT trampoline was detected
5173+ */
5174+static int pax_handle_fetch_fault(struct pt_regs *regs)
5175+{
5176+
5177+#ifdef CONFIG_PAX_EMUPLT
5178+ int err;
5179+
5180+ do { /* PaX: patched PLT emulation #1 */
5181+ unsigned int sethi1, sethi2, jmpl;
5182+
5183+ err = get_user(sethi1, (unsigned int *)regs->tpc);
5184+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
5185+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
5186+
5187+ if (err)
5188+ break;
5189+
5190+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5191+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
5192+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
5193+ {
5194+ unsigned long addr;
5195+
5196+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5197+ addr = regs->u_regs[UREG_G1];
5198+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5199+
5200+ if (test_thread_flag(TIF_32BIT))
5201+ addr &= 0xFFFFFFFFUL;
5202+
5203+ regs->tpc = addr;
5204+ regs->tnpc = addr+4;
5205+ return 2;
5206+ }
5207+ } while (0);
5208+
5209+ { /* PaX: patched PLT emulation #2 */
5210+ unsigned int ba;
5211+
5212+ err = get_user(ba, (unsigned int *)regs->tpc);
5213+
5214+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5215+ unsigned long addr;
5216+
5217+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5218+
5219+ if (test_thread_flag(TIF_32BIT))
5220+ addr &= 0xFFFFFFFFUL;
5221+
5222+ regs->tpc = addr;
5223+ regs->tnpc = addr+4;
5224+ return 2;
5225+ }
5226+ }
5227+
5228+ do { /* PaX: patched PLT emulation #3 */
5229+ unsigned int sethi, jmpl, nop;
5230+
5231+ err = get_user(sethi, (unsigned int *)regs->tpc);
5232+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5233+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5234+
5235+ if (err)
5236+ break;
5237+
5238+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5239+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5240+ nop == 0x01000000U)
5241+ {
5242+ unsigned long addr;
5243+
5244+ addr = (sethi & 0x003FFFFFU) << 10;
5245+ regs->u_regs[UREG_G1] = addr;
5246+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5247+
5248+ if (test_thread_flag(TIF_32BIT))
5249+ addr &= 0xFFFFFFFFUL;
5250+
5251+ regs->tpc = addr;
5252+ regs->tnpc = addr+4;
5253+ return 2;
5254+ }
5255+ } while (0);
5256+
5257+ do { /* PaX: patched PLT emulation #4 */
5258+ unsigned int sethi, mov1, call, mov2;
5259+
5260+ err = get_user(sethi, (unsigned int *)regs->tpc);
5261+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5262+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
5263+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5264+
5265+ if (err)
5266+ break;
5267+
5268+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5269+ mov1 == 0x8210000FU &&
5270+ (call & 0xC0000000U) == 0x40000000U &&
5271+ mov2 == 0x9E100001U)
5272+ {
5273+ unsigned long addr;
5274+
5275+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5276+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5277+
5278+ if (test_thread_flag(TIF_32BIT))
5279+ addr &= 0xFFFFFFFFUL;
5280+
5281+ regs->tpc = addr;
5282+ regs->tnpc = addr+4;
5283+ return 2;
5284+ }
5285+ } while (0);
5286+
5287+ do { /* PaX: patched PLT emulation #5 */
5288+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5289+
5290+ err = get_user(sethi, (unsigned int *)regs->tpc);
5291+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5292+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5293+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5294+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5295+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5296+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5297+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5298+
5299+ if (err)
5300+ break;
5301+
5302+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5303+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5304+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5305+ (or1 & 0xFFFFE000U) == 0x82106000U &&
5306+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5307+ sllx == 0x83287020U &&
5308+ jmpl == 0x81C04005U &&
5309+ nop == 0x01000000U)
5310+ {
5311+ unsigned long addr;
5312+
5313+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5314+ regs->u_regs[UREG_G1] <<= 32;
5315+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5316+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5317+ regs->tpc = addr;
5318+ regs->tnpc = addr+4;
5319+ return 2;
5320+ }
5321+ } while (0);
5322+
5323+ do { /* PaX: patched PLT emulation #6 */
5324+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5325+
5326+ err = get_user(sethi, (unsigned int *)regs->tpc);
5327+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5328+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5329+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5330+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
5331+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5332+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5333+
5334+ if (err)
5335+ break;
5336+
5337+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5338+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5339+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5340+ sllx == 0x83287020U &&
5341+ (or & 0xFFFFE000U) == 0x8A116000U &&
5342+ jmpl == 0x81C04005U &&
5343+ nop == 0x01000000U)
5344+ {
5345+ unsigned long addr;
5346+
5347+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5348+ regs->u_regs[UREG_G1] <<= 32;
5349+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5350+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5351+ regs->tpc = addr;
5352+ regs->tnpc = addr+4;
5353+ return 2;
5354+ }
5355+ } while (0);
5356+
5357+ do { /* PaX: unpatched PLT emulation step 1 */
5358+ unsigned int sethi, ba, nop;
5359+
5360+ err = get_user(sethi, (unsigned int *)regs->tpc);
5361+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5362+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5363+
5364+ if (err)
5365+ break;
5366+
5367+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5368+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5369+ nop == 0x01000000U)
5370+ {
5371+ unsigned long addr;
5372+ unsigned int save, call;
5373+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5374+
5375+ if ((ba & 0xFFC00000U) == 0x30800000U)
5376+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5377+ else
5378+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5379+
5380+ if (test_thread_flag(TIF_32BIT))
5381+ addr &= 0xFFFFFFFFUL;
5382+
5383+ err = get_user(save, (unsigned int *)addr);
5384+ err |= get_user(call, (unsigned int *)(addr+4));
5385+ err |= get_user(nop, (unsigned int *)(addr+8));
5386+ if (err)
5387+ break;
5388+
5389+#ifdef CONFIG_PAX_DLRESOLVE
5390+ if (save == 0x9DE3BFA8U &&
5391+ (call & 0xC0000000U) == 0x40000000U &&
5392+ nop == 0x01000000U)
5393+ {
5394+ struct vm_area_struct *vma;
5395+ unsigned long call_dl_resolve;
5396+
5397+ down_read(&current->mm->mmap_sem);
5398+ call_dl_resolve = current->mm->call_dl_resolve;
5399+ up_read(&current->mm->mmap_sem);
5400+ if (likely(call_dl_resolve))
5401+ goto emulate;
5402+
5403+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5404+
5405+ down_write(&current->mm->mmap_sem);
5406+ if (current->mm->call_dl_resolve) {
5407+ call_dl_resolve = current->mm->call_dl_resolve;
5408+ up_write(&current->mm->mmap_sem);
5409+ if (vma)
5410+ kmem_cache_free(vm_area_cachep, vma);
5411+ goto emulate;
5412+ }
5413+
5414+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5415+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5416+ up_write(&current->mm->mmap_sem);
5417+ if (vma)
5418+ kmem_cache_free(vm_area_cachep, vma);
5419+ return 1;
5420+ }
5421+
5422+ if (pax_insert_vma(vma, call_dl_resolve)) {
5423+ up_write(&current->mm->mmap_sem);
5424+ kmem_cache_free(vm_area_cachep, vma);
5425+ return 1;
5426+ }
5427+
5428+ current->mm->call_dl_resolve = call_dl_resolve;
5429+ up_write(&current->mm->mmap_sem);
5430+
5431+emulate:
5432+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5433+ regs->tpc = call_dl_resolve;
5434+ regs->tnpc = addr+4;
5435+ return 3;
5436+ }
5437+#endif
5438+
5439+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5440+ if ((save & 0xFFC00000U) == 0x05000000U &&
5441+ (call & 0xFFFFE000U) == 0x85C0A000U &&
5442+ nop == 0x01000000U)
5443+ {
5444+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5445+ regs->u_regs[UREG_G2] = addr + 4;
5446+ addr = (save & 0x003FFFFFU) << 10;
5447+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5448+
5449+ if (test_thread_flag(TIF_32BIT))
5450+ addr &= 0xFFFFFFFFUL;
5451+
5452+ regs->tpc = addr;
5453+ regs->tnpc = addr+4;
5454+ return 3;
5455+ }
5456+
5457+ /* PaX: 64-bit PLT stub */
5458+ err = get_user(sethi1, (unsigned int *)addr);
5459+ err |= get_user(sethi2, (unsigned int *)(addr+4));
5460+ err |= get_user(or1, (unsigned int *)(addr+8));
5461+ err |= get_user(or2, (unsigned int *)(addr+12));
5462+ err |= get_user(sllx, (unsigned int *)(addr+16));
5463+ err |= get_user(add, (unsigned int *)(addr+20));
5464+ err |= get_user(jmpl, (unsigned int *)(addr+24));
5465+ err |= get_user(nop, (unsigned int *)(addr+28));
5466+ if (err)
5467+ break;
5468+
5469+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5470+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5471+ (or1 & 0xFFFFE000U) == 0x88112000U &&
5472+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5473+ sllx == 0x89293020U &&
5474+ add == 0x8A010005U &&
5475+ jmpl == 0x89C14000U &&
5476+ nop == 0x01000000U)
5477+ {
5478+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5479+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5480+ regs->u_regs[UREG_G4] <<= 32;
5481+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5482+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5483+ regs->u_regs[UREG_G4] = addr + 24;
5484+ addr = regs->u_regs[UREG_G5];
5485+ regs->tpc = addr;
5486+ regs->tnpc = addr+4;
5487+ return 3;
5488+ }
5489+ }
5490+ } while (0);
5491+
5492+#ifdef CONFIG_PAX_DLRESOLVE
5493+ do { /* PaX: unpatched PLT emulation step 2 */
5494+ unsigned int save, call, nop;
5495+
5496+ err = get_user(save, (unsigned int *)(regs->tpc-4));
5497+ err |= get_user(call, (unsigned int *)regs->tpc);
5498+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5499+ if (err)
5500+ break;
5501+
5502+ if (save == 0x9DE3BFA8U &&
5503+ (call & 0xC0000000U) == 0x40000000U &&
5504+ nop == 0x01000000U)
5505+ {
5506+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5507+
5508+ if (test_thread_flag(TIF_32BIT))
5509+ dl_resolve &= 0xFFFFFFFFUL;
5510+
5511+ regs->u_regs[UREG_RETPC] = regs->tpc;
5512+ regs->tpc = dl_resolve;
5513+ regs->tnpc = dl_resolve+4;
5514+ return 3;
5515+ }
5516+ } while (0);
5517+#endif
5518+
5519+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5520+ unsigned int sethi, ba, nop;
5521+
5522+ err = get_user(sethi, (unsigned int *)regs->tpc);
5523+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5524+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5525+
5526+ if (err)
5527+ break;
5528+
5529+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5530+ (ba & 0xFFF00000U) == 0x30600000U &&
5531+ nop == 0x01000000U)
5532+ {
5533+ unsigned long addr;
5534+
5535+ addr = (sethi & 0x003FFFFFU) << 10;
5536+ regs->u_regs[UREG_G1] = addr;
5537+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5538+
5539+ if (test_thread_flag(TIF_32BIT))
5540+ addr &= 0xFFFFFFFFUL;
5541+
5542+ regs->tpc = addr;
5543+ regs->tnpc = addr+4;
5544+ return 2;
5545+ }
5546+ } while (0);
5547+
5548+#endif
5549+
5550+ return 1;
5551+}
5552+
5553+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5554+{
5555+ unsigned long i;
5556+
5557+ printk(KERN_ERR "PAX: bytes at PC: ");
5558+ for (i = 0; i < 8; i++) {
5559+ unsigned int c;
5560+ if (get_user(c, (unsigned int *)pc+i))
5561+ printk(KERN_CONT "???????? ");
5562+ else
5563+ printk(KERN_CONT "%08x ", c);
5564+ }
5565+ printk("\n");
5566+}
5567+#endif
5568+
5569 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5570 {
5571 struct mm_struct *mm = current->mm;
5572@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5573 if (!vma)
5574 goto bad_area;
5575
5576+#ifdef CONFIG_PAX_PAGEEXEC
5577+ /* PaX: detect ITLB misses on non-exec pages */
5578+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5579+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5580+ {
5581+ if (address != regs->tpc)
5582+ goto good_area;
5583+
5584+ up_read(&mm->mmap_sem);
5585+ switch (pax_handle_fetch_fault(regs)) {
5586+
5587+#ifdef CONFIG_PAX_EMUPLT
5588+ case 2:
5589+ case 3:
5590+ return;
5591+#endif
5592+
5593+ }
5594+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5595+ do_group_exit(SIGKILL);
5596+ }
5597+#endif
5598+
5599 /* Pure DTLB misses do not tell us whether the fault causing
5600 * load/store/atomic was a write or not, it only says that there
5601 * was no match. So in such a case we (carefully) read the
5602diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
5603index 07e1453..0a7d9e9 100644
5604--- a/arch/sparc/mm/hugetlbpage.c
5605+++ b/arch/sparc/mm/hugetlbpage.c
5606@@ -67,7 +67,7 @@ full_search:
5607 }
5608 return -ENOMEM;
5609 }
5610- if (likely(!vma || addr + len <= vma->vm_start)) {
5611+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5612 /*
5613 * Remember the place where we stopped the search:
5614 */
5615@@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5616 /* make sure it can fit in the remaining address space */
5617 if (likely(addr > len)) {
5618 vma = find_vma(mm, addr-len);
5619- if (!vma || addr <= vma->vm_start) {
5620+ if (check_heap_stack_gap(vma, addr - len, len)) {
5621 /* remember the address as a hint for next time */
5622 return (mm->free_area_cache = addr-len);
5623 }
5624@@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5625 if (unlikely(mm->mmap_base < len))
5626 goto bottomup;
5627
5628- addr = (mm->mmap_base-len) & HPAGE_MASK;
5629+ addr = mm->mmap_base - len;
5630
5631 do {
5632+ addr &= HPAGE_MASK;
5633 /*
5634 * Lookup failure means no vma is above this address,
5635 * else if new region fits below vma->vm_start,
5636 * return with success:
5637 */
5638 vma = find_vma(mm, addr);
5639- if (likely(!vma || addr+len <= vma->vm_start)) {
5640+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5641 /* remember the address as a hint for next time */
5642 return (mm->free_area_cache = addr);
5643 }
5644@@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5645 mm->cached_hole_size = vma->vm_start - addr;
5646
5647 /* try just below the current vma->vm_start */
5648- addr = (vma->vm_start-len) & HPAGE_MASK;
5649- } while (likely(len < vma->vm_start));
5650+ addr = skip_heap_stack_gap(vma, len);
5651+ } while (!IS_ERR_VALUE(addr));
5652
5653 bottomup:
5654 /*
5655@@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
5656 if (addr) {
5657 addr = ALIGN(addr, HPAGE_SIZE);
5658 vma = find_vma(mm, addr);
5659- if (task_size - len >= addr &&
5660- (!vma || addr + len <= vma->vm_start))
5661+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5662 return addr;
5663 }
5664 if (mm->get_unmapped_area == arch_get_unmapped_area)
5665diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
5666index 7b00de6..78239f4 100644
5667--- a/arch/sparc/mm/init_32.c
5668+++ b/arch/sparc/mm/init_32.c
5669@@ -316,6 +316,9 @@ extern void device_scan(void);
5670 pgprot_t PAGE_SHARED __read_mostly;
5671 EXPORT_SYMBOL(PAGE_SHARED);
5672
5673+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5674+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5675+
5676 void __init paging_init(void)
5677 {
5678 switch(sparc_cpu_model) {
5679@@ -344,17 +347,17 @@ void __init paging_init(void)
5680
5681 /* Initialize the protection map with non-constant, MMU dependent values. */
5682 protection_map[0] = PAGE_NONE;
5683- protection_map[1] = PAGE_READONLY;
5684- protection_map[2] = PAGE_COPY;
5685- protection_map[3] = PAGE_COPY;
5686+ protection_map[1] = PAGE_READONLY_NOEXEC;
5687+ protection_map[2] = PAGE_COPY_NOEXEC;
5688+ protection_map[3] = PAGE_COPY_NOEXEC;
5689 protection_map[4] = PAGE_READONLY;
5690 protection_map[5] = PAGE_READONLY;
5691 protection_map[6] = PAGE_COPY;
5692 protection_map[7] = PAGE_COPY;
5693 protection_map[8] = PAGE_NONE;
5694- protection_map[9] = PAGE_READONLY;
5695- protection_map[10] = PAGE_SHARED;
5696- protection_map[11] = PAGE_SHARED;
5697+ protection_map[9] = PAGE_READONLY_NOEXEC;
5698+ protection_map[10] = PAGE_SHARED_NOEXEC;
5699+ protection_map[11] = PAGE_SHARED_NOEXEC;
5700 protection_map[12] = PAGE_READONLY;
5701 protection_map[13] = PAGE_READONLY;
5702 protection_map[14] = PAGE_SHARED;
5703diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
5704index cbef74e..c38fead 100644
5705--- a/arch/sparc/mm/srmmu.c
5706+++ b/arch/sparc/mm/srmmu.c
5707@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5708 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5709 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5710 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5711+
5712+#ifdef CONFIG_PAX_PAGEEXEC
5713+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5714+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5715+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5716+#endif
5717+
5718 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5719 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5720
5721diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
5722index 27fe667..36d474c 100644
5723--- a/arch/tile/include/asm/atomic_64.h
5724+++ b/arch/tile/include/asm/atomic_64.h
5725@@ -142,6 +142,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
5726
5727 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5728
5729+#define atomic64_read_unchecked(v) atomic64_read(v)
5730+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5731+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5732+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5733+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5734+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5735+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5736+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5737+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5738+
5739 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
5740 #define smp_mb__before_atomic_dec() smp_mb()
5741 #define smp_mb__after_atomic_dec() smp_mb()
5742diff --git a/arch/um/Makefile b/arch/um/Makefile
5743index 7730af6..cce5b19 100644
5744--- a/arch/um/Makefile
5745+++ b/arch/um/Makefile
5746@@ -61,6 +61,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
5747 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
5748 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
5749
5750+ifdef CONSTIFY_PLUGIN
5751+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5752+endif
5753+
5754 #This will adjust *FLAGS accordingly to the platform.
5755 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
5756
5757diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
5758index 6c03acd..a5e0215 100644
5759--- a/arch/um/include/asm/kmap_types.h
5760+++ b/arch/um/include/asm/kmap_types.h
5761@@ -23,6 +23,7 @@ enum km_type {
5762 KM_IRQ1,
5763 KM_SOFTIRQ0,
5764 KM_SOFTIRQ1,
5765+ KM_CLEARPAGE,
5766 KM_TYPE_NR
5767 };
5768
5769diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
5770index 7cfc3ce..cbd1a58 100644
5771--- a/arch/um/include/asm/page.h
5772+++ b/arch/um/include/asm/page.h
5773@@ -14,6 +14,9 @@
5774 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5775 #define PAGE_MASK (~(PAGE_SIZE-1))
5776
5777+#define ktla_ktva(addr) (addr)
5778+#define ktva_ktla(addr) (addr)
5779+
5780 #ifndef __ASSEMBLY__
5781
5782 struct page;
5783diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
5784index c533835..84db18e 100644
5785--- a/arch/um/kernel/process.c
5786+++ b/arch/um/kernel/process.c
5787@@ -406,22 +406,6 @@ int singlestepping(void * t)
5788 return 2;
5789 }
5790
5791-/*
5792- * Only x86 and x86_64 have an arch_align_stack().
5793- * All other arches have "#define arch_align_stack(x) (x)"
5794- * in their asm/system.h
5795- * As this is included in UML from asm-um/system-generic.h,
5796- * we can use it to behave as the subarch does.
5797- */
5798-#ifndef arch_align_stack
5799-unsigned long arch_align_stack(unsigned long sp)
5800-{
5801- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5802- sp -= get_random_int() % 8192;
5803- return sp & ~0xf;
5804-}
5805-#endif
5806-
5807 unsigned long get_wchan(struct task_struct *p)
5808 {
5809 unsigned long stack_page, sp, ip;
5810diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
5811index efb4294..61bc18c 100644
5812--- a/arch/x86/Kconfig
5813+++ b/arch/x86/Kconfig
5814@@ -235,7 +235,7 @@ config X86_HT
5815
5816 config X86_32_LAZY_GS
5817 def_bool y
5818- depends on X86_32 && !CC_STACKPROTECTOR
5819+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
5820
5821 config ARCH_HWEIGHT_CFLAGS
5822 string
5823@@ -1022,7 +1022,7 @@ choice
5824
5825 config NOHIGHMEM
5826 bool "off"
5827- depends on !X86_NUMAQ
5828+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5829 ---help---
5830 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
5831 However, the address space of 32-bit x86 processors is only 4
5832@@ -1059,7 +1059,7 @@ config NOHIGHMEM
5833
5834 config HIGHMEM4G
5835 bool "4GB"
5836- depends on !X86_NUMAQ
5837+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5838 ---help---
5839 Select this if you have a 32-bit processor and between 1 and 4
5840 gigabytes of physical RAM.
5841@@ -1113,7 +1113,7 @@ config PAGE_OFFSET
5842 hex
5843 default 0xB0000000 if VMSPLIT_3G_OPT
5844 default 0x80000000 if VMSPLIT_2G
5845- default 0x78000000 if VMSPLIT_2G_OPT
5846+ default 0x70000000 if VMSPLIT_2G_OPT
5847 default 0x40000000 if VMSPLIT_1G
5848 default 0xC0000000
5849 depends on X86_32
5850@@ -1496,6 +1496,7 @@ config SECCOMP
5851
5852 config CC_STACKPROTECTOR
5853 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
5854+ depends on X86_64 || !PAX_MEMORY_UDEREF
5855 ---help---
5856 This option turns on the -fstack-protector GCC feature. This
5857 feature puts, at the beginning of functions, a canary value on
5858@@ -1553,6 +1554,7 @@ config KEXEC_JUMP
5859 config PHYSICAL_START
5860 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
5861 default "0x1000000"
5862+ range 0x400000 0x40000000
5863 ---help---
5864 This gives the physical address where the kernel is loaded.
5865
5866@@ -1616,6 +1618,7 @@ config X86_NEED_RELOCS
5867 config PHYSICAL_ALIGN
5868 hex "Alignment value to which kernel should be aligned" if X86_32
5869 default "0x1000000"
5870+ range 0x400000 0x1000000 if PAX_KERNEXEC
5871 range 0x2000 0x1000000
5872 ---help---
5873 This value puts the alignment restrictions on physical address
5874@@ -1647,9 +1650,10 @@ config HOTPLUG_CPU
5875 Say N if you want to disable CPU hotplug.
5876
5877 config COMPAT_VDSO
5878- def_bool y
5879+ def_bool n
5880 prompt "Compat VDSO support"
5881 depends on X86_32 || IA32_EMULATION
5882+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
5883 ---help---
5884 Map the 32-bit VDSO to the predictable old-style address too.
5885
5886diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
5887index e3ca7e0..b30b28a 100644
5888--- a/arch/x86/Kconfig.cpu
5889+++ b/arch/x86/Kconfig.cpu
5890@@ -341,7 +341,7 @@ config X86_PPRO_FENCE
5891
5892 config X86_F00F_BUG
5893 def_bool y
5894- depends on M586MMX || M586TSC || M586 || M486 || M386
5895+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
5896
5897 config X86_INVD_BUG
5898 def_bool y
5899@@ -365,7 +365,7 @@ config X86_POPAD_OK
5900
5901 config X86_ALIGNMENT_16
5902 def_bool y
5903- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5904+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5905
5906 config X86_INTEL_USERCOPY
5907 def_bool y
5908@@ -411,7 +411,7 @@ config X86_CMPXCHG64
5909 # generates cmov.
5910 config X86_CMOV
5911 def_bool y
5912- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5913+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5914
5915 config X86_MINIMUM_CPU_FAMILY
5916 int
5917diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
5918index bf56e17..05f9891 100644
5919--- a/arch/x86/Kconfig.debug
5920+++ b/arch/x86/Kconfig.debug
5921@@ -81,7 +81,7 @@ config X86_PTDUMP
5922 config DEBUG_RODATA
5923 bool "Write protect kernel read-only data structures"
5924 default y
5925- depends on DEBUG_KERNEL
5926+ depends on DEBUG_KERNEL && BROKEN
5927 ---help---
5928 Mark the kernel read-only data as write-protected in the pagetables,
5929 in order to catch accidental (and incorrect) writes to such const
5930@@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
5931
5932 config DEBUG_SET_MODULE_RONX
5933 bool "Set loadable kernel module data as NX and text as RO"
5934- depends on MODULES
5935+ depends on MODULES && BROKEN
5936 ---help---
5937 This option helps catch unintended modifications to loadable
5938 kernel module's text and read-only data. It also prevents execution
5939diff --git a/arch/x86/Makefile b/arch/x86/Makefile
5940index b02e509..2631e48 100644
5941--- a/arch/x86/Makefile
5942+++ b/arch/x86/Makefile
5943@@ -46,6 +46,7 @@ else
5944 UTS_MACHINE := x86_64
5945 CHECKFLAGS += -D__x86_64__ -m64
5946
5947+ biarch := $(call cc-option,-m64)
5948 KBUILD_AFLAGS += -m64
5949 KBUILD_CFLAGS += -m64
5950
5951@@ -195,3 +196,12 @@ define archhelp
5952 echo ' FDARGS="..." arguments for the booted kernel'
5953 echo ' FDINITRD=file initrd for the booted kernel'
5954 endef
5955+
5956+define OLD_LD
5957+
5958+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
5959+*** Please upgrade your binutils to 2.18 or newer
5960+endef
5961+
5962+archprepare:
5963+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
5964diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
5965index 95365a8..52f857b 100644
5966--- a/arch/x86/boot/Makefile
5967+++ b/arch/x86/boot/Makefile
5968@@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
5969 $(call cc-option, -fno-stack-protector) \
5970 $(call cc-option, -mpreferred-stack-boundary=2)
5971 KBUILD_CFLAGS += $(call cc-option, -m32)
5972+ifdef CONSTIFY_PLUGIN
5973+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5974+endif
5975 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5976 GCOV_PROFILE := n
5977
5978diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
5979index 878e4b9..20537ab 100644
5980--- a/arch/x86/boot/bitops.h
5981+++ b/arch/x86/boot/bitops.h
5982@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5983 u8 v;
5984 const u32 *p = (const u32 *)addr;
5985
5986- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5987+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5988 return v;
5989 }
5990
5991@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5992
5993 static inline void set_bit(int nr, void *addr)
5994 {
5995- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5996+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5997 }
5998
5999 #endif /* BOOT_BITOPS_H */
6000diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
6001index c7093bd..d4247ffe0 100644
6002--- a/arch/x86/boot/boot.h
6003+++ b/arch/x86/boot/boot.h
6004@@ -85,7 +85,7 @@ static inline void io_delay(void)
6005 static inline u16 ds(void)
6006 {
6007 u16 seg;
6008- asm("movw %%ds,%0" : "=rm" (seg));
6009+ asm volatile("movw %%ds,%0" : "=rm" (seg));
6010 return seg;
6011 }
6012
6013@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
6014 static inline int memcmp(const void *s1, const void *s2, size_t len)
6015 {
6016 u8 diff;
6017- asm("repe; cmpsb; setnz %0"
6018+ asm volatile("repe; cmpsb; setnz %0"
6019 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
6020 return diff;
6021 }
6022diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
6023index 09664ef..edc5d03 100644
6024--- a/arch/x86/boot/compressed/Makefile
6025+++ b/arch/x86/boot/compressed/Makefile
6026@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
6027 KBUILD_CFLAGS += $(cflags-y)
6028 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
6029 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
6030+ifdef CONSTIFY_PLUGIN
6031+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6032+endif
6033
6034 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6035 GCOV_PROFILE := n
6036diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
6037index 67a655a..b924059 100644
6038--- a/arch/x86/boot/compressed/head_32.S
6039+++ b/arch/x86/boot/compressed/head_32.S
6040@@ -76,7 +76,7 @@ ENTRY(startup_32)
6041 notl %eax
6042 andl %eax, %ebx
6043 #else
6044- movl $LOAD_PHYSICAL_ADDR, %ebx
6045+ movl $____LOAD_PHYSICAL_ADDR, %ebx
6046 #endif
6047
6048 /* Target address to relocate to for decompression */
6049@@ -162,7 +162,7 @@ relocated:
6050 * and where it was actually loaded.
6051 */
6052 movl %ebp, %ebx
6053- subl $LOAD_PHYSICAL_ADDR, %ebx
6054+ subl $____LOAD_PHYSICAL_ADDR, %ebx
6055 jz 2f /* Nothing to be done if loaded at compiled addr. */
6056 /*
6057 * Process relocations.
6058@@ -170,8 +170,7 @@ relocated:
6059
6060 1: subl $4, %edi
6061 movl (%edi), %ecx
6062- testl %ecx, %ecx
6063- jz 2f
6064+ jecxz 2f
6065 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
6066 jmp 1b
6067 2:
6068diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
6069index 35af09d..99c9676 100644
6070--- a/arch/x86/boot/compressed/head_64.S
6071+++ b/arch/x86/boot/compressed/head_64.S
6072@@ -91,7 +91,7 @@ ENTRY(startup_32)
6073 notl %eax
6074 andl %eax, %ebx
6075 #else
6076- movl $LOAD_PHYSICAL_ADDR, %ebx
6077+ movl $____LOAD_PHYSICAL_ADDR, %ebx
6078 #endif
6079
6080 /* Target address to relocate to for decompression */
6081@@ -233,7 +233,7 @@ ENTRY(startup_64)
6082 notq %rax
6083 andq %rax, %rbp
6084 #else
6085- movq $LOAD_PHYSICAL_ADDR, %rbp
6086+ movq $____LOAD_PHYSICAL_ADDR, %rbp
6087 #endif
6088
6089 /* Target address to relocate to for decompression */
6090diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
6091index 3a19d04..7c1d55a 100644
6092--- a/arch/x86/boot/compressed/misc.c
6093+++ b/arch/x86/boot/compressed/misc.c
6094@@ -310,7 +310,7 @@ static void parse_elf(void *output)
6095 case PT_LOAD:
6096 #ifdef CONFIG_RELOCATABLE
6097 dest = output;
6098- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
6099+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
6100 #else
6101 dest = (void *)(phdr->p_paddr);
6102 #endif
6103@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
6104 error("Destination address too large");
6105 #endif
6106 #ifndef CONFIG_RELOCATABLE
6107- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
6108+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
6109 error("Wrong destination address");
6110 #endif
6111
6112diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
6113index 89bbf4e..869908e 100644
6114--- a/arch/x86/boot/compressed/relocs.c
6115+++ b/arch/x86/boot/compressed/relocs.c
6116@@ -13,8 +13,11 @@
6117
6118 static void die(char *fmt, ...);
6119
6120+#include "../../../../include/generated/autoconf.h"
6121+
6122 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
6123 static Elf32_Ehdr ehdr;
6124+static Elf32_Phdr *phdr;
6125 static unsigned long reloc_count, reloc_idx;
6126 static unsigned long *relocs;
6127
6128@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
6129 }
6130 }
6131
6132+static void read_phdrs(FILE *fp)
6133+{
6134+ unsigned int i;
6135+
6136+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
6137+ if (!phdr) {
6138+ die("Unable to allocate %d program headers\n",
6139+ ehdr.e_phnum);
6140+ }
6141+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
6142+ die("Seek to %d failed: %s\n",
6143+ ehdr.e_phoff, strerror(errno));
6144+ }
6145+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
6146+ die("Cannot read ELF program headers: %s\n",
6147+ strerror(errno));
6148+ }
6149+ for(i = 0; i < ehdr.e_phnum; i++) {
6150+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
6151+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
6152+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
6153+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
6154+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
6155+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
6156+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
6157+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
6158+ }
6159+
6160+}
6161+
6162 static void read_shdrs(FILE *fp)
6163 {
6164- int i;
6165+ unsigned int i;
6166 Elf32_Shdr shdr;
6167
6168 secs = calloc(ehdr.e_shnum, sizeof(struct section));
6169@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
6170
6171 static void read_strtabs(FILE *fp)
6172 {
6173- int i;
6174+ unsigned int i;
6175 for (i = 0; i < ehdr.e_shnum; i++) {
6176 struct section *sec = &secs[i];
6177 if (sec->shdr.sh_type != SHT_STRTAB) {
6178@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
6179
6180 static void read_symtabs(FILE *fp)
6181 {
6182- int i,j;
6183+ unsigned int i,j;
6184 for (i = 0; i < ehdr.e_shnum; i++) {
6185 struct section *sec = &secs[i];
6186 if (sec->shdr.sh_type != SHT_SYMTAB) {
6187@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
6188
6189 static void read_relocs(FILE *fp)
6190 {
6191- int i,j;
6192+ unsigned int i,j;
6193+ uint32_t base;
6194+
6195 for (i = 0; i < ehdr.e_shnum; i++) {
6196 struct section *sec = &secs[i];
6197 if (sec->shdr.sh_type != SHT_REL) {
6198@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
6199 die("Cannot read symbol table: %s\n",
6200 strerror(errno));
6201 }
6202+ base = 0;
6203+ for (j = 0; j < ehdr.e_phnum; j++) {
6204+ if (phdr[j].p_type != PT_LOAD )
6205+ continue;
6206+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6207+ continue;
6208+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6209+ break;
6210+ }
6211 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6212 Elf32_Rel *rel = &sec->reltab[j];
6213- rel->r_offset = elf32_to_cpu(rel->r_offset);
6214+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6215 rel->r_info = elf32_to_cpu(rel->r_info);
6216 }
6217 }
6218@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
6219
6220 static void print_absolute_symbols(void)
6221 {
6222- int i;
6223+ unsigned int i;
6224 printf("Absolute symbols\n");
6225 printf(" Num: Value Size Type Bind Visibility Name\n");
6226 for (i = 0; i < ehdr.e_shnum; i++) {
6227 struct section *sec = &secs[i];
6228 char *sym_strtab;
6229 Elf32_Sym *sh_symtab;
6230- int j;
6231+ unsigned int j;
6232
6233 if (sec->shdr.sh_type != SHT_SYMTAB) {
6234 continue;
6235@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
6236
6237 static void print_absolute_relocs(void)
6238 {
6239- int i, printed = 0;
6240+ unsigned int i, printed = 0;
6241
6242 for (i = 0; i < ehdr.e_shnum; i++) {
6243 struct section *sec = &secs[i];
6244 struct section *sec_applies, *sec_symtab;
6245 char *sym_strtab;
6246 Elf32_Sym *sh_symtab;
6247- int j;
6248+ unsigned int j;
6249 if (sec->shdr.sh_type != SHT_REL) {
6250 continue;
6251 }
6252@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
6253
6254 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6255 {
6256- int i;
6257+ unsigned int i;
6258 /* Walk through the relocations */
6259 for (i = 0; i < ehdr.e_shnum; i++) {
6260 char *sym_strtab;
6261 Elf32_Sym *sh_symtab;
6262 struct section *sec_applies, *sec_symtab;
6263- int j;
6264+ unsigned int j;
6265 struct section *sec = &secs[i];
6266
6267 if (sec->shdr.sh_type != SHT_REL) {
6268@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6269 !is_rel_reloc(sym_name(sym_strtab, sym))) {
6270 continue;
6271 }
6272+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6273+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6274+ continue;
6275+
6276+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6277+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6278+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6279+ continue;
6280+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6281+ continue;
6282+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6283+ continue;
6284+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6285+ continue;
6286+#endif
6287+
6288 switch (r_type) {
6289 case R_386_NONE:
6290 case R_386_PC32:
6291@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb)
6292
6293 static void emit_relocs(int as_text)
6294 {
6295- int i;
6296+ unsigned int i;
6297 /* Count how many relocations I have and allocate space for them. */
6298 reloc_count = 0;
6299 walk_relocs(count_reloc);
6300@@ -665,6 +725,7 @@ int main(int argc, char **argv)
6301 fname, strerror(errno));
6302 }
6303 read_ehdr(fp);
6304+ read_phdrs(fp);
6305 read_shdrs(fp);
6306 read_strtabs(fp);
6307 read_symtabs(fp);
6308diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
6309index 4d3ff03..e4972ff 100644
6310--- a/arch/x86/boot/cpucheck.c
6311+++ b/arch/x86/boot/cpucheck.c
6312@@ -74,7 +74,7 @@ static int has_fpu(void)
6313 u16 fcw = -1, fsw = -1;
6314 u32 cr0;
6315
6316- asm("movl %%cr0,%0" : "=r" (cr0));
6317+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
6318 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6319 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6320 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6321@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6322 {
6323 u32 f0, f1;
6324
6325- asm("pushfl ; "
6326+ asm volatile("pushfl ; "
6327 "pushfl ; "
6328 "popl %0 ; "
6329 "movl %0,%1 ; "
6330@@ -115,7 +115,7 @@ static void get_flags(void)
6331 set_bit(X86_FEATURE_FPU, cpu.flags);
6332
6333 if (has_eflag(X86_EFLAGS_ID)) {
6334- asm("cpuid"
6335+ asm volatile("cpuid"
6336 : "=a" (max_intel_level),
6337 "=b" (cpu_vendor[0]),
6338 "=d" (cpu_vendor[1]),
6339@@ -124,7 +124,7 @@ static void get_flags(void)
6340
6341 if (max_intel_level >= 0x00000001 &&
6342 max_intel_level <= 0x0000ffff) {
6343- asm("cpuid"
6344+ asm volatile("cpuid"
6345 : "=a" (tfms),
6346 "=c" (cpu.flags[4]),
6347 "=d" (cpu.flags[0])
6348@@ -136,7 +136,7 @@ static void get_flags(void)
6349 cpu.model += ((tfms >> 16) & 0xf) << 4;
6350 }
6351
6352- asm("cpuid"
6353+ asm volatile("cpuid"
6354 : "=a" (max_amd_level)
6355 : "a" (0x80000000)
6356 : "ebx", "ecx", "edx");
6357@@ -144,7 +144,7 @@ static void get_flags(void)
6358 if (max_amd_level >= 0x80000001 &&
6359 max_amd_level <= 0x8000ffff) {
6360 u32 eax = 0x80000001;
6361- asm("cpuid"
6362+ asm volatile("cpuid"
6363 : "+a" (eax),
6364 "=c" (cpu.flags[6]),
6365 "=d" (cpu.flags[1])
6366@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6367 u32 ecx = MSR_K7_HWCR;
6368 u32 eax, edx;
6369
6370- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6371+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6372 eax &= ~(1 << 15);
6373- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6374+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6375
6376 get_flags(); /* Make sure it really did something */
6377 err = check_flags();
6378@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6379 u32 ecx = MSR_VIA_FCR;
6380 u32 eax, edx;
6381
6382- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6383+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6384 eax |= (1<<1)|(1<<7);
6385- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6386+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6387
6388 set_bit(X86_FEATURE_CX8, cpu.flags);
6389 err = check_flags();
6390@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6391 u32 eax, edx;
6392 u32 level = 1;
6393
6394- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6395- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6396- asm("cpuid"
6397+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6398+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6399+ asm volatile("cpuid"
6400 : "+a" (level), "=d" (cpu.flags[0])
6401 : : "ecx", "ebx");
6402- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6403+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6404
6405 err = check_flags();
6406 }
6407diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
6408index bdb4d45..0476680 100644
6409--- a/arch/x86/boot/header.S
6410+++ b/arch/x86/boot/header.S
6411@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
6412 # single linked list of
6413 # struct setup_data
6414
6415-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6416+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6417
6418 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6419 #define VO_INIT_SIZE (VO__end - VO__text)
6420diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
6421index db75d07..8e6d0af 100644
6422--- a/arch/x86/boot/memory.c
6423+++ b/arch/x86/boot/memory.c
6424@@ -19,7 +19,7 @@
6425
6426 static int detect_memory_e820(void)
6427 {
6428- int count = 0;
6429+ unsigned int count = 0;
6430 struct biosregs ireg, oreg;
6431 struct e820entry *desc = boot_params.e820_map;
6432 static struct e820entry buf; /* static so it is zeroed */
6433diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
6434index 11e8c6e..fdbb1ed 100644
6435--- a/arch/x86/boot/video-vesa.c
6436+++ b/arch/x86/boot/video-vesa.c
6437@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6438
6439 boot_params.screen_info.vesapm_seg = oreg.es;
6440 boot_params.screen_info.vesapm_off = oreg.di;
6441+ boot_params.screen_info.vesapm_size = oreg.cx;
6442 }
6443
6444 /*
6445diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
6446index 43eda28..5ab5fdb 100644
6447--- a/arch/x86/boot/video.c
6448+++ b/arch/x86/boot/video.c
6449@@ -96,7 +96,7 @@ static void store_mode_params(void)
6450 static unsigned int get_entry(void)
6451 {
6452 char entry_buf[4];
6453- int i, len = 0;
6454+ unsigned int i, len = 0;
6455 int key;
6456 unsigned int v;
6457
6458diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
6459index 5b577d5..3c1fed4 100644
6460--- a/arch/x86/crypto/aes-x86_64-asm_64.S
6461+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
6462@@ -8,6 +8,8 @@
6463 * including this sentence is retained in full.
6464 */
6465
6466+#include <asm/alternative-asm.h>
6467+
6468 .extern crypto_ft_tab
6469 .extern crypto_it_tab
6470 .extern crypto_fl_tab
6471@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
6472 je B192; \
6473 leaq 32(r9),r9;
6474
6475+#define ret pax_force_retaddr 0, 1; ret
6476+
6477 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
6478 movq r1,r2; \
6479 movq r3,r4; \
6480diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
6481index be6d9e3..21fbbca 100644
6482--- a/arch/x86/crypto/aesni-intel_asm.S
6483+++ b/arch/x86/crypto/aesni-intel_asm.S
6484@@ -31,6 +31,7 @@
6485
6486 #include <linux/linkage.h>
6487 #include <asm/inst.h>
6488+#include <asm/alternative-asm.h>
6489
6490 #ifdef __x86_64__
6491 .data
6492@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
6493 pop %r14
6494 pop %r13
6495 pop %r12
6496+ pax_force_retaddr 0, 1
6497 ret
6498+ENDPROC(aesni_gcm_dec)
6499
6500
6501 /*****************************************************************************
6502@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
6503 pop %r14
6504 pop %r13
6505 pop %r12
6506+ pax_force_retaddr 0, 1
6507 ret
6508+ENDPROC(aesni_gcm_enc)
6509
6510 #endif
6511
6512@@ -1714,6 +1719,7 @@ _key_expansion_256a:
6513 pxor %xmm1, %xmm0
6514 movaps %xmm0, (TKEYP)
6515 add $0x10, TKEYP
6516+ pax_force_retaddr_bts
6517 ret
6518
6519 .align 4
6520@@ -1738,6 +1744,7 @@ _key_expansion_192a:
6521 shufps $0b01001110, %xmm2, %xmm1
6522 movaps %xmm1, 0x10(TKEYP)
6523 add $0x20, TKEYP
6524+ pax_force_retaddr_bts
6525 ret
6526
6527 .align 4
6528@@ -1757,6 +1764,7 @@ _key_expansion_192b:
6529
6530 movaps %xmm0, (TKEYP)
6531 add $0x10, TKEYP
6532+ pax_force_retaddr_bts
6533 ret
6534
6535 .align 4
6536@@ -1769,6 +1777,7 @@ _key_expansion_256b:
6537 pxor %xmm1, %xmm2
6538 movaps %xmm2, (TKEYP)
6539 add $0x10, TKEYP
6540+ pax_force_retaddr_bts
6541 ret
6542
6543 /*
6544@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
6545 #ifndef __x86_64__
6546 popl KEYP
6547 #endif
6548+ pax_force_retaddr 0, 1
6549 ret
6550+ENDPROC(aesni_set_key)
6551
6552 /*
6553 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
6554@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
6555 popl KLEN
6556 popl KEYP
6557 #endif
6558+ pax_force_retaddr 0, 1
6559 ret
6560+ENDPROC(aesni_enc)
6561
6562 /*
6563 * _aesni_enc1: internal ABI
6564@@ -1959,6 +1972,7 @@ _aesni_enc1:
6565 AESENC KEY STATE
6566 movaps 0x70(TKEYP), KEY
6567 AESENCLAST KEY STATE
6568+ pax_force_retaddr_bts
6569 ret
6570
6571 /*
6572@@ -2067,6 +2081,7 @@ _aesni_enc4:
6573 AESENCLAST KEY STATE2
6574 AESENCLAST KEY STATE3
6575 AESENCLAST KEY STATE4
6576+ pax_force_retaddr_bts
6577 ret
6578
6579 /*
6580@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
6581 popl KLEN
6582 popl KEYP
6583 #endif
6584+ pax_force_retaddr 0, 1
6585 ret
6586+ENDPROC(aesni_dec)
6587
6588 /*
6589 * _aesni_dec1: internal ABI
6590@@ -2146,6 +2163,7 @@ _aesni_dec1:
6591 AESDEC KEY STATE
6592 movaps 0x70(TKEYP), KEY
6593 AESDECLAST KEY STATE
6594+ pax_force_retaddr_bts
6595 ret
6596
6597 /*
6598@@ -2254,6 +2272,7 @@ _aesni_dec4:
6599 AESDECLAST KEY STATE2
6600 AESDECLAST KEY STATE3
6601 AESDECLAST KEY STATE4
6602+ pax_force_retaddr_bts
6603 ret
6604
6605 /*
6606@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
6607 popl KEYP
6608 popl LEN
6609 #endif
6610+ pax_force_retaddr 0, 1
6611 ret
6612+ENDPROC(aesni_ecb_enc)
6613
6614 /*
6615 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6616@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
6617 popl KEYP
6618 popl LEN
6619 #endif
6620+ pax_force_retaddr 0, 1
6621 ret
6622+ENDPROC(aesni_ecb_dec)
6623
6624 /*
6625 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6626@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
6627 popl LEN
6628 popl IVP
6629 #endif
6630+ pax_force_retaddr 0, 1
6631 ret
6632+ENDPROC(aesni_cbc_enc)
6633
6634 /*
6635 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6636@@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
6637 popl LEN
6638 popl IVP
6639 #endif
6640+ pax_force_retaddr 0, 1
6641 ret
6642+ENDPROC(aesni_cbc_dec)
6643
6644 #ifdef __x86_64__
6645 .align 16
6646@@ -2524,6 +2551,7 @@ _aesni_inc_init:
6647 mov $1, TCTR_LOW
6648 MOVQ_R64_XMM TCTR_LOW INC
6649 MOVQ_R64_XMM CTR TCTR_LOW
6650+ pax_force_retaddr_bts
6651 ret
6652
6653 /*
6654@@ -2552,6 +2580,7 @@ _aesni_inc:
6655 .Linc_low:
6656 movaps CTR, IV
6657 PSHUFB_XMM BSWAP_MASK IV
6658+ pax_force_retaddr_bts
6659 ret
6660
6661 /*
6662@@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
6663 .Lctr_enc_ret:
6664 movups IV, (IVP)
6665 .Lctr_enc_just_ret:
6666+ pax_force_retaddr 0, 1
6667 ret
6668+ENDPROC(aesni_ctr_enc)
6669 #endif
6670diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
6671index 391d245..67f35c2 100644
6672--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
6673+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
6674@@ -20,6 +20,8 @@
6675 *
6676 */
6677
6678+#include <asm/alternative-asm.h>
6679+
6680 .file "blowfish-x86_64-asm.S"
6681 .text
6682
6683@@ -151,9 +153,11 @@ __blowfish_enc_blk:
6684 jnz __enc_xor;
6685
6686 write_block();
6687+ pax_force_retaddr 0, 1
6688 ret;
6689 __enc_xor:
6690 xor_block();
6691+ pax_force_retaddr 0, 1
6692 ret;
6693
6694 .align 8
6695@@ -188,6 +192,7 @@ blowfish_dec_blk:
6696
6697 movq %r11, %rbp;
6698
6699+ pax_force_retaddr 0, 1
6700 ret;
6701
6702 /**********************************************************************
6703@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
6704
6705 popq %rbx;
6706 popq %rbp;
6707+ pax_force_retaddr 0, 1
6708 ret;
6709
6710 __enc_xor4:
6711@@ -349,6 +355,7 @@ __enc_xor4:
6712
6713 popq %rbx;
6714 popq %rbp;
6715+ pax_force_retaddr 0, 1
6716 ret;
6717
6718 .align 8
6719@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
6720 popq %rbx;
6721 popq %rbp;
6722
6723+ pax_force_retaddr 0, 1
6724 ret;
6725
6726diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6727index 6214a9b..1f4fc9a 100644
6728--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
6729+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6730@@ -1,3 +1,5 @@
6731+#include <asm/alternative-asm.h>
6732+
6733 # enter ECRYPT_encrypt_bytes
6734 .text
6735 .p2align 5
6736@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
6737 add %r11,%rsp
6738 mov %rdi,%rax
6739 mov %rsi,%rdx
6740+ pax_force_retaddr 0, 1
6741 ret
6742 # bytesatleast65:
6743 ._bytesatleast65:
6744@@ -891,6 +894,7 @@ ECRYPT_keysetup:
6745 add %r11,%rsp
6746 mov %rdi,%rax
6747 mov %rsi,%rdx
6748+ pax_force_retaddr
6749 ret
6750 # enter ECRYPT_ivsetup
6751 .text
6752@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
6753 add %r11,%rsp
6754 mov %rdi,%rax
6755 mov %rsi,%rdx
6756+ pax_force_retaddr
6757 ret
6758diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
6759index b2c2f57..8470cab 100644
6760--- a/arch/x86/crypto/sha1_ssse3_asm.S
6761+++ b/arch/x86/crypto/sha1_ssse3_asm.S
6762@@ -28,6 +28,8 @@
6763 * (at your option) any later version.
6764 */
6765
6766+#include <asm/alternative-asm.h>
6767+
6768 #define CTX %rdi // arg1
6769 #define BUF %rsi // arg2
6770 #define CNT %rdx // arg3
6771@@ -104,6 +106,7 @@
6772 pop %r12
6773 pop %rbp
6774 pop %rbx
6775+ pax_force_retaddr 0, 1
6776 ret
6777
6778 .size \name, .-\name
6779diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6780index 5b012a2..36d5364 100644
6781--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6782+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6783@@ -20,6 +20,8 @@
6784 *
6785 */
6786
6787+#include <asm/alternative-asm.h>
6788+
6789 .file "twofish-x86_64-asm-3way.S"
6790 .text
6791
6792@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
6793 popq %r13;
6794 popq %r14;
6795 popq %r15;
6796+ pax_force_retaddr 0, 1
6797 ret;
6798
6799 __enc_xor3:
6800@@ -271,6 +274,7 @@ __enc_xor3:
6801 popq %r13;
6802 popq %r14;
6803 popq %r15;
6804+ pax_force_retaddr 0, 1
6805 ret;
6806
6807 .global twofish_dec_blk_3way
6808@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
6809 popq %r13;
6810 popq %r14;
6811 popq %r15;
6812+ pax_force_retaddr 0, 1
6813 ret;
6814
6815diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
6816index 7bcf3fc..f53832f 100644
6817--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
6818+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
6819@@ -21,6 +21,7 @@
6820 .text
6821
6822 #include <asm/asm-offsets.h>
6823+#include <asm/alternative-asm.h>
6824
6825 #define a_offset 0
6826 #define b_offset 4
6827@@ -268,6 +269,7 @@ twofish_enc_blk:
6828
6829 popq R1
6830 movq $1,%rax
6831+ pax_force_retaddr 0, 1
6832 ret
6833
6834 twofish_dec_blk:
6835@@ -319,4 +321,5 @@ twofish_dec_blk:
6836
6837 popq R1
6838 movq $1,%rax
6839+ pax_force_retaddr 0, 1
6840 ret
6841diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
6842index fd84387..0b4af7d 100644
6843--- a/arch/x86/ia32/ia32_aout.c
6844+++ b/arch/x86/ia32/ia32_aout.c
6845@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
6846 unsigned long dump_start, dump_size;
6847 struct user32 dump;
6848
6849+ memset(&dump, 0, sizeof(dump));
6850+
6851 fs = get_fs();
6852 set_fs(KERNEL_DS);
6853 has_dumped = 1;
6854diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
6855index 6557769..ef6ae89 100644
6856--- a/arch/x86/ia32/ia32_signal.c
6857+++ b/arch/x86/ia32/ia32_signal.c
6858@@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
6859 }
6860 seg = get_fs();
6861 set_fs(KERNEL_DS);
6862- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
6863+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
6864 set_fs(seg);
6865 if (ret >= 0 && uoss_ptr) {
6866 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
6867@@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
6868 */
6869 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6870 size_t frame_size,
6871- void **fpstate)
6872+ void __user **fpstate)
6873 {
6874 unsigned long sp;
6875
6876@@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6877
6878 if (used_math()) {
6879 sp = sp - sig_xstate_ia32_size;
6880- *fpstate = (struct _fpstate_ia32 *) sp;
6881+ *fpstate = (struct _fpstate_ia32 __user *) sp;
6882 if (save_i387_xstate_ia32(*fpstate) < 0)
6883 return (void __user *) -1L;
6884 }
6885@@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6886 sp -= frame_size;
6887 /* Align the stack pointer according to the i386 ABI,
6888 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6889- sp = ((sp + 4) & -16ul) - 4;
6890+ sp = ((sp - 12) & -16ul) - 4;
6891 return (void __user *) sp;
6892 }
6893
6894@@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
6895 * These are actually not used anymore, but left because some
6896 * gdb versions depend on them as a marker.
6897 */
6898- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6899+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6900 } put_user_catch(err);
6901
6902 if (err)
6903@@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6904 0xb8,
6905 __NR_ia32_rt_sigreturn,
6906 0x80cd,
6907- 0,
6908+ 0
6909 };
6910
6911 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6912@@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6913
6914 if (ka->sa.sa_flags & SA_RESTORER)
6915 restorer = ka->sa.sa_restorer;
6916+ else if (current->mm->context.vdso)
6917+ /* Return stub is in 32bit vsyscall page */
6918+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6919 else
6920- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6921- rt_sigreturn);
6922+ restorer = &frame->retcode;
6923 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6924
6925 /*
6926 * Not actually used anymore, but left because some gdb
6927 * versions need it.
6928 */
6929- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6930+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6931 } put_user_catch(err);
6932
6933 if (err)
6934diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
6935index a6253ec..4ad2120 100644
6936--- a/arch/x86/ia32/ia32entry.S
6937+++ b/arch/x86/ia32/ia32entry.S
6938@@ -13,7 +13,9 @@
6939 #include <asm/thread_info.h>
6940 #include <asm/segment.h>
6941 #include <asm/irqflags.h>
6942+#include <asm/pgtable.h>
6943 #include <linux/linkage.h>
6944+#include <asm/alternative-asm.h>
6945
6946 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6947 #include <linux/elf-em.h>
6948@@ -95,6 +97,32 @@ ENTRY(native_irq_enable_sysexit)
6949 ENDPROC(native_irq_enable_sysexit)
6950 #endif
6951
6952+ .macro pax_enter_kernel_user
6953+ pax_set_fptr_mask
6954+#ifdef CONFIG_PAX_MEMORY_UDEREF
6955+ call pax_enter_kernel_user
6956+#endif
6957+ .endm
6958+
6959+ .macro pax_exit_kernel_user
6960+#ifdef CONFIG_PAX_MEMORY_UDEREF
6961+ call pax_exit_kernel_user
6962+#endif
6963+#ifdef CONFIG_PAX_RANDKSTACK
6964+ pushq %rax
6965+ pushq %r11
6966+ call pax_randomize_kstack
6967+ popq %r11
6968+ popq %rax
6969+#endif
6970+ .endm
6971+
6972+.macro pax_erase_kstack
6973+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6974+ call pax_erase_kstack
6975+#endif
6976+.endm
6977+
6978 /*
6979 * 32bit SYSENTER instruction entry.
6980 *
6981@@ -121,12 +149,6 @@ ENTRY(ia32_sysenter_target)
6982 CFI_REGISTER rsp,rbp
6983 SWAPGS_UNSAFE_STACK
6984 movq PER_CPU_VAR(kernel_stack), %rsp
6985- addq $(KERNEL_STACK_OFFSET),%rsp
6986- /*
6987- * No need to follow this irqs on/off section: the syscall
6988- * disabled irqs, here we enable it straight after entry:
6989- */
6990- ENABLE_INTERRUPTS(CLBR_NONE)
6991 movl %ebp,%ebp /* zero extension */
6992 pushq_cfi $__USER32_DS
6993 /*CFI_REL_OFFSET ss,0*/
6994@@ -134,25 +156,39 @@ ENTRY(ia32_sysenter_target)
6995 CFI_REL_OFFSET rsp,0
6996 pushfq_cfi
6997 /*CFI_REL_OFFSET rflags,0*/
6998- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6999- CFI_REGISTER rip,r10
7000+ orl $X86_EFLAGS_IF,(%rsp)
7001+ GET_THREAD_INFO(%r11)
7002+ movl TI_sysenter_return(%r11), %r11d
7003+ CFI_REGISTER rip,r11
7004 pushq_cfi $__USER32_CS
7005 /*CFI_REL_OFFSET cs,0*/
7006 movl %eax, %eax
7007- pushq_cfi %r10
7008+ pushq_cfi %r11
7009 CFI_REL_OFFSET rip,0
7010 pushq_cfi %rax
7011 cld
7012 SAVE_ARGS 0,1,0
7013+ pax_enter_kernel_user
7014+ /*
7015+ * No need to follow this irqs on/off section: the syscall
7016+ * disabled irqs, here we enable it straight after entry:
7017+ */
7018+ ENABLE_INTERRUPTS(CLBR_NONE)
7019 /* no need to do an access_ok check here because rbp has been
7020 32bit zero extended */
7021+
7022+#ifdef CONFIG_PAX_MEMORY_UDEREF
7023+ mov $PAX_USER_SHADOW_BASE,%r11
7024+ add %r11,%rbp
7025+#endif
7026+
7027 1: movl (%rbp),%ebp
7028 .section __ex_table,"a"
7029 .quad 1b,ia32_badarg
7030 .previous
7031- GET_THREAD_INFO(%r10)
7032- orl $TS_COMPAT,TI_status(%r10)
7033- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
7034+ GET_THREAD_INFO(%r11)
7035+ orl $TS_COMPAT,TI_status(%r11)
7036+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
7037 CFI_REMEMBER_STATE
7038 jnz sysenter_tracesys
7039 cmpq $(IA32_NR_syscalls-1),%rax
7040@@ -162,13 +198,15 @@ sysenter_do_call:
7041 sysenter_dispatch:
7042 call *ia32_sys_call_table(,%rax,8)
7043 movq %rax,RAX-ARGOFFSET(%rsp)
7044- GET_THREAD_INFO(%r10)
7045+ GET_THREAD_INFO(%r11)
7046 DISABLE_INTERRUPTS(CLBR_NONE)
7047 TRACE_IRQS_OFF
7048- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
7049+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
7050 jnz sysexit_audit
7051 sysexit_from_sys_call:
7052- andl $~TS_COMPAT,TI_status(%r10)
7053+ pax_exit_kernel_user
7054+ pax_erase_kstack
7055+ andl $~TS_COMPAT,TI_status(%r11)
7056 /* clear IF, that popfq doesn't enable interrupts early */
7057 andl $~0x200,EFLAGS-R11(%rsp)
7058 movl RIP-R11(%rsp),%edx /* User %eip */
7059@@ -194,6 +232,9 @@ sysexit_from_sys_call:
7060 movl %eax,%esi /* 2nd arg: syscall number */
7061 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
7062 call audit_syscall_entry
7063+
7064+ pax_erase_kstack
7065+
7066 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
7067 cmpq $(IA32_NR_syscalls-1),%rax
7068 ja ia32_badsys
7069@@ -205,7 +246,7 @@ sysexit_from_sys_call:
7070 .endm
7071
7072 .macro auditsys_exit exit
7073- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
7074+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
7075 jnz ia32_ret_from_sys_call
7076 TRACE_IRQS_ON
7077 sti
7078@@ -215,12 +256,12 @@ sysexit_from_sys_call:
7079 movzbl %al,%edi /* zero-extend that into %edi */
7080 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
7081 call audit_syscall_exit
7082- GET_THREAD_INFO(%r10)
7083+ GET_THREAD_INFO(%r11)
7084 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
7085 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
7086 cli
7087 TRACE_IRQS_OFF
7088- testl %edi,TI_flags(%r10)
7089+ testl %edi,TI_flags(%r11)
7090 jz \exit
7091 CLEAR_RREGS -ARGOFFSET
7092 jmp int_with_check
7093@@ -238,7 +279,7 @@ sysexit_audit:
7094
7095 sysenter_tracesys:
7096 #ifdef CONFIG_AUDITSYSCALL
7097- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
7098+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
7099 jz sysenter_auditsys
7100 #endif
7101 SAVE_REST
7102@@ -246,6 +287,9 @@ sysenter_tracesys:
7103 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
7104 movq %rsp,%rdi /* &pt_regs -> arg1 */
7105 call syscall_trace_enter
7106+
7107+ pax_erase_kstack
7108+
7109 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
7110 RESTORE_REST
7111 cmpq $(IA32_NR_syscalls-1),%rax
7112@@ -277,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
7113 ENTRY(ia32_cstar_target)
7114 CFI_STARTPROC32 simple
7115 CFI_SIGNAL_FRAME
7116- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
7117+ CFI_DEF_CFA rsp,0
7118 CFI_REGISTER rip,rcx
7119 /*CFI_REGISTER rflags,r11*/
7120 SWAPGS_UNSAFE_STACK
7121 movl %esp,%r8d
7122 CFI_REGISTER rsp,r8
7123 movq PER_CPU_VAR(kernel_stack),%rsp
7124+ SAVE_ARGS 8*6,0,0
7125+ pax_enter_kernel_user
7126 /*
7127 * No need to follow this irqs on/off section: the syscall
7128 * disabled irqs and here we enable it straight after entry:
7129 */
7130 ENABLE_INTERRUPTS(CLBR_NONE)
7131- SAVE_ARGS 8,0,0
7132 movl %eax,%eax /* zero extension */
7133 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
7134 movq %rcx,RIP-ARGOFFSET(%rsp)
7135@@ -305,13 +350,19 @@ ENTRY(ia32_cstar_target)
7136 /* no need to do an access_ok check here because r8 has been
7137 32bit zero extended */
7138 /* hardware stack frame is complete now */
7139+
7140+#ifdef CONFIG_PAX_MEMORY_UDEREF
7141+ mov $PAX_USER_SHADOW_BASE,%r11
7142+ add %r11,%r8
7143+#endif
7144+
7145 1: movl (%r8),%r9d
7146 .section __ex_table,"a"
7147 .quad 1b,ia32_badarg
7148 .previous
7149- GET_THREAD_INFO(%r10)
7150- orl $TS_COMPAT,TI_status(%r10)
7151- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
7152+ GET_THREAD_INFO(%r11)
7153+ orl $TS_COMPAT,TI_status(%r11)
7154+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
7155 CFI_REMEMBER_STATE
7156 jnz cstar_tracesys
7157 cmpq $IA32_NR_syscalls-1,%rax
7158@@ -321,13 +372,15 @@ cstar_do_call:
7159 cstar_dispatch:
7160 call *ia32_sys_call_table(,%rax,8)
7161 movq %rax,RAX-ARGOFFSET(%rsp)
7162- GET_THREAD_INFO(%r10)
7163+ GET_THREAD_INFO(%r11)
7164 DISABLE_INTERRUPTS(CLBR_NONE)
7165 TRACE_IRQS_OFF
7166- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
7167+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
7168 jnz sysretl_audit
7169 sysretl_from_sys_call:
7170- andl $~TS_COMPAT,TI_status(%r10)
7171+ pax_exit_kernel_user
7172+ pax_erase_kstack
7173+ andl $~TS_COMPAT,TI_status(%r11)
7174 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
7175 movl RIP-ARGOFFSET(%rsp),%ecx
7176 CFI_REGISTER rip,rcx
7177@@ -355,7 +408,7 @@ sysretl_audit:
7178
7179 cstar_tracesys:
7180 #ifdef CONFIG_AUDITSYSCALL
7181- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
7182+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
7183 jz cstar_auditsys
7184 #endif
7185 xchgl %r9d,%ebp
7186@@ -364,6 +417,9 @@ cstar_tracesys:
7187 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
7188 movq %rsp,%rdi /* &pt_regs -> arg1 */
7189 call syscall_trace_enter
7190+
7191+ pax_erase_kstack
7192+
7193 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
7194 RESTORE_REST
7195 xchgl %ebp,%r9d
7196@@ -409,20 +465,21 @@ ENTRY(ia32_syscall)
7197 CFI_REL_OFFSET rip,RIP-RIP
7198 PARAVIRT_ADJUST_EXCEPTION_FRAME
7199 SWAPGS
7200- /*
7201- * No need to follow this irqs on/off section: the syscall
7202- * disabled irqs and here we enable it straight after entry:
7203- */
7204- ENABLE_INTERRUPTS(CLBR_NONE)
7205 movl %eax,%eax
7206 pushq_cfi %rax
7207 cld
7208 /* note the registers are not zero extended to the sf.
7209 this could be a problem. */
7210 SAVE_ARGS 0,1,0
7211- GET_THREAD_INFO(%r10)
7212- orl $TS_COMPAT,TI_status(%r10)
7213- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
7214+ pax_enter_kernel_user
7215+ /*
7216+ * No need to follow this irqs on/off section: the syscall
7217+ * disabled irqs and here we enable it straight after entry:
7218+ */
7219+ ENABLE_INTERRUPTS(CLBR_NONE)
7220+ GET_THREAD_INFO(%r11)
7221+ orl $TS_COMPAT,TI_status(%r11)
7222+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
7223 jnz ia32_tracesys
7224 cmpq $(IA32_NR_syscalls-1),%rax
7225 ja ia32_badsys
7226@@ -441,6 +498,9 @@ ia32_tracesys:
7227 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
7228 movq %rsp,%rdi /* &pt_regs -> arg1 */
7229 call syscall_trace_enter
7230+
7231+ pax_erase_kstack
7232+
7233 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
7234 RESTORE_REST
7235 cmpq $(IA32_NR_syscalls-1),%rax
7236@@ -455,6 +515,7 @@ ia32_badsys:
7237
7238 quiet_ni_syscall:
7239 movq $-ENOSYS,%rax
7240+ pax_force_retaddr
7241 ret
7242 CFI_ENDPROC
7243
7244diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
7245index f6f5c53..b358b28 100644
7246--- a/arch/x86/ia32/sys_ia32.c
7247+++ b/arch/x86/ia32/sys_ia32.c
7248@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
7249 */
7250 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
7251 {
7252- typeof(ubuf->st_uid) uid = 0;
7253- typeof(ubuf->st_gid) gid = 0;
7254+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
7255+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
7256 SET_UID(uid, stat->uid);
7257 SET_GID(gid, stat->gid);
7258 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
7259@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
7260 }
7261 set_fs(KERNEL_DS);
7262 ret = sys_rt_sigprocmask(how,
7263- set ? (sigset_t __user *)&s : NULL,
7264- oset ? (sigset_t __user *)&s : NULL,
7265+ set ? (sigset_t __force_user *)&s : NULL,
7266+ oset ? (sigset_t __force_user *)&s : NULL,
7267 sigsetsize);
7268 set_fs(old_fs);
7269 if (ret)
7270@@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
7271 return alarm_setitimer(seconds);
7272 }
7273
7274-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
7275+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
7276 int options)
7277 {
7278 return compat_sys_wait4(pid, stat_addr, options, NULL);
7279@@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
7280 mm_segment_t old_fs = get_fs();
7281
7282 set_fs(KERNEL_DS);
7283- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
7284+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
7285 set_fs(old_fs);
7286 if (put_compat_timespec(&t, interval))
7287 return -EFAULT;
7288@@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
7289 mm_segment_t old_fs = get_fs();
7290
7291 set_fs(KERNEL_DS);
7292- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
7293+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
7294 set_fs(old_fs);
7295 if (!ret) {
7296 switch (_NSIG_WORDS) {
7297@@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
7298 if (copy_siginfo_from_user32(&info, uinfo))
7299 return -EFAULT;
7300 set_fs(KERNEL_DS);
7301- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
7302+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
7303 set_fs(old_fs);
7304 return ret;
7305 }
7306@@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
7307 return -EFAULT;
7308
7309 set_fs(KERNEL_DS);
7310- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
7311+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
7312 count);
7313 set_fs(old_fs);
7314
7315diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
7316index 091508b..7692c6f 100644
7317--- a/arch/x86/include/asm/alternative-asm.h
7318+++ b/arch/x86/include/asm/alternative-asm.h
7319@@ -4,10 +4,10 @@
7320
7321 #ifdef CONFIG_SMP
7322 .macro LOCK_PREFIX
7323-1: lock
7324+672: lock
7325 .section .smp_locks,"a"
7326 .balign 4
7327- .long 1b - .
7328+ .long 672b - .
7329 .previous
7330 .endm
7331 #else
7332@@ -15,6 +15,45 @@
7333 .endm
7334 #endif
7335
7336+#ifdef KERNEXEC_PLUGIN
7337+ .macro pax_force_retaddr_bts rip=0
7338+ btsq $63,\rip(%rsp)
7339+ .endm
7340+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
7341+ .macro pax_force_retaddr rip=0, reload=0
7342+ btsq $63,\rip(%rsp)
7343+ .endm
7344+ .macro pax_force_fptr ptr
7345+ btsq $63,\ptr
7346+ .endm
7347+ .macro pax_set_fptr_mask
7348+ .endm
7349+#endif
7350+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
7351+ .macro pax_force_retaddr rip=0, reload=0
7352+ .if \reload
7353+ pax_set_fptr_mask
7354+ .endif
7355+ orq %r10,\rip(%rsp)
7356+ .endm
7357+ .macro pax_force_fptr ptr
7358+ orq %r10,\ptr
7359+ .endm
7360+ .macro pax_set_fptr_mask
7361+ movabs $0x8000000000000000,%r10
7362+ .endm
7363+#endif
7364+#else
7365+ .macro pax_force_retaddr rip=0, reload=0
7366+ .endm
7367+ .macro pax_force_fptr ptr
7368+ .endm
7369+ .macro pax_force_retaddr_bts rip=0
7370+ .endm
7371+ .macro pax_set_fptr_mask
7372+ .endm
7373+#endif
7374+
7375 .macro altinstruction_entry orig alt feature orig_len alt_len
7376 .long \orig - .
7377 .long \alt - .
7378diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
7379index 37ad100..7d47faa 100644
7380--- a/arch/x86/include/asm/alternative.h
7381+++ b/arch/x86/include/asm/alternative.h
7382@@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
7383 ".section .discard,\"aw\",@progbits\n" \
7384 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
7385 ".previous\n" \
7386- ".section .altinstr_replacement, \"ax\"\n" \
7387+ ".section .altinstr_replacement, \"a\"\n" \
7388 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
7389 ".previous"
7390
7391diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
7392index 1a6c09a..fec2432 100644
7393--- a/arch/x86/include/asm/apic.h
7394+++ b/arch/x86/include/asm/apic.h
7395@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
7396
7397 #ifdef CONFIG_X86_LOCAL_APIC
7398
7399-extern unsigned int apic_verbosity;
7400+extern int apic_verbosity;
7401 extern int local_apic_timer_c2_ok;
7402
7403 extern int disable_apic;
7404diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
7405index 20370c6..a2eb9b0 100644
7406--- a/arch/x86/include/asm/apm.h
7407+++ b/arch/x86/include/asm/apm.h
7408@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
7409 __asm__ __volatile__(APM_DO_ZERO_SEGS
7410 "pushl %%edi\n\t"
7411 "pushl %%ebp\n\t"
7412- "lcall *%%cs:apm_bios_entry\n\t"
7413+ "lcall *%%ss:apm_bios_entry\n\t"
7414 "setc %%al\n\t"
7415 "popl %%ebp\n\t"
7416 "popl %%edi\n\t"
7417@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
7418 __asm__ __volatile__(APM_DO_ZERO_SEGS
7419 "pushl %%edi\n\t"
7420 "pushl %%ebp\n\t"
7421- "lcall *%%cs:apm_bios_entry\n\t"
7422+ "lcall *%%ss:apm_bios_entry\n\t"
7423 "setc %%bl\n\t"
7424 "popl %%ebp\n\t"
7425 "popl %%edi\n\t"
7426diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
7427index 58cb6d4..ca9010d 100644
7428--- a/arch/x86/include/asm/atomic.h
7429+++ b/arch/x86/include/asm/atomic.h
7430@@ -22,7 +22,18 @@
7431 */
7432 static inline int atomic_read(const atomic_t *v)
7433 {
7434- return (*(volatile int *)&(v)->counter);
7435+ return (*(volatile const int *)&(v)->counter);
7436+}
7437+
7438+/**
7439+ * atomic_read_unchecked - read atomic variable
7440+ * @v: pointer of type atomic_unchecked_t
7441+ *
7442+ * Atomically reads the value of @v.
7443+ */
7444+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7445+{
7446+ return (*(volatile const int *)&(v)->counter);
7447 }
7448
7449 /**
7450@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
7451 }
7452
7453 /**
7454+ * atomic_set_unchecked - set atomic variable
7455+ * @v: pointer of type atomic_unchecked_t
7456+ * @i: required value
7457+ *
7458+ * Atomically sets the value of @v to @i.
7459+ */
7460+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7461+{
7462+ v->counter = i;
7463+}
7464+
7465+/**
7466 * atomic_add - add integer to atomic variable
7467 * @i: integer value to add
7468 * @v: pointer of type atomic_t
7469@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
7470 */
7471 static inline void atomic_add(int i, atomic_t *v)
7472 {
7473- asm volatile(LOCK_PREFIX "addl %1,%0"
7474+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7475+
7476+#ifdef CONFIG_PAX_REFCOUNT
7477+ "jno 0f\n"
7478+ LOCK_PREFIX "subl %1,%0\n"
7479+ "int $4\n0:\n"
7480+ _ASM_EXTABLE(0b, 0b)
7481+#endif
7482+
7483+ : "+m" (v->counter)
7484+ : "ir" (i));
7485+}
7486+
7487+/**
7488+ * atomic_add_unchecked - add integer to atomic variable
7489+ * @i: integer value to add
7490+ * @v: pointer of type atomic_unchecked_t
7491+ *
7492+ * Atomically adds @i to @v.
7493+ */
7494+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7495+{
7496+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7497 : "+m" (v->counter)
7498 : "ir" (i));
7499 }
7500@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
7501 */
7502 static inline void atomic_sub(int i, atomic_t *v)
7503 {
7504- asm volatile(LOCK_PREFIX "subl %1,%0"
7505+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7506+
7507+#ifdef CONFIG_PAX_REFCOUNT
7508+ "jno 0f\n"
7509+ LOCK_PREFIX "addl %1,%0\n"
7510+ "int $4\n0:\n"
7511+ _ASM_EXTABLE(0b, 0b)
7512+#endif
7513+
7514+ : "+m" (v->counter)
7515+ : "ir" (i));
7516+}
7517+
7518+/**
7519+ * atomic_sub_unchecked - subtract integer from atomic variable
7520+ * @i: integer value to subtract
7521+ * @v: pointer of type atomic_unchecked_t
7522+ *
7523+ * Atomically subtracts @i from @v.
7524+ */
7525+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7526+{
7527+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7528 : "+m" (v->counter)
7529 : "ir" (i));
7530 }
7531@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7532 {
7533 unsigned char c;
7534
7535- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7536+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
7537+
7538+#ifdef CONFIG_PAX_REFCOUNT
7539+ "jno 0f\n"
7540+ LOCK_PREFIX "addl %2,%0\n"
7541+ "int $4\n0:\n"
7542+ _ASM_EXTABLE(0b, 0b)
7543+#endif
7544+
7545+ "sete %1\n"
7546 : "+m" (v->counter), "=qm" (c)
7547 : "ir" (i) : "memory");
7548 return c;
7549@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7550 */
7551 static inline void atomic_inc(atomic_t *v)
7552 {
7553- asm volatile(LOCK_PREFIX "incl %0"
7554+ asm volatile(LOCK_PREFIX "incl %0\n"
7555+
7556+#ifdef CONFIG_PAX_REFCOUNT
7557+ "jno 0f\n"
7558+ LOCK_PREFIX "decl %0\n"
7559+ "int $4\n0:\n"
7560+ _ASM_EXTABLE(0b, 0b)
7561+#endif
7562+
7563+ : "+m" (v->counter));
7564+}
7565+
7566+/**
7567+ * atomic_inc_unchecked - increment atomic variable
7568+ * @v: pointer of type atomic_unchecked_t
7569+ *
7570+ * Atomically increments @v by 1.
7571+ */
7572+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7573+{
7574+ asm volatile(LOCK_PREFIX "incl %0\n"
7575 : "+m" (v->counter));
7576 }
7577
7578@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
7579 */
7580 static inline void atomic_dec(atomic_t *v)
7581 {
7582- asm volatile(LOCK_PREFIX "decl %0"
7583+ asm volatile(LOCK_PREFIX "decl %0\n"
7584+
7585+#ifdef CONFIG_PAX_REFCOUNT
7586+ "jno 0f\n"
7587+ LOCK_PREFIX "incl %0\n"
7588+ "int $4\n0:\n"
7589+ _ASM_EXTABLE(0b, 0b)
7590+#endif
7591+
7592+ : "+m" (v->counter));
7593+}
7594+
7595+/**
7596+ * atomic_dec_unchecked - decrement atomic variable
7597+ * @v: pointer of type atomic_unchecked_t
7598+ *
7599+ * Atomically decrements @v by 1.
7600+ */
7601+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7602+{
7603+ asm volatile(LOCK_PREFIX "decl %0\n"
7604 : "+m" (v->counter));
7605 }
7606
7607@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
7608 {
7609 unsigned char c;
7610
7611- asm volatile(LOCK_PREFIX "decl %0; sete %1"
7612+ asm volatile(LOCK_PREFIX "decl %0\n"
7613+
7614+#ifdef CONFIG_PAX_REFCOUNT
7615+ "jno 0f\n"
7616+ LOCK_PREFIX "incl %0\n"
7617+ "int $4\n0:\n"
7618+ _ASM_EXTABLE(0b, 0b)
7619+#endif
7620+
7621+ "sete %1\n"
7622 : "+m" (v->counter), "=qm" (c)
7623 : : "memory");
7624 return c != 0;
7625@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
7626 {
7627 unsigned char c;
7628
7629- asm volatile(LOCK_PREFIX "incl %0; sete %1"
7630+ asm volatile(LOCK_PREFIX "incl %0\n"
7631+
7632+#ifdef CONFIG_PAX_REFCOUNT
7633+ "jno 0f\n"
7634+ LOCK_PREFIX "decl %0\n"
7635+ "int $4\n0:\n"
7636+ _ASM_EXTABLE(0b, 0b)
7637+#endif
7638+
7639+ "sete %1\n"
7640+ : "+m" (v->counter), "=qm" (c)
7641+ : : "memory");
7642+ return c != 0;
7643+}
7644+
7645+/**
7646+ * atomic_inc_and_test_unchecked - increment and test
7647+ * @v: pointer of type atomic_unchecked_t
7648+ *
7649+ * Atomically increments @v by 1
7650+ * and returns true if the result is zero, or false for all
7651+ * other cases.
7652+ */
7653+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7654+{
7655+ unsigned char c;
7656+
7657+ asm volatile(LOCK_PREFIX "incl %0\n"
7658+ "sete %1\n"
7659 : "+m" (v->counter), "=qm" (c)
7660 : : "memory");
7661 return c != 0;
7662@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
7663 {
7664 unsigned char c;
7665
7666- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7667+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
7668+
7669+#ifdef CONFIG_PAX_REFCOUNT
7670+ "jno 0f\n"
7671+ LOCK_PREFIX "subl %2,%0\n"
7672+ "int $4\n0:\n"
7673+ _ASM_EXTABLE(0b, 0b)
7674+#endif
7675+
7676+ "sets %1\n"
7677 : "+m" (v->counter), "=qm" (c)
7678 : "ir" (i) : "memory");
7679 return c;
7680@@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
7681 goto no_xadd;
7682 #endif
7683 /* Modern 486+ processor */
7684- return i + xadd(&v->counter, i);
7685+ return i + xadd_check_overflow(&v->counter, i);
7686
7687 #ifdef CONFIG_M386
7688 no_xadd: /* Legacy 386 processor */
7689@@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
7690 }
7691
7692 /**
7693+ * atomic_add_return_unchecked - add integer and return
7694+ * @i: integer value to add
7695+ * @v: pointer of type atomic_unchecked_t
7696+ *
7697+ * Atomically adds @i to @v and returns @i + @v
7698+ */
7699+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7700+{
7701+#ifdef CONFIG_M386
7702+ int __i;
7703+ unsigned long flags;
7704+ if (unlikely(boot_cpu_data.x86 <= 3))
7705+ goto no_xadd;
7706+#endif
7707+ /* Modern 486+ processor */
7708+ return i + xadd(&v->counter, i);
7709+
7710+#ifdef CONFIG_M386
7711+no_xadd: /* Legacy 386 processor */
7712+ raw_local_irq_save(flags);
7713+ __i = atomic_read_unchecked(v);
7714+ atomic_set_unchecked(v, i + __i);
7715+ raw_local_irq_restore(flags);
7716+ return i + __i;
7717+#endif
7718+}
7719+
7720+/**
7721 * atomic_sub_return - subtract integer and return
7722 * @v: pointer of type atomic_t
7723 * @i: integer value to subtract
7724@@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
7725 }
7726
7727 #define atomic_inc_return(v) (atomic_add_return(1, v))
7728+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7729+{
7730+ return atomic_add_return_unchecked(1, v);
7731+}
7732 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7733
7734 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7735@@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7736 return cmpxchg(&v->counter, old, new);
7737 }
7738
7739+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7740+{
7741+ return cmpxchg(&v->counter, old, new);
7742+}
7743+
7744 static inline int atomic_xchg(atomic_t *v, int new)
7745 {
7746 return xchg(&v->counter, new);
7747 }
7748
7749+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7750+{
7751+ return xchg(&v->counter, new);
7752+}
7753+
7754 /**
7755 * __atomic_add_unless - add unless the number is already a given value
7756 * @v: pointer of type atomic_t
7757@@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
7758 */
7759 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7760 {
7761- int c, old;
7762+ int c, old, new;
7763 c = atomic_read(v);
7764 for (;;) {
7765- if (unlikely(c == (u)))
7766+ if (unlikely(c == u))
7767 break;
7768- old = atomic_cmpxchg((v), c, c + (a));
7769+
7770+ asm volatile("addl %2,%0\n"
7771+
7772+#ifdef CONFIG_PAX_REFCOUNT
7773+ "jno 0f\n"
7774+ "subl %2,%0\n"
7775+ "int $4\n0:\n"
7776+ _ASM_EXTABLE(0b, 0b)
7777+#endif
7778+
7779+ : "=r" (new)
7780+ : "0" (c), "ir" (a));
7781+
7782+ old = atomic_cmpxchg(v, c, new);
7783 if (likely(old == c))
7784 break;
7785 c = old;
7786@@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7787 return c;
7788 }
7789
7790+/**
7791+ * atomic_inc_not_zero_hint - increment if not null
7792+ * @v: pointer of type atomic_t
7793+ * @hint: probable value of the atomic before the increment
7794+ *
7795+ * This version of atomic_inc_not_zero() gives a hint of probable
7796+ * value of the atomic. This helps processor to not read the memory
7797+ * before doing the atomic read/modify/write cycle, lowering
7798+ * number of bus transactions on some arches.
7799+ *
7800+ * Returns: 0 if increment was not done, 1 otherwise.
7801+ */
7802+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
7803+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
7804+{
7805+ int val, c = hint, new;
7806+
7807+ /* sanity test, should be removed by compiler if hint is a constant */
7808+ if (!hint)
7809+ return __atomic_add_unless(v, 1, 0);
7810+
7811+ do {
7812+ asm volatile("incl %0\n"
7813+
7814+#ifdef CONFIG_PAX_REFCOUNT
7815+ "jno 0f\n"
7816+ "decl %0\n"
7817+ "int $4\n0:\n"
7818+ _ASM_EXTABLE(0b, 0b)
7819+#endif
7820+
7821+ : "=r" (new)
7822+ : "0" (c));
7823+
7824+ val = atomic_cmpxchg(v, c, new);
7825+ if (val == c)
7826+ return 1;
7827+ c = val;
7828+ } while (c);
7829+
7830+ return 0;
7831+}
7832
7833 /*
7834 * atomic_dec_if_positive - decrement by 1 if old value positive
7835diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
7836index 24098aa..1e37723 100644
7837--- a/arch/x86/include/asm/atomic64_32.h
7838+++ b/arch/x86/include/asm/atomic64_32.h
7839@@ -12,6 +12,14 @@ typedef struct {
7840 u64 __aligned(8) counter;
7841 } atomic64_t;
7842
7843+#ifdef CONFIG_PAX_REFCOUNT
7844+typedef struct {
7845+ u64 __aligned(8) counter;
7846+} atomic64_unchecked_t;
7847+#else
7848+typedef atomic64_t atomic64_unchecked_t;
7849+#endif
7850+
7851 #define ATOMIC64_INIT(val) { (val) }
7852
7853 #ifdef CONFIG_X86_CMPXCHG64
7854@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
7855 }
7856
7857 /**
7858+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
7859+ * @p: pointer to type atomic64_unchecked_t
7860+ * @o: expected value
7861+ * @n: new value
7862+ *
7863+ * Atomically sets @v to @n if it was equal to @o and returns
7864+ * the old value.
7865+ */
7866+
7867+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
7868+{
7869+ return cmpxchg64(&v->counter, o, n);
7870+}
7871+
7872+/**
7873 * atomic64_xchg - xchg atomic64 variable
7874 * @v: pointer to type atomic64_t
7875 * @n: value to assign
7876@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
7877 }
7878
7879 /**
7880+ * atomic64_set_unchecked - set atomic64 variable
7881+ * @v: pointer to type atomic64_unchecked_t
7882+ * @n: value to assign
7883+ *
7884+ * Atomically sets the value of @v to @n.
7885+ */
7886+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
7887+{
7888+ unsigned high = (unsigned)(i >> 32);
7889+ unsigned low = (unsigned)i;
7890+ asm volatile(ATOMIC64_ALTERNATIVE(set)
7891+ : "+b" (low), "+c" (high)
7892+ : "S" (v)
7893+ : "eax", "edx", "memory"
7894+ );
7895+}
7896+
7897+/**
7898 * atomic64_read - read atomic64 variable
7899 * @v: pointer to type atomic64_t
7900 *
7901@@ -93,6 +134,22 @@ static inline long long atomic64_read(atomic64_t *v)
7902 }
7903
7904 /**
7905+ * atomic64_read_unchecked - read atomic64 variable
7906+ * @v: pointer to type atomic64_unchecked_t
7907+ *
7908+ * Atomically reads the value of @v and returns it.
7909+ */
7910+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
7911+{
7912+ long long r;
7913+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
7914+ : "=A" (r), "+c" (v)
7915+ : : "memory"
7916+ );
7917+ return r;
7918+ }
7919+
7920+/**
7921 * atomic64_add_return - add and return
7922 * @i: integer value to add
7923 * @v: pointer to type atomic64_t
7924@@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
7925 return i;
7926 }
7927
7928+/**
7929+ * atomic64_add_return_unchecked - add and return
7930+ * @i: integer value to add
7931+ * @v: pointer to type atomic64_unchecked_t
7932+ *
7933+ * Atomically adds @i to @v and returns @i + *@v
7934+ */
7935+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
7936+{
7937+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
7938+ : "+A" (i), "+c" (v)
7939+ : : "memory"
7940+ );
7941+ return i;
7942+}
7943+
7944 /*
7945 * Other variants with different arithmetic operators:
7946 */
7947@@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
7948 return a;
7949 }
7950
7951+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7952+{
7953+ long long a;
7954+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
7955+ : "=A" (a)
7956+ : "S" (v)
7957+ : "memory", "ecx"
7958+ );
7959+ return a;
7960+}
7961+
7962 static inline long long atomic64_dec_return(atomic64_t *v)
7963 {
7964 long long a;
7965@@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
7966 }
7967
7968 /**
7969+ * atomic64_add_unchecked - add integer to atomic64 variable
7970+ * @i: integer value to add
7971+ * @v: pointer to type atomic64_unchecked_t
7972+ *
7973+ * Atomically adds @i to @v.
7974+ */
7975+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
7976+{
7977+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
7978+ : "+A" (i), "+c" (v)
7979+ : : "memory"
7980+ );
7981+ return i;
7982+}
7983+
7984+/**
7985 * atomic64_sub - subtract the atomic64 variable
7986 * @i: integer value to subtract
7987 * @v: pointer to type atomic64_t
7988diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
7989index 0e1cbfc..5623683 100644
7990--- a/arch/x86/include/asm/atomic64_64.h
7991+++ b/arch/x86/include/asm/atomic64_64.h
7992@@ -18,7 +18,19 @@
7993 */
7994 static inline long atomic64_read(const atomic64_t *v)
7995 {
7996- return (*(volatile long *)&(v)->counter);
7997+ return (*(volatile const long *)&(v)->counter);
7998+}
7999+
8000+/**
8001+ * atomic64_read_unchecked - read atomic64 variable
8002+ * @v: pointer of type atomic64_unchecked_t
8003+ *
8004+ * Atomically reads the value of @v.
8005+ * Doesn't imply a read memory barrier.
8006+ */
8007+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
8008+{
8009+ return (*(volatile const long *)&(v)->counter);
8010 }
8011
8012 /**
8013@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
8014 }
8015
8016 /**
8017+ * atomic64_set_unchecked - set atomic64 variable
8018+ * @v: pointer to type atomic64_unchecked_t
8019+ * @i: required value
8020+ *
8021+ * Atomically sets the value of @v to @i.
8022+ */
8023+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
8024+{
8025+ v->counter = i;
8026+}
8027+
8028+/**
8029 * atomic64_add - add integer to atomic64 variable
8030 * @i: integer value to add
8031 * @v: pointer to type atomic64_t
8032@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
8033 */
8034 static inline void atomic64_add(long i, atomic64_t *v)
8035 {
8036+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
8037+
8038+#ifdef CONFIG_PAX_REFCOUNT
8039+ "jno 0f\n"
8040+ LOCK_PREFIX "subq %1,%0\n"
8041+ "int $4\n0:\n"
8042+ _ASM_EXTABLE(0b, 0b)
8043+#endif
8044+
8045+ : "=m" (v->counter)
8046+ : "er" (i), "m" (v->counter));
8047+}
8048+
8049+/**
8050+ * atomic64_add_unchecked - add integer to atomic64 variable
8051+ * @i: integer value to add
8052+ * @v: pointer to type atomic64_unchecked_t
8053+ *
8054+ * Atomically adds @i to @v.
8055+ */
8056+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
8057+{
8058 asm volatile(LOCK_PREFIX "addq %1,%0"
8059 : "=m" (v->counter)
8060 : "er" (i), "m" (v->counter));
8061@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
8062 */
8063 static inline void atomic64_sub(long i, atomic64_t *v)
8064 {
8065- asm volatile(LOCK_PREFIX "subq %1,%0"
8066+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
8067+
8068+#ifdef CONFIG_PAX_REFCOUNT
8069+ "jno 0f\n"
8070+ LOCK_PREFIX "addq %1,%0\n"
8071+ "int $4\n0:\n"
8072+ _ASM_EXTABLE(0b, 0b)
8073+#endif
8074+
8075+ : "=m" (v->counter)
8076+ : "er" (i), "m" (v->counter));
8077+}
8078+
8079+/**
8080+ * atomic64_sub_unchecked - subtract the atomic64 variable
8081+ * @i: integer value to subtract
8082+ * @v: pointer to type atomic64_unchecked_t
8083+ *
8084+ * Atomically subtracts @i from @v.
8085+ */
8086+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
8087+{
8088+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
8089 : "=m" (v->counter)
8090 : "er" (i), "m" (v->counter));
8091 }
8092@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
8093 {
8094 unsigned char c;
8095
8096- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
8097+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
8098+
8099+#ifdef CONFIG_PAX_REFCOUNT
8100+ "jno 0f\n"
8101+ LOCK_PREFIX "addq %2,%0\n"
8102+ "int $4\n0:\n"
8103+ _ASM_EXTABLE(0b, 0b)
8104+#endif
8105+
8106+ "sete %1\n"
8107 : "=m" (v->counter), "=qm" (c)
8108 : "er" (i), "m" (v->counter) : "memory");
8109 return c;
8110@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
8111 */
8112 static inline void atomic64_inc(atomic64_t *v)
8113 {
8114+ asm volatile(LOCK_PREFIX "incq %0\n"
8115+
8116+#ifdef CONFIG_PAX_REFCOUNT
8117+ "jno 0f\n"
8118+ LOCK_PREFIX "decq %0\n"
8119+ "int $4\n0:\n"
8120+ _ASM_EXTABLE(0b, 0b)
8121+#endif
8122+
8123+ : "=m" (v->counter)
8124+ : "m" (v->counter));
8125+}
8126+
8127+/**
8128+ * atomic64_inc_unchecked - increment atomic64 variable
8129+ * @v: pointer to type atomic64_unchecked_t
8130+ *
8131+ * Atomically increments @v by 1.
8132+ */
8133+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8134+{
8135 asm volatile(LOCK_PREFIX "incq %0"
8136 : "=m" (v->counter)
8137 : "m" (v->counter));
8138@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
8139 */
8140 static inline void atomic64_dec(atomic64_t *v)
8141 {
8142- asm volatile(LOCK_PREFIX "decq %0"
8143+ asm volatile(LOCK_PREFIX "decq %0\n"
8144+
8145+#ifdef CONFIG_PAX_REFCOUNT
8146+ "jno 0f\n"
8147+ LOCK_PREFIX "incq %0\n"
8148+ "int $4\n0:\n"
8149+ _ASM_EXTABLE(0b, 0b)
8150+#endif
8151+
8152+ : "=m" (v->counter)
8153+ : "m" (v->counter));
8154+}
8155+
8156+/**
8157+ * atomic64_dec_unchecked - decrement atomic64 variable
8158+ * @v: pointer to type atomic64_t
8159+ *
8160+ * Atomically decrements @v by 1.
8161+ */
8162+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8163+{
8164+ asm volatile(LOCK_PREFIX "decq %0\n"
8165 : "=m" (v->counter)
8166 : "m" (v->counter));
8167 }
8168@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
8169 {
8170 unsigned char c;
8171
8172- asm volatile(LOCK_PREFIX "decq %0; sete %1"
8173+ asm volatile(LOCK_PREFIX "decq %0\n"
8174+
8175+#ifdef CONFIG_PAX_REFCOUNT
8176+ "jno 0f\n"
8177+ LOCK_PREFIX "incq %0\n"
8178+ "int $4\n0:\n"
8179+ _ASM_EXTABLE(0b, 0b)
8180+#endif
8181+
8182+ "sete %1\n"
8183 : "=m" (v->counter), "=qm" (c)
8184 : "m" (v->counter) : "memory");
8185 return c != 0;
8186@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
8187 {
8188 unsigned char c;
8189
8190- asm volatile(LOCK_PREFIX "incq %0; sete %1"
8191+ asm volatile(LOCK_PREFIX "incq %0\n"
8192+
8193+#ifdef CONFIG_PAX_REFCOUNT
8194+ "jno 0f\n"
8195+ LOCK_PREFIX "decq %0\n"
8196+ "int $4\n0:\n"
8197+ _ASM_EXTABLE(0b, 0b)
8198+#endif
8199+
8200+ "sete %1\n"
8201 : "=m" (v->counter), "=qm" (c)
8202 : "m" (v->counter) : "memory");
8203 return c != 0;
8204@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
8205 {
8206 unsigned char c;
8207
8208- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
8209+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
8210+
8211+#ifdef CONFIG_PAX_REFCOUNT
8212+ "jno 0f\n"
8213+ LOCK_PREFIX "subq %2,%0\n"
8214+ "int $4\n0:\n"
8215+ _ASM_EXTABLE(0b, 0b)
8216+#endif
8217+
8218+ "sets %1\n"
8219 : "=m" (v->counter), "=qm" (c)
8220 : "er" (i), "m" (v->counter) : "memory");
8221 return c;
8222@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
8223 */
8224 static inline long atomic64_add_return(long i, atomic64_t *v)
8225 {
8226+ return i + xadd_check_overflow(&v->counter, i);
8227+}
8228+
8229+/**
8230+ * atomic64_add_return_unchecked - add and return
8231+ * @i: integer value to add
8232+ * @v: pointer to type atomic64_unchecked_t
8233+ *
8234+ * Atomically adds @i to @v and returns @i + @v
8235+ */
8236+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
8237+{
8238 return i + xadd(&v->counter, i);
8239 }
8240
8241@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
8242 }
8243
8244 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
8245+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8246+{
8247+ return atomic64_add_return_unchecked(1, v);
8248+}
8249 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
8250
8251 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8252@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8253 return cmpxchg(&v->counter, old, new);
8254 }
8255
8256+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8257+{
8258+ return cmpxchg(&v->counter, old, new);
8259+}
8260+
8261 static inline long atomic64_xchg(atomic64_t *v, long new)
8262 {
8263 return xchg(&v->counter, new);
8264@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
8265 */
8266 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8267 {
8268- long c, old;
8269+ long c, old, new;
8270 c = atomic64_read(v);
8271 for (;;) {
8272- if (unlikely(c == (u)))
8273+ if (unlikely(c == u))
8274 break;
8275- old = atomic64_cmpxchg((v), c, c + (a));
8276+
8277+ asm volatile("add %2,%0\n"
8278+
8279+#ifdef CONFIG_PAX_REFCOUNT
8280+ "jno 0f\n"
8281+ "sub %2,%0\n"
8282+ "int $4\n0:\n"
8283+ _ASM_EXTABLE(0b, 0b)
8284+#endif
8285+
8286+ : "=r" (new)
8287+ : "0" (c), "ir" (a));
8288+
8289+ old = atomic64_cmpxchg(v, c, new);
8290 if (likely(old == c))
8291 break;
8292 c = old;
8293 }
8294- return c != (u);
8295+ return c != u;
8296 }
8297
8298 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
8299diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
8300index 1775d6e..b65017f 100644
8301--- a/arch/x86/include/asm/bitops.h
8302+++ b/arch/x86/include/asm/bitops.h
8303@@ -38,7 +38,7 @@
8304 * a mask operation on a byte.
8305 */
8306 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8307-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8308+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8309 #define CONST_MASK(nr) (1 << ((nr) & 7))
8310
8311 /**
8312diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
8313index 5e1a2ee..c9f9533 100644
8314--- a/arch/x86/include/asm/boot.h
8315+++ b/arch/x86/include/asm/boot.h
8316@@ -11,10 +11,15 @@
8317 #include <asm/pgtable_types.h>
8318
8319 /* Physical address where kernel should be loaded. */
8320-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8321+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8322 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8323 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8324
8325+#ifndef __ASSEMBLY__
8326+extern unsigned char __LOAD_PHYSICAL_ADDR[];
8327+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8328+#endif
8329+
8330 /* Minimum kernel alignment, as a power of two */
8331 #ifdef CONFIG_X86_64
8332 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8333diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
8334index 48f99f1..d78ebf9 100644
8335--- a/arch/x86/include/asm/cache.h
8336+++ b/arch/x86/include/asm/cache.h
8337@@ -5,12 +5,13 @@
8338
8339 /* L1 cache line size */
8340 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8341-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8342+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8343
8344 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8345+#define __read_only __attribute__((__section__(".data..read_only")))
8346
8347 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
8348-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
8349+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
8350
8351 #ifdef CONFIG_X86_VSMP
8352 #ifdef CONFIG_SMP
8353diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
8354index 4e12668..501d239 100644
8355--- a/arch/x86/include/asm/cacheflush.h
8356+++ b/arch/x86/include/asm/cacheflush.h
8357@@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
8358 unsigned long pg_flags = pg->flags & _PGMT_MASK;
8359
8360 if (pg_flags == _PGMT_DEFAULT)
8361- return -1;
8362+ return ~0UL;
8363 else if (pg_flags == _PGMT_WC)
8364 return _PAGE_CACHE_WC;
8365 else if (pg_flags == _PGMT_UC_MINUS)
8366diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
8367index 46fc474..b02b0f9 100644
8368--- a/arch/x86/include/asm/checksum_32.h
8369+++ b/arch/x86/include/asm/checksum_32.h
8370@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
8371 int len, __wsum sum,
8372 int *src_err_ptr, int *dst_err_ptr);
8373
8374+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8375+ int len, __wsum sum,
8376+ int *src_err_ptr, int *dst_err_ptr);
8377+
8378+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8379+ int len, __wsum sum,
8380+ int *src_err_ptr, int *dst_err_ptr);
8381+
8382 /*
8383 * Note: when you get a NULL pointer exception here this means someone
8384 * passed in an incorrect kernel address to one of these functions.
8385@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
8386 int *err_ptr)
8387 {
8388 might_sleep();
8389- return csum_partial_copy_generic((__force void *)src, dst,
8390+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
8391 len, sum, err_ptr, NULL);
8392 }
8393
8394@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
8395 {
8396 might_sleep();
8397 if (access_ok(VERIFY_WRITE, dst, len))
8398- return csum_partial_copy_generic(src, (__force void *)dst,
8399+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8400 len, sum, NULL, err_ptr);
8401
8402 if (len)
8403diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
8404index 5d3acdf..6447a02 100644
8405--- a/arch/x86/include/asm/cmpxchg.h
8406+++ b/arch/x86/include/asm/cmpxchg.h
8407@@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
8408 __compiletime_error("Bad argument size for cmpxchg");
8409 extern void __xadd_wrong_size(void)
8410 __compiletime_error("Bad argument size for xadd");
8411+extern void __xadd_check_overflow_wrong_size(void)
8412+ __compiletime_error("Bad argument size for xadd_check_overflow");
8413
8414 /*
8415 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
8416@@ -195,6 +197,34 @@ extern void __xadd_wrong_size(void)
8417 __ret; \
8418 })
8419
8420+#define __xadd_check_overflow(ptr, inc, lock) \
8421+ ({ \
8422+ __typeof__ (*(ptr)) __ret = (inc); \
8423+ switch (sizeof(*(ptr))) { \
8424+ case __X86_CASE_L: \
8425+ asm volatile (lock "xaddl %0, %1\n" \
8426+ "jno 0f\n" \
8427+ "mov %0,%1\n" \
8428+ "int $4\n0:\n" \
8429+ _ASM_EXTABLE(0b, 0b) \
8430+ : "+r" (__ret), "+m" (*(ptr)) \
8431+ : : "memory", "cc"); \
8432+ break; \
8433+ case __X86_CASE_Q: \
8434+ asm volatile (lock "xaddq %q0, %1\n" \
8435+ "jno 0f\n" \
8436+ "mov %0,%1\n" \
8437+ "int $4\n0:\n" \
8438+ _ASM_EXTABLE(0b, 0b) \
8439+ : "+r" (__ret), "+m" (*(ptr)) \
8440+ : : "memory", "cc"); \
8441+ break; \
8442+ default: \
8443+ __xadd_check_overflow_wrong_size(); \
8444+ } \
8445+ __ret; \
8446+ })
8447+
8448 /*
8449 * xadd() adds "inc" to "*ptr" and atomically returns the previous
8450 * value of "*ptr".
8451@@ -207,4 +237,6 @@ extern void __xadd_wrong_size(void)
8452 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
8453 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
8454
8455+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
8456+
8457 #endif /* ASM_X86_CMPXCHG_H */
8458diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
8459index f3444f7..051a196 100644
8460--- a/arch/x86/include/asm/cpufeature.h
8461+++ b/arch/x86/include/asm/cpufeature.h
8462@@ -363,7 +363,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
8463 ".section .discard,\"aw\",@progbits\n"
8464 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
8465 ".previous\n"
8466- ".section .altinstr_replacement,\"ax\"\n"
8467+ ".section .altinstr_replacement,\"a\"\n"
8468 "3: movb $1,%0\n"
8469 "4:\n"
8470 ".previous\n"
8471diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
8472index 41935fa..3b40db8 100644
8473--- a/arch/x86/include/asm/desc.h
8474+++ b/arch/x86/include/asm/desc.h
8475@@ -4,6 +4,7 @@
8476 #include <asm/desc_defs.h>
8477 #include <asm/ldt.h>
8478 #include <asm/mmu.h>
8479+#include <asm/pgtable.h>
8480
8481 #include <linux/smp.h>
8482
8483@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8484
8485 desc->type = (info->read_exec_only ^ 1) << 1;
8486 desc->type |= info->contents << 2;
8487+ desc->type |= info->seg_not_present ^ 1;
8488
8489 desc->s = 1;
8490 desc->dpl = 0x3;
8491@@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8492 }
8493
8494 extern struct desc_ptr idt_descr;
8495-extern gate_desc idt_table[];
8496-
8497-struct gdt_page {
8498- struct desc_struct gdt[GDT_ENTRIES];
8499-} __attribute__((aligned(PAGE_SIZE)));
8500-
8501-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8502+extern gate_desc idt_table[256];
8503
8504+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8505 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8506 {
8507- return per_cpu(gdt_page, cpu).gdt;
8508+ return cpu_gdt_table[cpu];
8509 }
8510
8511 #ifdef CONFIG_X86_64
8512@@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
8513 unsigned long base, unsigned dpl, unsigned flags,
8514 unsigned short seg)
8515 {
8516- gate->a = (seg << 16) | (base & 0xffff);
8517- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8518+ gate->gate.offset_low = base;
8519+ gate->gate.seg = seg;
8520+ gate->gate.reserved = 0;
8521+ gate->gate.type = type;
8522+ gate->gate.s = 0;
8523+ gate->gate.dpl = dpl;
8524+ gate->gate.p = 1;
8525+ gate->gate.offset_high = base >> 16;
8526 }
8527
8528 #endif
8529@@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
8530
8531 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
8532 {
8533+ pax_open_kernel();
8534 memcpy(&idt[entry], gate, sizeof(*gate));
8535+ pax_close_kernel();
8536 }
8537
8538 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
8539 {
8540+ pax_open_kernel();
8541 memcpy(&ldt[entry], desc, 8);
8542+ pax_close_kernel();
8543 }
8544
8545 static inline void
8546@@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
8547 default: size = sizeof(*gdt); break;
8548 }
8549
8550+ pax_open_kernel();
8551 memcpy(&gdt[entry], desc, size);
8552+ pax_close_kernel();
8553 }
8554
8555 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8556@@ -207,7 +216,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
8557
8558 static inline void native_load_tr_desc(void)
8559 {
8560+ pax_open_kernel();
8561 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8562+ pax_close_kernel();
8563 }
8564
8565 static inline void native_load_gdt(const struct desc_ptr *dtr)
8566@@ -244,8 +255,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
8567 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8568 unsigned int i;
8569
8570+ pax_open_kernel();
8571 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8572 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8573+ pax_close_kernel();
8574 }
8575
8576 #define _LDT_empty(info) \
8577@@ -307,7 +320,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
8578 desc->limit = (limit >> 16) & 0xf;
8579 }
8580
8581-static inline void _set_gate(int gate, unsigned type, void *addr,
8582+static inline void _set_gate(int gate, unsigned type, const void *addr,
8583 unsigned dpl, unsigned ist, unsigned seg)
8584 {
8585 gate_desc s;
8586@@ -326,7 +339,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
8587 * Pentium F0 0F bugfix can have resulted in the mapped
8588 * IDT being write-protected.
8589 */
8590-static inline void set_intr_gate(unsigned int n, void *addr)
8591+static inline void set_intr_gate(unsigned int n, const void *addr)
8592 {
8593 BUG_ON((unsigned)n > 0xFF);
8594 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8595@@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
8596 /*
8597 * This routine sets up an interrupt gate at directory privilege level 3.
8598 */
8599-static inline void set_system_intr_gate(unsigned int n, void *addr)
8600+static inline void set_system_intr_gate(unsigned int n, const void *addr)
8601 {
8602 BUG_ON((unsigned)n > 0xFF);
8603 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8604 }
8605
8606-static inline void set_system_trap_gate(unsigned int n, void *addr)
8607+static inline void set_system_trap_gate(unsigned int n, const void *addr)
8608 {
8609 BUG_ON((unsigned)n > 0xFF);
8610 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8611 }
8612
8613-static inline void set_trap_gate(unsigned int n, void *addr)
8614+static inline void set_trap_gate(unsigned int n, const void *addr)
8615 {
8616 BUG_ON((unsigned)n > 0xFF);
8617 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8618@@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
8619 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8620 {
8621 BUG_ON((unsigned)n > 0xFF);
8622- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8623+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8624 }
8625
8626-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8627+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8628 {
8629 BUG_ON((unsigned)n > 0xFF);
8630 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8631 }
8632
8633-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8634+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8635 {
8636 BUG_ON((unsigned)n > 0xFF);
8637 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8638 }
8639
8640+#ifdef CONFIG_X86_32
8641+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8642+{
8643+ struct desc_struct d;
8644+
8645+ if (likely(limit))
8646+ limit = (limit - 1UL) >> PAGE_SHIFT;
8647+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
8648+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8649+}
8650+#endif
8651+
8652 #endif /* _ASM_X86_DESC_H */
8653diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
8654index 278441f..b95a174 100644
8655--- a/arch/x86/include/asm/desc_defs.h
8656+++ b/arch/x86/include/asm/desc_defs.h
8657@@ -31,6 +31,12 @@ struct desc_struct {
8658 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8659 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8660 };
8661+ struct {
8662+ u16 offset_low;
8663+ u16 seg;
8664+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8665+ unsigned offset_high: 16;
8666+ } gate;
8667 };
8668 } __attribute__((packed));
8669
8670diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
8671index 908b969..a1f4eb4 100644
8672--- a/arch/x86/include/asm/e820.h
8673+++ b/arch/x86/include/asm/e820.h
8674@@ -69,7 +69,7 @@ struct e820map {
8675 #define ISA_START_ADDRESS 0xa0000
8676 #define ISA_END_ADDRESS 0x100000
8677
8678-#define BIOS_BEGIN 0x000a0000
8679+#define BIOS_BEGIN 0x000c0000
8680 #define BIOS_END 0x00100000
8681
8682 #define BIOS_ROM_BASE 0xffe00000
8683diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
8684index 5f962df..7289f09 100644
8685--- a/arch/x86/include/asm/elf.h
8686+++ b/arch/x86/include/asm/elf.h
8687@@ -238,7 +238,25 @@ extern int force_personality32;
8688 the loader. We need to make sure that it is out of the way of the program
8689 that it will "exec", and that there is sufficient room for the brk. */
8690
8691+#ifdef CONFIG_PAX_SEGMEXEC
8692+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8693+#else
8694 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8695+#endif
8696+
8697+#ifdef CONFIG_PAX_ASLR
8698+#ifdef CONFIG_X86_32
8699+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8700+
8701+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8702+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8703+#else
8704+#define PAX_ELF_ET_DYN_BASE 0x400000UL
8705+
8706+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8707+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8708+#endif
8709+#endif
8710
8711 /* This yields a mask that user programs can use to figure out what
8712 instruction set this CPU supports. This could be done in user space,
8713@@ -291,9 +309,7 @@ do { \
8714
8715 #define ARCH_DLINFO \
8716 do { \
8717- if (vdso_enabled) \
8718- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8719- (unsigned long)current->mm->context.vdso); \
8720+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
8721 } while (0)
8722
8723 #define AT_SYSINFO 32
8724@@ -304,7 +320,7 @@ do { \
8725
8726 #endif /* !CONFIG_X86_32 */
8727
8728-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8729+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8730
8731 #define VDSO_ENTRY \
8732 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8733@@ -318,9 +334,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8734 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8735 #define compat_arch_setup_additional_pages syscall32_setup_pages
8736
8737-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8738-#define arch_randomize_brk arch_randomize_brk
8739-
8740 /*
8741 * True on X86_32 or when emulating IA32 on X86_64
8742 */
8743diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
8744index cc70c1c..d96d011 100644
8745--- a/arch/x86/include/asm/emergency-restart.h
8746+++ b/arch/x86/include/asm/emergency-restart.h
8747@@ -15,6 +15,6 @@ enum reboot_type {
8748
8749 extern enum reboot_type reboot_type;
8750
8751-extern void machine_emergency_restart(void);
8752+extern void machine_emergency_restart(void) __noreturn;
8753
8754 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8755diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
8756index d09bb03..4ea4194 100644
8757--- a/arch/x86/include/asm/futex.h
8758+++ b/arch/x86/include/asm/futex.h
8759@@ -12,16 +12,18 @@
8760 #include <asm/system.h>
8761
8762 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8763+ typecheck(u32 __user *, uaddr); \
8764 asm volatile("1:\t" insn "\n" \
8765 "2:\t.section .fixup,\"ax\"\n" \
8766 "3:\tmov\t%3, %1\n" \
8767 "\tjmp\t2b\n" \
8768 "\t.previous\n" \
8769 _ASM_EXTABLE(1b, 3b) \
8770- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8771+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
8772 : "i" (-EFAULT), "0" (oparg), "1" (0))
8773
8774 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8775+ typecheck(u32 __user *, uaddr); \
8776 asm volatile("1:\tmovl %2, %0\n" \
8777 "\tmovl\t%0, %3\n" \
8778 "\t" insn "\n" \
8779@@ -34,7 +36,7 @@
8780 _ASM_EXTABLE(1b, 4b) \
8781 _ASM_EXTABLE(2b, 4b) \
8782 : "=&a" (oldval), "=&r" (ret), \
8783- "+m" (*uaddr), "=&r" (tem) \
8784+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
8785 : "r" (oparg), "i" (-EFAULT), "1" (0))
8786
8787 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8788@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8789
8790 switch (op) {
8791 case FUTEX_OP_SET:
8792- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8793+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8794 break;
8795 case FUTEX_OP_ADD:
8796- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8797+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8798 uaddr, oparg);
8799 break;
8800 case FUTEX_OP_OR:
8801@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
8802 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8803 return -EFAULT;
8804
8805- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
8806+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
8807 "2:\t.section .fixup, \"ax\"\n"
8808 "3:\tmov %3, %0\n"
8809 "\tjmp 2b\n"
8810 "\t.previous\n"
8811 _ASM_EXTABLE(1b, 3b)
8812- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
8813+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
8814 : "i" (-EFAULT), "r" (newval), "1" (oldval)
8815 : "memory"
8816 );
8817diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
8818index eb92a6e..b98b2f4 100644
8819--- a/arch/x86/include/asm/hw_irq.h
8820+++ b/arch/x86/include/asm/hw_irq.h
8821@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
8822 extern void enable_IO_APIC(void);
8823
8824 /* Statistics */
8825-extern atomic_t irq_err_count;
8826-extern atomic_t irq_mis_count;
8827+extern atomic_unchecked_t irq_err_count;
8828+extern atomic_unchecked_t irq_mis_count;
8829
8830 /* EISA */
8831 extern void eisa_set_level_irq(unsigned int irq);
8832diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
8833index a850b4d..bae26dc 100644
8834--- a/arch/x86/include/asm/i387.h
8835+++ b/arch/x86/include/asm/i387.h
8836@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
8837 {
8838 int err;
8839
8840+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8841+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8842+ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
8843+#endif
8844+
8845 /* See comment in fxsave() below. */
8846 #ifdef CONFIG_AS_FXSAVEQ
8847 asm volatile("1: fxrstorq %[fx]\n\t"
8848@@ -121,6 +126,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
8849 {
8850 int err;
8851
8852+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8853+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8854+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8855+#endif
8856+
8857 /*
8858 * Clear the bytes not touched by the fxsave and reserved
8859 * for the SW usage.
8860@@ -424,7 +434,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
8861 static inline bool interrupted_user_mode(void)
8862 {
8863 struct pt_regs *regs = get_irq_regs();
8864- return regs && user_mode_vm(regs);
8865+ return regs && user_mode(regs);
8866 }
8867
8868 /*
8869diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
8870index d8e8eef..99f81ae 100644
8871--- a/arch/x86/include/asm/io.h
8872+++ b/arch/x86/include/asm/io.h
8873@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
8874
8875 #include <linux/vmalloc.h>
8876
8877+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8878+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8879+{
8880+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8881+}
8882+
8883+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8884+{
8885+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8886+}
8887+
8888 /*
8889 * Convert a virtual cached pointer to an uncached pointer
8890 */
8891diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
8892index bba3cf8..06bc8da 100644
8893--- a/arch/x86/include/asm/irqflags.h
8894+++ b/arch/x86/include/asm/irqflags.h
8895@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
8896 sti; \
8897 sysexit
8898
8899+#define GET_CR0_INTO_RDI mov %cr0, %rdi
8900+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8901+#define GET_CR3_INTO_RDI mov %cr3, %rdi
8902+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8903+
8904 #else
8905 #define INTERRUPT_RETURN iret
8906 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8907diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
8908index 5478825..839e88c 100644
8909--- a/arch/x86/include/asm/kprobes.h
8910+++ b/arch/x86/include/asm/kprobes.h
8911@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
8912 #define RELATIVEJUMP_SIZE 5
8913 #define RELATIVECALL_OPCODE 0xe8
8914 #define RELATIVE_ADDR_SIZE 4
8915-#define MAX_STACK_SIZE 64
8916-#define MIN_STACK_SIZE(ADDR) \
8917- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8918- THREAD_SIZE - (unsigned long)(ADDR))) \
8919- ? (MAX_STACK_SIZE) \
8920- : (((unsigned long)current_thread_info()) + \
8921- THREAD_SIZE - (unsigned long)(ADDR)))
8922+#define MAX_STACK_SIZE 64UL
8923+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8924
8925 #define flush_insn_slot(p) do { } while (0)
8926
8927diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
8928index b4973f4..7c4d3fc 100644
8929--- a/arch/x86/include/asm/kvm_host.h
8930+++ b/arch/x86/include/asm/kvm_host.h
8931@@ -459,7 +459,7 @@ struct kvm_arch {
8932 unsigned int n_requested_mmu_pages;
8933 unsigned int n_max_mmu_pages;
8934 unsigned int indirect_shadow_pages;
8935- atomic_t invlpg_counter;
8936+ atomic_unchecked_t invlpg_counter;
8937 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
8938 /*
8939 * Hash table of struct kvm_mmu_page.
8940@@ -638,7 +638,7 @@ struct kvm_x86_ops {
8941 int (*check_intercept)(struct kvm_vcpu *vcpu,
8942 struct x86_instruction_info *info,
8943 enum x86_intercept_stage stage);
8944-};
8945+} __do_const;
8946
8947 struct kvm_arch_async_pf {
8948 u32 token;
8949diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
8950index 9cdae5d..300d20f 100644
8951--- a/arch/x86/include/asm/local.h
8952+++ b/arch/x86/include/asm/local.h
8953@@ -18,26 +18,58 @@ typedef struct {
8954
8955 static inline void local_inc(local_t *l)
8956 {
8957- asm volatile(_ASM_INC "%0"
8958+ asm volatile(_ASM_INC "%0\n"
8959+
8960+#ifdef CONFIG_PAX_REFCOUNT
8961+ "jno 0f\n"
8962+ _ASM_DEC "%0\n"
8963+ "int $4\n0:\n"
8964+ _ASM_EXTABLE(0b, 0b)
8965+#endif
8966+
8967 : "+m" (l->a.counter));
8968 }
8969
8970 static inline void local_dec(local_t *l)
8971 {
8972- asm volatile(_ASM_DEC "%0"
8973+ asm volatile(_ASM_DEC "%0\n"
8974+
8975+#ifdef CONFIG_PAX_REFCOUNT
8976+ "jno 0f\n"
8977+ _ASM_INC "%0\n"
8978+ "int $4\n0:\n"
8979+ _ASM_EXTABLE(0b, 0b)
8980+#endif
8981+
8982 : "+m" (l->a.counter));
8983 }
8984
8985 static inline void local_add(long i, local_t *l)
8986 {
8987- asm volatile(_ASM_ADD "%1,%0"
8988+ asm volatile(_ASM_ADD "%1,%0\n"
8989+
8990+#ifdef CONFIG_PAX_REFCOUNT
8991+ "jno 0f\n"
8992+ _ASM_SUB "%1,%0\n"
8993+ "int $4\n0:\n"
8994+ _ASM_EXTABLE(0b, 0b)
8995+#endif
8996+
8997 : "+m" (l->a.counter)
8998 : "ir" (i));
8999 }
9000
9001 static inline void local_sub(long i, local_t *l)
9002 {
9003- asm volatile(_ASM_SUB "%1,%0"
9004+ asm volatile(_ASM_SUB "%1,%0\n"
9005+
9006+#ifdef CONFIG_PAX_REFCOUNT
9007+ "jno 0f\n"
9008+ _ASM_ADD "%1,%0\n"
9009+ "int $4\n0:\n"
9010+ _ASM_EXTABLE(0b, 0b)
9011+#endif
9012+
9013 : "+m" (l->a.counter)
9014 : "ir" (i));
9015 }
9016@@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
9017 {
9018 unsigned char c;
9019
9020- asm volatile(_ASM_SUB "%2,%0; sete %1"
9021+ asm volatile(_ASM_SUB "%2,%0\n"
9022+
9023+#ifdef CONFIG_PAX_REFCOUNT
9024+ "jno 0f\n"
9025+ _ASM_ADD "%2,%0\n"
9026+ "int $4\n0:\n"
9027+ _ASM_EXTABLE(0b, 0b)
9028+#endif
9029+
9030+ "sete %1\n"
9031 : "+m" (l->a.counter), "=qm" (c)
9032 : "ir" (i) : "memory");
9033 return c;
9034@@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
9035 {
9036 unsigned char c;
9037
9038- asm volatile(_ASM_DEC "%0; sete %1"
9039+ asm volatile(_ASM_DEC "%0\n"
9040+
9041+#ifdef CONFIG_PAX_REFCOUNT
9042+ "jno 0f\n"
9043+ _ASM_INC "%0\n"
9044+ "int $4\n0:\n"
9045+ _ASM_EXTABLE(0b, 0b)
9046+#endif
9047+
9048+ "sete %1\n"
9049 : "+m" (l->a.counter), "=qm" (c)
9050 : : "memory");
9051 return c != 0;
9052@@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
9053 {
9054 unsigned char c;
9055
9056- asm volatile(_ASM_INC "%0; sete %1"
9057+ asm volatile(_ASM_INC "%0\n"
9058+
9059+#ifdef CONFIG_PAX_REFCOUNT
9060+ "jno 0f\n"
9061+ _ASM_DEC "%0\n"
9062+ "int $4\n0:\n"
9063+ _ASM_EXTABLE(0b, 0b)
9064+#endif
9065+
9066+ "sete %1\n"
9067 : "+m" (l->a.counter), "=qm" (c)
9068 : : "memory");
9069 return c != 0;
9070@@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
9071 {
9072 unsigned char c;
9073
9074- asm volatile(_ASM_ADD "%2,%0; sets %1"
9075+ asm volatile(_ASM_ADD "%2,%0\n"
9076+
9077+#ifdef CONFIG_PAX_REFCOUNT
9078+ "jno 0f\n"
9079+ _ASM_SUB "%2,%0\n"
9080+ "int $4\n0:\n"
9081+ _ASM_EXTABLE(0b, 0b)
9082+#endif
9083+
9084+ "sets %1\n"
9085 : "+m" (l->a.counter), "=qm" (c)
9086 : "ir" (i) : "memory");
9087 return c;
9088@@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
9089 #endif
9090 /* Modern 486+ processor */
9091 __i = i;
9092- asm volatile(_ASM_XADD "%0, %1;"
9093+ asm volatile(_ASM_XADD "%0, %1\n"
9094+
9095+#ifdef CONFIG_PAX_REFCOUNT
9096+ "jno 0f\n"
9097+ _ASM_MOV "%0,%1\n"
9098+ "int $4\n0:\n"
9099+ _ASM_EXTABLE(0b, 0b)
9100+#endif
9101+
9102 : "+r" (i), "+m" (l->a.counter)
9103 : : "memory");
9104 return i + __i;
9105diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
9106index 593e51d..fa69c9a 100644
9107--- a/arch/x86/include/asm/mman.h
9108+++ b/arch/x86/include/asm/mman.h
9109@@ -5,4 +5,14 @@
9110
9111 #include <asm-generic/mman.h>
9112
9113+#ifdef __KERNEL__
9114+#ifndef __ASSEMBLY__
9115+#ifdef CONFIG_X86_32
9116+#define arch_mmap_check i386_mmap_check
9117+int i386_mmap_check(unsigned long addr, unsigned long len,
9118+ unsigned long flags);
9119+#endif
9120+#endif
9121+#endif
9122+
9123 #endif /* _ASM_X86_MMAN_H */
9124diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
9125index 5f55e69..e20bfb1 100644
9126--- a/arch/x86/include/asm/mmu.h
9127+++ b/arch/x86/include/asm/mmu.h
9128@@ -9,7 +9,7 @@
9129 * we put the segment information here.
9130 */
9131 typedef struct {
9132- void *ldt;
9133+ struct desc_struct *ldt;
9134 int size;
9135
9136 #ifdef CONFIG_X86_64
9137@@ -18,7 +18,19 @@ typedef struct {
9138 #endif
9139
9140 struct mutex lock;
9141- void *vdso;
9142+ unsigned long vdso;
9143+
9144+#ifdef CONFIG_X86_32
9145+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
9146+ unsigned long user_cs_base;
9147+ unsigned long user_cs_limit;
9148+
9149+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9150+ cpumask_t cpu_user_cs_mask;
9151+#endif
9152+
9153+#endif
9154+#endif
9155 } mm_context_t;
9156
9157 #ifdef CONFIG_SMP
9158diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
9159index 6902152..399f3a2 100644
9160--- a/arch/x86/include/asm/mmu_context.h
9161+++ b/arch/x86/include/asm/mmu_context.h
9162@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
9163
9164 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
9165 {
9166+
9167+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9168+ unsigned int i;
9169+ pgd_t *pgd;
9170+
9171+ pax_open_kernel();
9172+ pgd = get_cpu_pgd(smp_processor_id());
9173+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
9174+ set_pgd_batched(pgd+i, native_make_pgd(0));
9175+ pax_close_kernel();
9176+#endif
9177+
9178 #ifdef CONFIG_SMP
9179 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
9180 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
9181@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
9182 struct task_struct *tsk)
9183 {
9184 unsigned cpu = smp_processor_id();
9185+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9186+ int tlbstate = TLBSTATE_OK;
9187+#endif
9188
9189 if (likely(prev != next)) {
9190 #ifdef CONFIG_SMP
9191+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9192+ tlbstate = percpu_read(cpu_tlbstate.state);
9193+#endif
9194 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9195 percpu_write(cpu_tlbstate.active_mm, next);
9196 #endif
9197 cpumask_set_cpu(cpu, mm_cpumask(next));
9198
9199 /* Re-load page tables */
9200+#ifdef CONFIG_PAX_PER_CPU_PGD
9201+ pax_open_kernel();
9202+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9203+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9204+ pax_close_kernel();
9205+ load_cr3(get_cpu_pgd(cpu));
9206+#else
9207 load_cr3(next->pgd);
9208+#endif
9209
9210 /* stop flush ipis for the previous mm */
9211 cpumask_clear_cpu(cpu, mm_cpumask(prev));
9212@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
9213 */
9214 if (unlikely(prev->context.ldt != next->context.ldt))
9215 load_LDT_nolock(&next->context);
9216- }
9217+
9218+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9219+ if (!(__supported_pte_mask & _PAGE_NX)) {
9220+ smp_mb__before_clear_bit();
9221+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
9222+ smp_mb__after_clear_bit();
9223+ cpu_set(cpu, next->context.cpu_user_cs_mask);
9224+ }
9225+#endif
9226+
9227+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9228+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
9229+ prev->context.user_cs_limit != next->context.user_cs_limit))
9230+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9231 #ifdef CONFIG_SMP
9232+ else if (unlikely(tlbstate != TLBSTATE_OK))
9233+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9234+#endif
9235+#endif
9236+
9237+ }
9238 else {
9239+
9240+#ifdef CONFIG_PAX_PER_CPU_PGD
9241+ pax_open_kernel();
9242+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9243+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9244+ pax_close_kernel();
9245+ load_cr3(get_cpu_pgd(cpu));
9246+#endif
9247+
9248+#ifdef CONFIG_SMP
9249 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9250 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9251
9252@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
9253 * tlb flush IPI delivery. We must reload CR3
9254 * to make sure to use no freed page tables.
9255 */
9256+
9257+#ifndef CONFIG_PAX_PER_CPU_PGD
9258 load_cr3(next->pgd);
9259+#endif
9260+
9261 load_LDT_nolock(&next->context);
9262+
9263+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9264+ if (!(__supported_pte_mask & _PAGE_NX))
9265+ cpu_set(cpu, next->context.cpu_user_cs_mask);
9266+#endif
9267+
9268+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9269+#ifdef CONFIG_PAX_PAGEEXEC
9270+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
9271+#endif
9272+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9273+#endif
9274+
9275 }
9276+#endif
9277 }
9278-#endif
9279 }
9280
9281 #define activate_mm(prev, next) \
9282diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
9283index 9eae775..c914fea 100644
9284--- a/arch/x86/include/asm/module.h
9285+++ b/arch/x86/include/asm/module.h
9286@@ -5,6 +5,7 @@
9287
9288 #ifdef CONFIG_X86_64
9289 /* X86_64 does not define MODULE_PROC_FAMILY */
9290+#define MODULE_PROC_FAMILY ""
9291 #elif defined CONFIG_M386
9292 #define MODULE_PROC_FAMILY "386 "
9293 #elif defined CONFIG_M486
9294@@ -59,8 +60,20 @@
9295 #error unknown processor family
9296 #endif
9297
9298-#ifdef CONFIG_X86_32
9299-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
9300+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9301+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
9302+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
9303+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
9304+#else
9305+#define MODULE_PAX_KERNEXEC ""
9306 #endif
9307
9308+#ifdef CONFIG_PAX_MEMORY_UDEREF
9309+#define MODULE_PAX_UDEREF "UDEREF "
9310+#else
9311+#define MODULE_PAX_UDEREF ""
9312+#endif
9313+
9314+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
9315+
9316 #endif /* _ASM_X86_MODULE_H */
9317diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
9318index 7639dbf..e08a58c 100644
9319--- a/arch/x86/include/asm/page_64_types.h
9320+++ b/arch/x86/include/asm/page_64_types.h
9321@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9322
9323 /* duplicated to the one in bootmem.h */
9324 extern unsigned long max_pfn;
9325-extern unsigned long phys_base;
9326+extern const unsigned long phys_base;
9327
9328 extern unsigned long __phys_addr(unsigned long);
9329 #define __phys_reloc_hide(x) (x)
9330diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
9331index a7d2db9..edb023e 100644
9332--- a/arch/x86/include/asm/paravirt.h
9333+++ b/arch/x86/include/asm/paravirt.h
9334@@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
9335 val);
9336 }
9337
9338+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9339+{
9340+ pgdval_t val = native_pgd_val(pgd);
9341+
9342+ if (sizeof(pgdval_t) > sizeof(long))
9343+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
9344+ val, (u64)val >> 32);
9345+ else
9346+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
9347+ val);
9348+}
9349+
9350 static inline void pgd_clear(pgd_t *pgdp)
9351 {
9352 set_pgd(pgdp, __pgd(0));
9353@@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
9354 pv_mmu_ops.set_fixmap(idx, phys, flags);
9355 }
9356
9357+#ifdef CONFIG_PAX_KERNEXEC
9358+static inline unsigned long pax_open_kernel(void)
9359+{
9360+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9361+}
9362+
9363+static inline unsigned long pax_close_kernel(void)
9364+{
9365+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9366+}
9367+#else
9368+static inline unsigned long pax_open_kernel(void) { return 0; }
9369+static inline unsigned long pax_close_kernel(void) { return 0; }
9370+#endif
9371+
9372 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9373
9374 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
9375@@ -964,7 +991,7 @@ extern void default_banner(void);
9376
9377 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9378 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9379-#define PARA_INDIRECT(addr) *%cs:addr
9380+#define PARA_INDIRECT(addr) *%ss:addr
9381 #endif
9382
9383 #define INTERRUPT_RETURN \
9384@@ -1041,6 +1068,21 @@ extern void default_banner(void);
9385 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9386 CLBR_NONE, \
9387 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9388+
9389+#define GET_CR0_INTO_RDI \
9390+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9391+ mov %rax,%rdi
9392+
9393+#define SET_RDI_INTO_CR0 \
9394+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9395+
9396+#define GET_CR3_INTO_RDI \
9397+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9398+ mov %rax,%rdi
9399+
9400+#define SET_RDI_INTO_CR3 \
9401+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9402+
9403 #endif /* CONFIG_X86_32 */
9404
9405 #endif /* __ASSEMBLY__ */
9406diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
9407index 8e8b9a4..f07d725 100644
9408--- a/arch/x86/include/asm/paravirt_types.h
9409+++ b/arch/x86/include/asm/paravirt_types.h
9410@@ -84,20 +84,20 @@ struct pv_init_ops {
9411 */
9412 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9413 unsigned long addr, unsigned len);
9414-};
9415+} __no_const;
9416
9417
9418 struct pv_lazy_ops {
9419 /* Set deferred update mode, used for batching operations. */
9420 void (*enter)(void);
9421 void (*leave)(void);
9422-};
9423+} __no_const;
9424
9425 struct pv_time_ops {
9426 unsigned long long (*sched_clock)(void);
9427 unsigned long long (*steal_clock)(int cpu);
9428 unsigned long (*get_tsc_khz)(void);
9429-};
9430+} __no_const;
9431
9432 struct pv_cpu_ops {
9433 /* hooks for various privileged instructions */
9434@@ -193,7 +193,7 @@ struct pv_cpu_ops {
9435
9436 void (*start_context_switch)(struct task_struct *prev);
9437 void (*end_context_switch)(struct task_struct *next);
9438-};
9439+} __no_const;
9440
9441 struct pv_irq_ops {
9442 /*
9443@@ -224,7 +224,7 @@ struct pv_apic_ops {
9444 unsigned long start_eip,
9445 unsigned long start_esp);
9446 #endif
9447-};
9448+} __no_const;
9449
9450 struct pv_mmu_ops {
9451 unsigned long (*read_cr2)(void);
9452@@ -313,6 +313,7 @@ struct pv_mmu_ops {
9453 struct paravirt_callee_save make_pud;
9454
9455 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
9456+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
9457 #endif /* PAGETABLE_LEVELS == 4 */
9458 #endif /* PAGETABLE_LEVELS >= 3 */
9459
9460@@ -324,6 +325,12 @@ struct pv_mmu_ops {
9461 an mfn. We can tell which is which from the index. */
9462 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9463 phys_addr_t phys, pgprot_t flags);
9464+
9465+#ifdef CONFIG_PAX_KERNEXEC
9466+ unsigned long (*pax_open_kernel)(void);
9467+ unsigned long (*pax_close_kernel)(void);
9468+#endif
9469+
9470 };
9471
9472 struct arch_spinlock;
9473@@ -334,7 +341,7 @@ struct pv_lock_ops {
9474 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
9475 int (*spin_trylock)(struct arch_spinlock *lock);
9476 void (*spin_unlock)(struct arch_spinlock *lock);
9477-};
9478+} __no_const;
9479
9480 /* This contains all the paravirt structures: we get a convenient
9481 * number for each function using the offset which we use to indicate
9482diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
9483index b4389a4..b7ff22c 100644
9484--- a/arch/x86/include/asm/pgalloc.h
9485+++ b/arch/x86/include/asm/pgalloc.h
9486@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
9487 pmd_t *pmd, pte_t *pte)
9488 {
9489 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9490+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9491+}
9492+
9493+static inline void pmd_populate_user(struct mm_struct *mm,
9494+ pmd_t *pmd, pte_t *pte)
9495+{
9496+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9497 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9498 }
9499
9500diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
9501index 98391db..8f6984e 100644
9502--- a/arch/x86/include/asm/pgtable-2level.h
9503+++ b/arch/x86/include/asm/pgtable-2level.h
9504@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
9505
9506 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9507 {
9508+ pax_open_kernel();
9509 *pmdp = pmd;
9510+ pax_close_kernel();
9511 }
9512
9513 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9514diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
9515index effff47..f9e4035 100644
9516--- a/arch/x86/include/asm/pgtable-3level.h
9517+++ b/arch/x86/include/asm/pgtable-3level.h
9518@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9519
9520 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9521 {
9522+ pax_open_kernel();
9523 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9524+ pax_close_kernel();
9525 }
9526
9527 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9528 {
9529+ pax_open_kernel();
9530 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9531+ pax_close_kernel();
9532 }
9533
9534 /*
9535diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
9536index 18601c8..3d716d1 100644
9537--- a/arch/x86/include/asm/pgtable.h
9538+++ b/arch/x86/include/asm/pgtable.h
9539@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9540
9541 #ifndef __PAGETABLE_PUD_FOLDED
9542 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
9543+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
9544 #define pgd_clear(pgd) native_pgd_clear(pgd)
9545 #endif
9546
9547@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9548
9549 #define arch_end_context_switch(prev) do {} while(0)
9550
9551+#define pax_open_kernel() native_pax_open_kernel()
9552+#define pax_close_kernel() native_pax_close_kernel()
9553 #endif /* CONFIG_PARAVIRT */
9554
9555+#define __HAVE_ARCH_PAX_OPEN_KERNEL
9556+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9557+
9558+#ifdef CONFIG_PAX_KERNEXEC
9559+static inline unsigned long native_pax_open_kernel(void)
9560+{
9561+ unsigned long cr0;
9562+
9563+ preempt_disable();
9564+ barrier();
9565+ cr0 = read_cr0() ^ X86_CR0_WP;
9566+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
9567+ write_cr0(cr0);
9568+ return cr0 ^ X86_CR0_WP;
9569+}
9570+
9571+static inline unsigned long native_pax_close_kernel(void)
9572+{
9573+ unsigned long cr0;
9574+
9575+ cr0 = read_cr0() ^ X86_CR0_WP;
9576+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9577+ write_cr0(cr0);
9578+ barrier();
9579+ preempt_enable_no_resched();
9580+ return cr0 ^ X86_CR0_WP;
9581+}
9582+#else
9583+static inline unsigned long native_pax_open_kernel(void) { return 0; }
9584+static inline unsigned long native_pax_close_kernel(void) { return 0; }
9585+#endif
9586+
9587 /*
9588 * The following only work if pte_present() is true.
9589 * Undefined behaviour if not..
9590 */
9591+static inline int pte_user(pte_t pte)
9592+{
9593+ return pte_val(pte) & _PAGE_USER;
9594+}
9595+
9596 static inline int pte_dirty(pte_t pte)
9597 {
9598 return pte_flags(pte) & _PAGE_DIRTY;
9599@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
9600 return pte_clear_flags(pte, _PAGE_RW);
9601 }
9602
9603+static inline pte_t pte_mkread(pte_t pte)
9604+{
9605+ return __pte(pte_val(pte) | _PAGE_USER);
9606+}
9607+
9608 static inline pte_t pte_mkexec(pte_t pte)
9609 {
9610- return pte_clear_flags(pte, _PAGE_NX);
9611+#ifdef CONFIG_X86_PAE
9612+ if (__supported_pte_mask & _PAGE_NX)
9613+ return pte_clear_flags(pte, _PAGE_NX);
9614+ else
9615+#endif
9616+ return pte_set_flags(pte, _PAGE_USER);
9617+}
9618+
9619+static inline pte_t pte_exprotect(pte_t pte)
9620+{
9621+#ifdef CONFIG_X86_PAE
9622+ if (__supported_pte_mask & _PAGE_NX)
9623+ return pte_set_flags(pte, _PAGE_NX);
9624+ else
9625+#endif
9626+ return pte_clear_flags(pte, _PAGE_USER);
9627 }
9628
9629 static inline pte_t pte_mkdirty(pte_t pte)
9630@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
9631 #endif
9632
9633 #ifndef __ASSEMBLY__
9634+
9635+#ifdef CONFIG_PAX_PER_CPU_PGD
9636+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9637+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9638+{
9639+ return cpu_pgd[cpu];
9640+}
9641+#endif
9642+
9643 #include <linux/mm_types.h>
9644
9645 static inline int pte_none(pte_t pte)
9646@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
9647
9648 static inline int pgd_bad(pgd_t pgd)
9649 {
9650- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9651+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9652 }
9653
9654 static inline int pgd_none(pgd_t pgd)
9655@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
9656 * pgd_offset() returns a (pgd_t *)
9657 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9658 */
9659-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9660+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9661+
9662+#ifdef CONFIG_PAX_PER_CPU_PGD
9663+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9664+#endif
9665+
9666 /*
9667 * a shortcut which implies the use of the kernel's pgd, instead
9668 * of a process's
9669@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
9670 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9671 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9672
9673+#ifdef CONFIG_X86_32
9674+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9675+#else
9676+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9677+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9678+
9679+#ifdef CONFIG_PAX_MEMORY_UDEREF
9680+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9681+#else
9682+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9683+#endif
9684+
9685+#endif
9686+
9687 #ifndef __ASSEMBLY__
9688
9689 extern int direct_gbpages;
9690@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
9691 * dst and src can be on the same page, but the range must not overlap,
9692 * and must not cross a page boundary.
9693 */
9694-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9695+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9696 {
9697- memcpy(dst, src, count * sizeof(pgd_t));
9698+ pax_open_kernel();
9699+ while (count--)
9700+ *dst++ = *src++;
9701+ pax_close_kernel();
9702 }
9703
9704+#ifdef CONFIG_PAX_PER_CPU_PGD
9705+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9706+#endif
9707+
9708+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9709+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9710+#else
9711+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9712+#endif
9713
9714 #include <asm-generic/pgtable.h>
9715 #endif /* __ASSEMBLY__ */
9716diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
9717index 0c92113..34a77c6 100644
9718--- a/arch/x86/include/asm/pgtable_32.h
9719+++ b/arch/x86/include/asm/pgtable_32.h
9720@@ -25,9 +25,6 @@
9721 struct mm_struct;
9722 struct vm_area_struct;
9723
9724-extern pgd_t swapper_pg_dir[1024];
9725-extern pgd_t initial_page_table[1024];
9726-
9727 static inline void pgtable_cache_init(void) { }
9728 static inline void check_pgt_cache(void) { }
9729 void paging_init(void);
9730@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9731 # include <asm/pgtable-2level.h>
9732 #endif
9733
9734+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9735+extern pgd_t initial_page_table[PTRS_PER_PGD];
9736+#ifdef CONFIG_X86_PAE
9737+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9738+#endif
9739+
9740 #if defined(CONFIG_HIGHPTE)
9741 #define pte_offset_map(dir, address) \
9742 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
9743@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9744 /* Clear a kernel PTE and flush it from the TLB */
9745 #define kpte_clear_flush(ptep, vaddr) \
9746 do { \
9747+ pax_open_kernel(); \
9748 pte_clear(&init_mm, (vaddr), (ptep)); \
9749+ pax_close_kernel(); \
9750 __flush_tlb_one((vaddr)); \
9751 } while (0)
9752
9753@@ -74,6 +79,9 @@ do { \
9754
9755 #endif /* !__ASSEMBLY__ */
9756
9757+#define HAVE_ARCH_UNMAPPED_AREA
9758+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9759+
9760 /*
9761 * kern_addr_valid() is (1) for FLATMEM and (0) for
9762 * SPARSEMEM and DISCONTIGMEM
9763diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
9764index ed5903b..c7fe163 100644
9765--- a/arch/x86/include/asm/pgtable_32_types.h
9766+++ b/arch/x86/include/asm/pgtable_32_types.h
9767@@ -8,7 +8,7 @@
9768 */
9769 #ifdef CONFIG_X86_PAE
9770 # include <asm/pgtable-3level_types.h>
9771-# define PMD_SIZE (1UL << PMD_SHIFT)
9772+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9773 # define PMD_MASK (~(PMD_SIZE - 1))
9774 #else
9775 # include <asm/pgtable-2level_types.h>
9776@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
9777 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9778 #endif
9779
9780+#ifdef CONFIG_PAX_KERNEXEC
9781+#ifndef __ASSEMBLY__
9782+extern unsigned char MODULES_EXEC_VADDR[];
9783+extern unsigned char MODULES_EXEC_END[];
9784+#endif
9785+#include <asm/boot.h>
9786+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9787+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9788+#else
9789+#define ktla_ktva(addr) (addr)
9790+#define ktva_ktla(addr) (addr)
9791+#endif
9792+
9793 #define MODULES_VADDR VMALLOC_START
9794 #define MODULES_END VMALLOC_END
9795 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
9796diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
9797index 975f709..107976d 100644
9798--- a/arch/x86/include/asm/pgtable_64.h
9799+++ b/arch/x86/include/asm/pgtable_64.h
9800@@ -16,10 +16,14 @@
9801
9802 extern pud_t level3_kernel_pgt[512];
9803 extern pud_t level3_ident_pgt[512];
9804+extern pud_t level3_vmalloc_start_pgt[512];
9805+extern pud_t level3_vmalloc_end_pgt[512];
9806+extern pud_t level3_vmemmap_pgt[512];
9807+extern pud_t level2_vmemmap_pgt[512];
9808 extern pmd_t level2_kernel_pgt[512];
9809 extern pmd_t level2_fixmap_pgt[512];
9810-extern pmd_t level2_ident_pgt[512];
9811-extern pgd_t init_level4_pgt[];
9812+extern pmd_t level2_ident_pgt[512*2];
9813+extern pgd_t init_level4_pgt[512];
9814
9815 #define swapper_pg_dir init_level4_pgt
9816
9817@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9818
9819 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9820 {
9821+ pax_open_kernel();
9822 *pmdp = pmd;
9823+ pax_close_kernel();
9824 }
9825
9826 static inline void native_pmd_clear(pmd_t *pmd)
9827@@ -107,6 +113,13 @@ static inline void native_pud_clear(pud_t *pud)
9828
9829 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9830 {
9831+ pax_open_kernel();
9832+ *pgdp = pgd;
9833+ pax_close_kernel();
9834+}
9835+
9836+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9837+{
9838 *pgdp = pgd;
9839 }
9840
9841diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
9842index 766ea16..5b96cb3 100644
9843--- a/arch/x86/include/asm/pgtable_64_types.h
9844+++ b/arch/x86/include/asm/pgtable_64_types.h
9845@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9846 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9847 #define MODULES_END _AC(0xffffffffff000000, UL)
9848 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9849+#define MODULES_EXEC_VADDR MODULES_VADDR
9850+#define MODULES_EXEC_END MODULES_END
9851+
9852+#define ktla_ktva(addr) (addr)
9853+#define ktva_ktla(addr) (addr)
9854
9855 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9856diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
9857index 013286a..8b42f4f 100644
9858--- a/arch/x86/include/asm/pgtable_types.h
9859+++ b/arch/x86/include/asm/pgtable_types.h
9860@@ -16,13 +16,12 @@
9861 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9862 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9863 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9864-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9865+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9866 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9867 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9868 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9869-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9870-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9871-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
9872+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9873+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
9874 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9875
9876 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9877@@ -40,7 +39,6 @@
9878 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9879 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9880 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9881-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9882 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9883 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9884 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9885@@ -57,8 +55,10 @@
9886
9887 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9888 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9889-#else
9890+#elif defined(CONFIG_KMEMCHECK)
9891 #define _PAGE_NX (_AT(pteval_t, 0))
9892+#else
9893+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9894 #endif
9895
9896 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9897@@ -96,6 +96,9 @@
9898 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9899 _PAGE_ACCESSED)
9900
9901+#define PAGE_READONLY_NOEXEC PAGE_READONLY
9902+#define PAGE_SHARED_NOEXEC PAGE_SHARED
9903+
9904 #define __PAGE_KERNEL_EXEC \
9905 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9906 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9907@@ -106,7 +109,7 @@
9908 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9909 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9910 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9911-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9912+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9913 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
9914 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
9915 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9916@@ -168,8 +171,8 @@
9917 * bits are combined, this will alow user to access the high address mapped
9918 * VDSO in the presence of CONFIG_COMPAT_VDSO
9919 */
9920-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9921-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9922+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9923+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9924 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9925 #endif
9926
9927@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
9928 {
9929 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9930 }
9931+#endif
9932
9933+#if PAGETABLE_LEVELS == 3
9934+#include <asm-generic/pgtable-nopud.h>
9935+#endif
9936+
9937+#if PAGETABLE_LEVELS == 2
9938+#include <asm-generic/pgtable-nopmd.h>
9939+#endif
9940+
9941+#ifndef __ASSEMBLY__
9942 #if PAGETABLE_LEVELS > 3
9943 typedef struct { pudval_t pud; } pud_t;
9944
9945@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
9946 return pud.pud;
9947 }
9948 #else
9949-#include <asm-generic/pgtable-nopud.h>
9950-
9951 static inline pudval_t native_pud_val(pud_t pud)
9952 {
9953 return native_pgd_val(pud.pgd);
9954@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
9955 return pmd.pmd;
9956 }
9957 #else
9958-#include <asm-generic/pgtable-nopmd.h>
9959-
9960 static inline pmdval_t native_pmd_val(pmd_t pmd)
9961 {
9962 return native_pgd_val(pmd.pud.pgd);
9963@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
9964
9965 extern pteval_t __supported_pte_mask;
9966 extern void set_nx(void);
9967-extern int nx_enabled;
9968
9969 #define pgprot_writecombine pgprot_writecombine
9970 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9971diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
9972index bb3ee36..781a6b8 100644
9973--- a/arch/x86/include/asm/processor.h
9974+++ b/arch/x86/include/asm/processor.h
9975@@ -268,7 +268,7 @@ struct tss_struct {
9976
9977 } ____cacheline_aligned;
9978
9979-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9980+extern struct tss_struct init_tss[NR_CPUS];
9981
9982 /*
9983 * Save the original ist values for checking stack pointers during debugging
9984@@ -861,11 +861,18 @@ static inline void spin_lock_prefetch(const void *x)
9985 */
9986 #define TASK_SIZE PAGE_OFFSET
9987 #define TASK_SIZE_MAX TASK_SIZE
9988+
9989+#ifdef CONFIG_PAX_SEGMEXEC
9990+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9991+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9992+#else
9993 #define STACK_TOP TASK_SIZE
9994-#define STACK_TOP_MAX STACK_TOP
9995+#endif
9996+
9997+#define STACK_TOP_MAX TASK_SIZE
9998
9999 #define INIT_THREAD { \
10000- .sp0 = sizeof(init_stack) + (long)&init_stack, \
10001+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
10002 .vm86_info = NULL, \
10003 .sysenter_cs = __KERNEL_CS, \
10004 .io_bitmap_ptr = NULL, \
10005@@ -879,7 +886,7 @@ static inline void spin_lock_prefetch(const void *x)
10006 */
10007 #define INIT_TSS { \
10008 .x86_tss = { \
10009- .sp0 = sizeof(init_stack) + (long)&init_stack, \
10010+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
10011 .ss0 = __KERNEL_DS, \
10012 .ss1 = __KERNEL_CS, \
10013 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
10014@@ -890,11 +897,7 @@ static inline void spin_lock_prefetch(const void *x)
10015 extern unsigned long thread_saved_pc(struct task_struct *tsk);
10016
10017 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
10018-#define KSTK_TOP(info) \
10019-({ \
10020- unsigned long *__ptr = (unsigned long *)(info); \
10021- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
10022-})
10023+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
10024
10025 /*
10026 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
10027@@ -909,7 +912,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
10028 #define task_pt_regs(task) \
10029 ({ \
10030 struct pt_regs *__regs__; \
10031- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
10032+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
10033 __regs__ - 1; \
10034 })
10035
10036@@ -919,13 +922,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
10037 /*
10038 * User space process size. 47bits minus one guard page.
10039 */
10040-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
10041+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
10042
10043 /* This decides where the kernel will search for a free chunk of vm
10044 * space during mmap's.
10045 */
10046 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
10047- 0xc0000000 : 0xFFFFe000)
10048+ 0xc0000000 : 0xFFFFf000)
10049
10050 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
10051 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
10052@@ -936,11 +939,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
10053 #define STACK_TOP_MAX TASK_SIZE_MAX
10054
10055 #define INIT_THREAD { \
10056- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
10057+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
10058 }
10059
10060 #define INIT_TSS { \
10061- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
10062+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
10063 }
10064
10065 /*
10066@@ -962,6 +965,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
10067 */
10068 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
10069
10070+#ifdef CONFIG_PAX_SEGMEXEC
10071+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
10072+#endif
10073+
10074 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
10075
10076 /* Get/set a process' ability to use the timestamp counter instruction */
10077diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
10078index 3566454..4bdfb8c 100644
10079--- a/arch/x86/include/asm/ptrace.h
10080+++ b/arch/x86/include/asm/ptrace.h
10081@@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
10082 }
10083
10084 /*
10085- * user_mode_vm(regs) determines whether a register set came from user mode.
10086+ * user_mode(regs) determines whether a register set came from user mode.
10087 * This is true if V8086 mode was enabled OR if the register set was from
10088 * protected mode with RPL-3 CS value. This tricky test checks that with
10089 * one comparison. Many places in the kernel can bypass this full check
10090- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
10091+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
10092+ * be used.
10093 */
10094-static inline int user_mode(struct pt_regs *regs)
10095+static inline int user_mode_novm(struct pt_regs *regs)
10096 {
10097 #ifdef CONFIG_X86_32
10098 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
10099 #else
10100- return !!(regs->cs & 3);
10101+ return !!(regs->cs & SEGMENT_RPL_MASK);
10102 #endif
10103 }
10104
10105-static inline int user_mode_vm(struct pt_regs *regs)
10106+static inline int user_mode(struct pt_regs *regs)
10107 {
10108 #ifdef CONFIG_X86_32
10109 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
10110 USER_RPL;
10111 #else
10112- return user_mode(regs);
10113+ return user_mode_novm(regs);
10114 #endif
10115 }
10116
10117@@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
10118 #ifdef CONFIG_X86_64
10119 static inline bool user_64bit_mode(struct pt_regs *regs)
10120 {
10121+ unsigned long cs = regs->cs & 0xffff;
10122 #ifndef CONFIG_PARAVIRT
10123 /*
10124 * On non-paravirt systems, this is the only long mode CPL 3
10125 * selector. We do not allow long mode selectors in the LDT.
10126 */
10127- return regs->cs == __USER_CS;
10128+ return cs == __USER_CS;
10129 #else
10130 /* Headers are too twisted for this to go in paravirt.h. */
10131- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
10132+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
10133 #endif
10134 }
10135 #endif
10136diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
10137index 92f29706..a79cbbb 100644
10138--- a/arch/x86/include/asm/reboot.h
10139+++ b/arch/x86/include/asm/reboot.h
10140@@ -6,19 +6,19 @@
10141 struct pt_regs;
10142
10143 struct machine_ops {
10144- void (*restart)(char *cmd);
10145- void (*halt)(void);
10146- void (*power_off)(void);
10147+ void (* __noreturn restart)(char *cmd);
10148+ void (* __noreturn halt)(void);
10149+ void (* __noreturn power_off)(void);
10150 void (*shutdown)(void);
10151 void (*crash_shutdown)(struct pt_regs *);
10152- void (*emergency_restart)(void);
10153-};
10154+ void (* __noreturn emergency_restart)(void);
10155+} __no_const;
10156
10157 extern struct machine_ops machine_ops;
10158
10159 void native_machine_crash_shutdown(struct pt_regs *regs);
10160 void native_machine_shutdown(void);
10161-void machine_real_restart(unsigned int type);
10162+void machine_real_restart(unsigned int type) __noreturn;
10163 /* These must match dispatch_table in reboot_32.S */
10164 #define MRR_BIOS 0
10165 #define MRR_APM 1
10166diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
10167index 2dbe4a7..ce1db00 100644
10168--- a/arch/x86/include/asm/rwsem.h
10169+++ b/arch/x86/include/asm/rwsem.h
10170@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
10171 {
10172 asm volatile("# beginning down_read\n\t"
10173 LOCK_PREFIX _ASM_INC "(%1)\n\t"
10174+
10175+#ifdef CONFIG_PAX_REFCOUNT
10176+ "jno 0f\n"
10177+ LOCK_PREFIX _ASM_DEC "(%1)\n"
10178+ "int $4\n0:\n"
10179+ _ASM_EXTABLE(0b, 0b)
10180+#endif
10181+
10182 /* adds 0x00000001 */
10183 " jns 1f\n"
10184 " call call_rwsem_down_read_failed\n"
10185@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
10186 "1:\n\t"
10187 " mov %1,%2\n\t"
10188 " add %3,%2\n\t"
10189+
10190+#ifdef CONFIG_PAX_REFCOUNT
10191+ "jno 0f\n"
10192+ "sub %3,%2\n"
10193+ "int $4\n0:\n"
10194+ _ASM_EXTABLE(0b, 0b)
10195+#endif
10196+
10197 " jle 2f\n\t"
10198 LOCK_PREFIX " cmpxchg %2,%0\n\t"
10199 " jnz 1b\n\t"
10200@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
10201 long tmp;
10202 asm volatile("# beginning down_write\n\t"
10203 LOCK_PREFIX " xadd %1,(%2)\n\t"
10204+
10205+#ifdef CONFIG_PAX_REFCOUNT
10206+ "jno 0f\n"
10207+ "mov %1,(%2)\n"
10208+ "int $4\n0:\n"
10209+ _ASM_EXTABLE(0b, 0b)
10210+#endif
10211+
10212 /* adds 0xffff0001, returns the old value */
10213 " test %1,%1\n\t"
10214 /* was the count 0 before? */
10215@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
10216 long tmp;
10217 asm volatile("# beginning __up_read\n\t"
10218 LOCK_PREFIX " xadd %1,(%2)\n\t"
10219+
10220+#ifdef CONFIG_PAX_REFCOUNT
10221+ "jno 0f\n"
10222+ "mov %1,(%2)\n"
10223+ "int $4\n0:\n"
10224+ _ASM_EXTABLE(0b, 0b)
10225+#endif
10226+
10227 /* subtracts 1, returns the old value */
10228 " jns 1f\n\t"
10229 " call call_rwsem_wake\n" /* expects old value in %edx */
10230@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
10231 long tmp;
10232 asm volatile("# beginning __up_write\n\t"
10233 LOCK_PREFIX " xadd %1,(%2)\n\t"
10234+
10235+#ifdef CONFIG_PAX_REFCOUNT
10236+ "jno 0f\n"
10237+ "mov %1,(%2)\n"
10238+ "int $4\n0:\n"
10239+ _ASM_EXTABLE(0b, 0b)
10240+#endif
10241+
10242 /* subtracts 0xffff0001, returns the old value */
10243 " jns 1f\n\t"
10244 " call call_rwsem_wake\n" /* expects old value in %edx */
10245@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
10246 {
10247 asm volatile("# beginning __downgrade_write\n\t"
10248 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
10249+
10250+#ifdef CONFIG_PAX_REFCOUNT
10251+ "jno 0f\n"
10252+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
10253+ "int $4\n0:\n"
10254+ _ASM_EXTABLE(0b, 0b)
10255+#endif
10256+
10257 /*
10258 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10259 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10260@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
10261 */
10262 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
10263 {
10264- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10265+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10266+
10267+#ifdef CONFIG_PAX_REFCOUNT
10268+ "jno 0f\n"
10269+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
10270+ "int $4\n0:\n"
10271+ _ASM_EXTABLE(0b, 0b)
10272+#endif
10273+
10274 : "+m" (sem->count)
10275 : "er" (delta));
10276 }
10277@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
10278 */
10279 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
10280 {
10281- return delta + xadd(&sem->count, delta);
10282+ return delta + xadd_check_overflow(&sem->count, delta);
10283 }
10284
10285 #endif /* __KERNEL__ */
10286diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
10287index 5e64171..f58957e 100644
10288--- a/arch/x86/include/asm/segment.h
10289+++ b/arch/x86/include/asm/segment.h
10290@@ -64,10 +64,15 @@
10291 * 26 - ESPFIX small SS
10292 * 27 - per-cpu [ offset to per-cpu data area ]
10293 * 28 - stack_canary-20 [ for stack protector ]
10294- * 29 - unused
10295- * 30 - unused
10296+ * 29 - PCI BIOS CS
10297+ * 30 - PCI BIOS DS
10298 * 31 - TSS for double fault handler
10299 */
10300+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
10301+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
10302+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
10303+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
10304+
10305 #define GDT_ENTRY_TLS_MIN 6
10306 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
10307
10308@@ -79,6 +84,8 @@
10309
10310 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
10311
10312+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10313+
10314 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
10315
10316 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
10317@@ -104,6 +111,12 @@
10318 #define __KERNEL_STACK_CANARY 0
10319 #endif
10320
10321+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
10322+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10323+
10324+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
10325+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10326+
10327 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10328
10329 /*
10330@@ -141,7 +154,7 @@
10331 */
10332
10333 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10334-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10335+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10336
10337
10338 #else
10339@@ -165,6 +178,8 @@
10340 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
10341 #define __USER32_DS __USER_DS
10342
10343+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10344+
10345 #define GDT_ENTRY_TSS 8 /* needs two entries */
10346 #define GDT_ENTRY_LDT 10 /* needs two entries */
10347 #define GDT_ENTRY_TLS_MIN 12
10348@@ -185,6 +200,7 @@
10349 #endif
10350
10351 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
10352+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
10353 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
10354 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
10355 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
10356diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
10357index 73b11bc..d4a3b63 100644
10358--- a/arch/x86/include/asm/smp.h
10359+++ b/arch/x86/include/asm/smp.h
10360@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10361 /* cpus sharing the last level cache: */
10362 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
10363 DECLARE_PER_CPU(u16, cpu_llc_id);
10364-DECLARE_PER_CPU(int, cpu_number);
10365+DECLARE_PER_CPU(unsigned int, cpu_number);
10366
10367 static inline struct cpumask *cpu_sibling_mask(int cpu)
10368 {
10369@@ -77,7 +77,7 @@ struct smp_ops {
10370
10371 void (*send_call_func_ipi)(const struct cpumask *mask);
10372 void (*send_call_func_single_ipi)(int cpu);
10373-};
10374+} __no_const;
10375
10376 /* Globals due to paravirt */
10377 extern void set_cpu_sibling_map(int cpu);
10378@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
10379 extern int safe_smp_processor_id(void);
10380
10381 #elif defined(CONFIG_X86_64_SMP)
10382-#define raw_smp_processor_id() (percpu_read(cpu_number))
10383-
10384-#define stack_smp_processor_id() \
10385-({ \
10386- struct thread_info *ti; \
10387- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10388- ti->cpu; \
10389-})
10390+#define raw_smp_processor_id() (percpu_read(cpu_number))
10391+#define stack_smp_processor_id() raw_smp_processor_id()
10392 #define safe_smp_processor_id() smp_processor_id()
10393
10394 #endif
10395diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
10396index 972c260..43ab1fd 100644
10397--- a/arch/x86/include/asm/spinlock.h
10398+++ b/arch/x86/include/asm/spinlock.h
10399@@ -188,6 +188,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
10400 static inline void arch_read_lock(arch_rwlock_t *rw)
10401 {
10402 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
10403+
10404+#ifdef CONFIG_PAX_REFCOUNT
10405+ "jno 0f\n"
10406+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
10407+ "int $4\n0:\n"
10408+ _ASM_EXTABLE(0b, 0b)
10409+#endif
10410+
10411 "jns 1f\n"
10412 "call __read_lock_failed\n\t"
10413 "1:\n"
10414@@ -197,6 +205,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
10415 static inline void arch_write_lock(arch_rwlock_t *rw)
10416 {
10417 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
10418+
10419+#ifdef CONFIG_PAX_REFCOUNT
10420+ "jno 0f\n"
10421+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
10422+ "int $4\n0:\n"
10423+ _ASM_EXTABLE(0b, 0b)
10424+#endif
10425+
10426 "jz 1f\n"
10427 "call __write_lock_failed\n\t"
10428 "1:\n"
10429@@ -226,13 +242,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
10430
10431 static inline void arch_read_unlock(arch_rwlock_t *rw)
10432 {
10433- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
10434+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
10435+
10436+#ifdef CONFIG_PAX_REFCOUNT
10437+ "jno 0f\n"
10438+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
10439+ "int $4\n0:\n"
10440+ _ASM_EXTABLE(0b, 0b)
10441+#endif
10442+
10443 :"+m" (rw->lock) : : "memory");
10444 }
10445
10446 static inline void arch_write_unlock(arch_rwlock_t *rw)
10447 {
10448- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
10449+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
10450+
10451+#ifdef CONFIG_PAX_REFCOUNT
10452+ "jno 0f\n"
10453+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
10454+ "int $4\n0:\n"
10455+ _ASM_EXTABLE(0b, 0b)
10456+#endif
10457+
10458 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
10459 }
10460
10461diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
10462index 1575177..cb23f52 100644
10463--- a/arch/x86/include/asm/stackprotector.h
10464+++ b/arch/x86/include/asm/stackprotector.h
10465@@ -48,7 +48,7 @@
10466 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10467 */
10468 #define GDT_STACK_CANARY_INIT \
10469- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10470+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10471
10472 /*
10473 * Initialize the stackprotector canary value.
10474@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
10475
10476 static inline void load_stack_canary_segment(void)
10477 {
10478-#ifdef CONFIG_X86_32
10479+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10480 asm volatile ("mov %0, %%gs" : : "r" (0));
10481 #endif
10482 }
10483diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
10484index 70bbe39..4ae2bd4 100644
10485--- a/arch/x86/include/asm/stacktrace.h
10486+++ b/arch/x86/include/asm/stacktrace.h
10487@@ -11,28 +11,20 @@
10488
10489 extern int kstack_depth_to_print;
10490
10491-struct thread_info;
10492+struct task_struct;
10493 struct stacktrace_ops;
10494
10495-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
10496- unsigned long *stack,
10497- unsigned long bp,
10498- const struct stacktrace_ops *ops,
10499- void *data,
10500- unsigned long *end,
10501- int *graph);
10502+typedef unsigned long walk_stack_t(struct task_struct *task,
10503+ void *stack_start,
10504+ unsigned long *stack,
10505+ unsigned long bp,
10506+ const struct stacktrace_ops *ops,
10507+ void *data,
10508+ unsigned long *end,
10509+ int *graph);
10510
10511-extern unsigned long
10512-print_context_stack(struct thread_info *tinfo,
10513- unsigned long *stack, unsigned long bp,
10514- const struct stacktrace_ops *ops, void *data,
10515- unsigned long *end, int *graph);
10516-
10517-extern unsigned long
10518-print_context_stack_bp(struct thread_info *tinfo,
10519- unsigned long *stack, unsigned long bp,
10520- const struct stacktrace_ops *ops, void *data,
10521- unsigned long *end, int *graph);
10522+extern walk_stack_t print_context_stack;
10523+extern walk_stack_t print_context_stack_bp;
10524
10525 /* Generic stack tracer with callbacks */
10526
10527@@ -40,7 +32,7 @@ struct stacktrace_ops {
10528 void (*address)(void *data, unsigned long address, int reliable);
10529 /* On negative return stop dumping */
10530 int (*stack)(void *data, char *name);
10531- walk_stack_t walk_stack;
10532+ walk_stack_t *walk_stack;
10533 };
10534
10535 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
10536diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
10537index cb23852..2dde194 100644
10538--- a/arch/x86/include/asm/sys_ia32.h
10539+++ b/arch/x86/include/asm/sys_ia32.h
10540@@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
10541 compat_sigset_t __user *, unsigned int);
10542 asmlinkage long sys32_alarm(unsigned int);
10543
10544-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
10545+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
10546 asmlinkage long sys32_sysfs(int, u32, u32);
10547
10548 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
10549diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
10550index 2d2f01c..f985723 100644
10551--- a/arch/x86/include/asm/system.h
10552+++ b/arch/x86/include/asm/system.h
10553@@ -129,7 +129,7 @@ do { \
10554 "call __switch_to\n\t" \
10555 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10556 __switch_canary \
10557- "movq %P[thread_info](%%rsi),%%r8\n\t" \
10558+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10559 "movq %%rax,%%rdi\n\t" \
10560 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10561 "jnz ret_from_fork\n\t" \
10562@@ -140,7 +140,7 @@ do { \
10563 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10564 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10565 [_tif_fork] "i" (_TIF_FORK), \
10566- [thread_info] "i" (offsetof(struct task_struct, stack)), \
10567+ [thread_info] "m" (current_tinfo), \
10568 [current_task] "m" (current_task) \
10569 __switch_canary_iparam \
10570 : "memory", "cc" __EXTRA_CLOBBER)
10571@@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
10572 {
10573 unsigned long __limit;
10574 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10575- return __limit + 1;
10576+ return __limit;
10577 }
10578
10579 static inline void native_clts(void)
10580@@ -397,13 +397,13 @@ void enable_hlt(void);
10581
10582 void cpu_idle_wait(void);
10583
10584-extern unsigned long arch_align_stack(unsigned long sp);
10585+#define arch_align_stack(x) ((x) & ~0xfUL)
10586 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10587
10588 void default_idle(void);
10589 bool set_pm_idle_to_default(void);
10590
10591-void stop_this_cpu(void *dummy);
10592+void stop_this_cpu(void *dummy) __noreturn;
10593
10594 /*
10595 * Force strict CPU ordering.
10596diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
10597index d7ef849..6af292e 100644
10598--- a/arch/x86/include/asm/thread_info.h
10599+++ b/arch/x86/include/asm/thread_info.h
10600@@ -10,6 +10,7 @@
10601 #include <linux/compiler.h>
10602 #include <asm/page.h>
10603 #include <asm/types.h>
10604+#include <asm/percpu.h>
10605
10606 /*
10607 * low level task data that entry.S needs immediate access to
10608@@ -24,7 +25,6 @@ struct exec_domain;
10609 #include <linux/atomic.h>
10610
10611 struct thread_info {
10612- struct task_struct *task; /* main task structure */
10613 struct exec_domain *exec_domain; /* execution domain */
10614 __u32 flags; /* low level flags */
10615 __u32 status; /* thread synchronous flags */
10616@@ -34,18 +34,12 @@ struct thread_info {
10617 mm_segment_t addr_limit;
10618 struct restart_block restart_block;
10619 void __user *sysenter_return;
10620-#ifdef CONFIG_X86_32
10621- unsigned long previous_esp; /* ESP of the previous stack in
10622- case of nested (IRQ) stacks
10623- */
10624- __u8 supervisor_stack[0];
10625-#endif
10626+ unsigned long lowest_stack;
10627 int uaccess_err;
10628 };
10629
10630-#define INIT_THREAD_INFO(tsk) \
10631+#define INIT_THREAD_INFO \
10632 { \
10633- .task = &tsk, \
10634 .exec_domain = &default_exec_domain, \
10635 .flags = 0, \
10636 .cpu = 0, \
10637@@ -56,7 +50,7 @@ struct thread_info {
10638 }, \
10639 }
10640
10641-#define init_thread_info (init_thread_union.thread_info)
10642+#define init_thread_info (init_thread_union.stack)
10643 #define init_stack (init_thread_union.stack)
10644
10645 #else /* !__ASSEMBLY__ */
10646@@ -170,45 +164,40 @@ struct thread_info {
10647 ret; \
10648 })
10649
10650-#ifdef CONFIG_X86_32
10651-
10652-#define STACK_WARN (THREAD_SIZE/8)
10653-/*
10654- * macros/functions for gaining access to the thread information structure
10655- *
10656- * preempt_count needs to be 1 initially, until the scheduler is functional.
10657- */
10658-#ifndef __ASSEMBLY__
10659-
10660-
10661-/* how to get the current stack pointer from C */
10662-register unsigned long current_stack_pointer asm("esp") __used;
10663-
10664-/* how to get the thread information struct from C */
10665-static inline struct thread_info *current_thread_info(void)
10666-{
10667- return (struct thread_info *)
10668- (current_stack_pointer & ~(THREAD_SIZE - 1));
10669-}
10670-
10671-#else /* !__ASSEMBLY__ */
10672-
10673+#ifdef __ASSEMBLY__
10674 /* how to get the thread information struct from ASM */
10675 #define GET_THREAD_INFO(reg) \
10676- movl $-THREAD_SIZE, reg; \
10677- andl %esp, reg
10678+ mov PER_CPU_VAR(current_tinfo), reg
10679
10680 /* use this one if reg already contains %esp */
10681-#define GET_THREAD_INFO_WITH_ESP(reg) \
10682- andl $-THREAD_SIZE, reg
10683+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10684+#else
10685+/* how to get the thread information struct from C */
10686+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10687+
10688+static __always_inline struct thread_info *current_thread_info(void)
10689+{
10690+ return percpu_read_stable(current_tinfo);
10691+}
10692+#endif
10693+
10694+#ifdef CONFIG_X86_32
10695+
10696+#define STACK_WARN (THREAD_SIZE/8)
10697+/*
10698+ * macros/functions for gaining access to the thread information structure
10699+ *
10700+ * preempt_count needs to be 1 initially, until the scheduler is functional.
10701+ */
10702+#ifndef __ASSEMBLY__
10703+
10704+/* how to get the current stack pointer from C */
10705+register unsigned long current_stack_pointer asm("esp") __used;
10706
10707 #endif
10708
10709 #else /* X86_32 */
10710
10711-#include <asm/percpu.h>
10712-#define KERNEL_STACK_OFFSET (5*8)
10713-
10714 /*
10715 * macros/functions for gaining access to the thread information structure
10716 * preempt_count needs to be 1 initially, until the scheduler is functional.
10717@@ -216,21 +205,8 @@ static inline struct thread_info *current_thread_info(void)
10718 #ifndef __ASSEMBLY__
10719 DECLARE_PER_CPU(unsigned long, kernel_stack);
10720
10721-static inline struct thread_info *current_thread_info(void)
10722-{
10723- struct thread_info *ti;
10724- ti = (void *)(percpu_read_stable(kernel_stack) +
10725- KERNEL_STACK_OFFSET - THREAD_SIZE);
10726- return ti;
10727-}
10728-
10729-#else /* !__ASSEMBLY__ */
10730-
10731-/* how to get the thread information struct from ASM */
10732-#define GET_THREAD_INFO(reg) \
10733- movq PER_CPU_VAR(kernel_stack),reg ; \
10734- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10735-
10736+/* how to get the current stack pointer from C */
10737+register unsigned long current_stack_pointer asm("rsp") __used;
10738 #endif
10739
10740 #endif /* !X86_32 */
10741@@ -264,5 +240,16 @@ extern void arch_task_cache_init(void);
10742 extern void free_thread_info(struct thread_info *ti);
10743 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10744 #define arch_task_cache_init arch_task_cache_init
10745+
10746+#define __HAVE_THREAD_FUNCTIONS
10747+#define task_thread_info(task) (&(task)->tinfo)
10748+#define task_stack_page(task) ((task)->stack)
10749+#define setup_thread_stack(p, org) do {} while (0)
10750+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10751+
10752+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10753+extern struct task_struct *alloc_task_struct_node(int node);
10754+extern void free_task_struct(struct task_struct *);
10755+
10756 #endif
10757 #endif /* _ASM_X86_THREAD_INFO_H */
10758diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
10759index 36361bf..324f262 100644
10760--- a/arch/x86/include/asm/uaccess.h
10761+++ b/arch/x86/include/asm/uaccess.h
10762@@ -7,12 +7,15 @@
10763 #include <linux/compiler.h>
10764 #include <linux/thread_info.h>
10765 #include <linux/string.h>
10766+#include <linux/sched.h>
10767 #include <asm/asm.h>
10768 #include <asm/page.h>
10769
10770 #define VERIFY_READ 0
10771 #define VERIFY_WRITE 1
10772
10773+extern void check_object_size(const void *ptr, unsigned long n, bool to);
10774+
10775 /*
10776 * The fs value determines whether argument validity checking should be
10777 * performed or not. If get_fs() == USER_DS, checking is performed, with
10778@@ -28,7 +31,12 @@
10779
10780 #define get_ds() (KERNEL_DS)
10781 #define get_fs() (current_thread_info()->addr_limit)
10782+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10783+void __set_fs(mm_segment_t x);
10784+void set_fs(mm_segment_t x);
10785+#else
10786 #define set_fs(x) (current_thread_info()->addr_limit = (x))
10787+#endif
10788
10789 #define segment_eq(a, b) ((a).seg == (b).seg)
10790
10791@@ -76,7 +84,33 @@
10792 * checks that the pointer is in the user space range - after calling
10793 * this function, memory access functions may still return -EFAULT.
10794 */
10795-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10796+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10797+#define access_ok(type, addr, size) \
10798+({ \
10799+ long __size = size; \
10800+ unsigned long __addr = (unsigned long)addr; \
10801+ unsigned long __addr_ao = __addr & PAGE_MASK; \
10802+ unsigned long __end_ao = __addr + __size - 1; \
10803+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
10804+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
10805+ while(__addr_ao <= __end_ao) { \
10806+ char __c_ao; \
10807+ __addr_ao += PAGE_SIZE; \
10808+ if (__size > PAGE_SIZE) \
10809+ cond_resched(); \
10810+ if (__get_user(__c_ao, (char __user *)__addr)) \
10811+ break; \
10812+ if (type != VERIFY_WRITE) { \
10813+ __addr = __addr_ao; \
10814+ continue; \
10815+ } \
10816+ if (__put_user(__c_ao, (char __user *)__addr)) \
10817+ break; \
10818+ __addr = __addr_ao; \
10819+ } \
10820+ } \
10821+ __ret_ao; \
10822+})
10823
10824 /*
10825 * The exception table consists of pairs of addresses: the first is the
10826@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
10827 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10828 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10829
10830-
10831+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10832+#define __copyuser_seg "gs;"
10833+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10834+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10835+#else
10836+#define __copyuser_seg
10837+#define __COPYUSER_SET_ES
10838+#define __COPYUSER_RESTORE_ES
10839+#endif
10840
10841 #ifdef CONFIG_X86_32
10842 #define __put_user_asm_u64(x, addr, err, errret) \
10843- asm volatile("1: movl %%eax,0(%2)\n" \
10844- "2: movl %%edx,4(%2)\n" \
10845+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10846+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10847 "3:\n" \
10848 ".section .fixup,\"ax\"\n" \
10849 "4: movl %3,%0\n" \
10850@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
10851 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10852
10853 #define __put_user_asm_ex_u64(x, addr) \
10854- asm volatile("1: movl %%eax,0(%1)\n" \
10855- "2: movl %%edx,4(%1)\n" \
10856+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10857+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10858 "3:\n" \
10859 _ASM_EXTABLE(1b, 2b - 1b) \
10860 _ASM_EXTABLE(2b, 3b - 2b) \
10861@@ -252,7 +294,7 @@ extern void __put_user_8(void);
10862 __typeof__(*(ptr)) __pu_val; \
10863 __chk_user_ptr(ptr); \
10864 might_fault(); \
10865- __pu_val = x; \
10866+ __pu_val = (x); \
10867 switch (sizeof(*(ptr))) { \
10868 case 1: \
10869 __put_user_x(1, __pu_val, ptr, __ret_pu); \
10870@@ -373,7 +415,7 @@ do { \
10871 } while (0)
10872
10873 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10874- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10875+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10876 "2:\n" \
10877 ".section .fixup,\"ax\"\n" \
10878 "3: mov %3,%0\n" \
10879@@ -381,7 +423,7 @@ do { \
10880 " jmp 2b\n" \
10881 ".previous\n" \
10882 _ASM_EXTABLE(1b, 3b) \
10883- : "=r" (err), ltype(x) \
10884+ : "=r" (err), ltype (x) \
10885 : "m" (__m(addr)), "i" (errret), "0" (err))
10886
10887 #define __get_user_size_ex(x, ptr, size) \
10888@@ -406,7 +448,7 @@ do { \
10889 } while (0)
10890
10891 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10892- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10893+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10894 "2:\n" \
10895 _ASM_EXTABLE(1b, 2b - 1b) \
10896 : ltype(x) : "m" (__m(addr)))
10897@@ -423,13 +465,24 @@ do { \
10898 int __gu_err; \
10899 unsigned long __gu_val; \
10900 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10901- (x) = (__force __typeof__(*(ptr)))__gu_val; \
10902+ (x) = (__typeof__(*(ptr)))__gu_val; \
10903 __gu_err; \
10904 })
10905
10906 /* FIXME: this hack is definitely wrong -AK */
10907 struct __large_struct { unsigned long buf[100]; };
10908-#define __m(x) (*(struct __large_struct __user *)(x))
10909+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10910+#define ____m(x) \
10911+({ \
10912+ unsigned long ____x = (unsigned long)(x); \
10913+ if (____x < PAX_USER_SHADOW_BASE) \
10914+ ____x += PAX_USER_SHADOW_BASE; \
10915+ (void __user *)____x; \
10916+})
10917+#else
10918+#define ____m(x) (x)
10919+#endif
10920+#define __m(x) (*(struct __large_struct __user *)____m(x))
10921
10922 /*
10923 * Tell gcc we read from memory instead of writing: this is because
10924@@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
10925 * aliasing issues.
10926 */
10927 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10928- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10929+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10930 "2:\n" \
10931 ".section .fixup,\"ax\"\n" \
10932 "3: mov %3,%0\n" \
10933@@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
10934 ".previous\n" \
10935 _ASM_EXTABLE(1b, 3b) \
10936 : "=r"(err) \
10937- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10938+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10939
10940 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10941- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10942+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10943 "2:\n" \
10944 _ASM_EXTABLE(1b, 2b - 1b) \
10945 : : ltype(x), "m" (__m(addr)))
10946@@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
10947 * On error, the variable @x is set to zero.
10948 */
10949
10950+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10951+#define __get_user(x, ptr) get_user((x), (ptr))
10952+#else
10953 #define __get_user(x, ptr) \
10954 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10955+#endif
10956
10957 /**
10958 * __put_user: - Write a simple value into user space, with less checking.
10959@@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
10960 * Returns zero on success, or -EFAULT on error.
10961 */
10962
10963+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10964+#define __put_user(x, ptr) put_user((x), (ptr))
10965+#else
10966 #define __put_user(x, ptr) \
10967 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10968+#endif
10969
10970 #define __get_user_unaligned __get_user
10971 #define __put_user_unaligned __put_user
10972@@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
10973 #define get_user_ex(x, ptr) do { \
10974 unsigned long __gue_val; \
10975 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10976- (x) = (__force __typeof__(*(ptr)))__gue_val; \
10977+ (x) = (__typeof__(*(ptr)))__gue_val; \
10978 } while (0)
10979
10980 #ifdef CONFIG_X86_WP_WORKS_OK
10981diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
10982index 566e803..b9521e9 100644
10983--- a/arch/x86/include/asm/uaccess_32.h
10984+++ b/arch/x86/include/asm/uaccess_32.h
10985@@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
10986 static __always_inline unsigned long __must_check
10987 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10988 {
10989+ if ((long)n < 0)
10990+ return n;
10991+
10992 if (__builtin_constant_p(n)) {
10993 unsigned long ret;
10994
10995@@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10996 return ret;
10997 }
10998 }
10999+ if (!__builtin_constant_p(n))
11000+ check_object_size(from, n, true);
11001 return __copy_to_user_ll(to, from, n);
11002 }
11003
11004@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
11005 __copy_to_user(void __user *to, const void *from, unsigned long n)
11006 {
11007 might_fault();
11008+
11009 return __copy_to_user_inatomic(to, from, n);
11010 }
11011
11012 static __always_inline unsigned long
11013 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
11014 {
11015+ if ((long)n < 0)
11016+ return n;
11017+
11018 /* Avoid zeroing the tail if the copy fails..
11019 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
11020 * but as the zeroing behaviour is only significant when n is not
11021@@ -137,6 +146,10 @@ static __always_inline unsigned long
11022 __copy_from_user(void *to, const void __user *from, unsigned long n)
11023 {
11024 might_fault();
11025+
11026+ if ((long)n < 0)
11027+ return n;
11028+
11029 if (__builtin_constant_p(n)) {
11030 unsigned long ret;
11031
11032@@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
11033 return ret;
11034 }
11035 }
11036+ if (!__builtin_constant_p(n))
11037+ check_object_size(to, n, false);
11038 return __copy_from_user_ll(to, from, n);
11039 }
11040
11041@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
11042 const void __user *from, unsigned long n)
11043 {
11044 might_fault();
11045+
11046+ if ((long)n < 0)
11047+ return n;
11048+
11049 if (__builtin_constant_p(n)) {
11050 unsigned long ret;
11051
11052@@ -181,15 +200,19 @@ static __always_inline unsigned long
11053 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
11054 unsigned long n)
11055 {
11056- return __copy_from_user_ll_nocache_nozero(to, from, n);
11057+ if ((long)n < 0)
11058+ return n;
11059+
11060+ return __copy_from_user_ll_nocache_nozero(to, from, n);
11061 }
11062
11063-unsigned long __must_check copy_to_user(void __user *to,
11064- const void *from, unsigned long n);
11065-unsigned long __must_check _copy_from_user(void *to,
11066- const void __user *from,
11067- unsigned long n);
11068-
11069+extern void copy_to_user_overflow(void)
11070+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
11071+ __compiletime_error("copy_to_user() buffer size is not provably correct")
11072+#else
11073+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
11074+#endif
11075+;
11076
11077 extern void copy_from_user_overflow(void)
11078 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
11079@@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
11080 #endif
11081 ;
11082
11083-static inline unsigned long __must_check copy_from_user(void *to,
11084- const void __user *from,
11085- unsigned long n)
11086+/**
11087+ * copy_to_user: - Copy a block of data into user space.
11088+ * @to: Destination address, in user space.
11089+ * @from: Source address, in kernel space.
11090+ * @n: Number of bytes to copy.
11091+ *
11092+ * Context: User context only. This function may sleep.
11093+ *
11094+ * Copy data from kernel space to user space.
11095+ *
11096+ * Returns number of bytes that could not be copied.
11097+ * On success, this will be zero.
11098+ */
11099+static inline unsigned long __must_check
11100+copy_to_user(void __user *to, const void *from, unsigned long n)
11101+{
11102+ int sz = __compiletime_object_size(from);
11103+
11104+ if (unlikely(sz != -1 && sz < n))
11105+ copy_to_user_overflow();
11106+ else if (access_ok(VERIFY_WRITE, to, n))
11107+ n = __copy_to_user(to, from, n);
11108+ return n;
11109+}
11110+
11111+/**
11112+ * copy_from_user: - Copy a block of data from user space.
11113+ * @to: Destination address, in kernel space.
11114+ * @from: Source address, in user space.
11115+ * @n: Number of bytes to copy.
11116+ *
11117+ * Context: User context only. This function may sleep.
11118+ *
11119+ * Copy data from user space to kernel space.
11120+ *
11121+ * Returns number of bytes that could not be copied.
11122+ * On success, this will be zero.
11123+ *
11124+ * If some data could not be copied, this function will pad the copied
11125+ * data to the requested size using zero bytes.
11126+ */
11127+static inline unsigned long __must_check
11128+copy_from_user(void *to, const void __user *from, unsigned long n)
11129 {
11130 int sz = __compiletime_object_size(to);
11131
11132- if (likely(sz == -1 || sz >= n))
11133- n = _copy_from_user(to, from, n);
11134- else
11135+ if (unlikely(sz != -1 && sz < n))
11136 copy_from_user_overflow();
11137-
11138+ else if (access_ok(VERIFY_READ, from, n))
11139+ n = __copy_from_user(to, from, n);
11140+ else if ((long)n > 0) {
11141+ if (!__builtin_constant_p(n))
11142+ check_object_size(to, n, false);
11143+ memset(to, 0, n);
11144+ }
11145 return n;
11146 }
11147
11148diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
11149index 1c66d30..e66922c 100644
11150--- a/arch/x86/include/asm/uaccess_64.h
11151+++ b/arch/x86/include/asm/uaccess_64.h
11152@@ -10,6 +10,9 @@
11153 #include <asm/alternative.h>
11154 #include <asm/cpufeature.h>
11155 #include <asm/page.h>
11156+#include <asm/pgtable.h>
11157+
11158+#define set_fs(x) (current_thread_info()->addr_limit = (x))
11159
11160 /*
11161 * Copy To/From Userspace
11162@@ -17,12 +20,12 @@
11163
11164 /* Handles exceptions in both to and from, but doesn't do access_ok */
11165 __must_check unsigned long
11166-copy_user_generic_string(void *to, const void *from, unsigned len);
11167+copy_user_generic_string(void *to, const void *from, unsigned long len);
11168 __must_check unsigned long
11169-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
11170+copy_user_generic_unrolled(void *to, const void *from, unsigned long len);
11171
11172 static __always_inline __must_check unsigned long
11173-copy_user_generic(void *to, const void *from, unsigned len)
11174+copy_user_generic(void *to, const void *from, unsigned long len)
11175 {
11176 unsigned ret;
11177
11178@@ -32,142 +35,226 @@ copy_user_generic(void *to, const void *from, unsigned len)
11179 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
11180 "=d" (len)),
11181 "1" (to), "2" (from), "3" (len)
11182- : "memory", "rcx", "r8", "r9", "r10", "r11");
11183+ : "memory", "rcx", "r8", "r9", "r11");
11184 return ret;
11185 }
11186
11187+static __always_inline __must_check unsigned long
11188+__copy_to_user(void __user *to, const void *from, unsigned long len);
11189+static __always_inline __must_check unsigned long
11190+__copy_from_user(void *to, const void __user *from, unsigned long len);
11191 __must_check unsigned long
11192-_copy_to_user(void __user *to, const void *from, unsigned len);
11193-__must_check unsigned long
11194-_copy_from_user(void *to, const void __user *from, unsigned len);
11195-__must_check unsigned long
11196-copy_in_user(void __user *to, const void __user *from, unsigned len);
11197+copy_in_user(void __user *to, const void __user *from, unsigned long len);
11198
11199 static inline unsigned long __must_check copy_from_user(void *to,
11200 const void __user *from,
11201 unsigned long n)
11202 {
11203- int sz = __compiletime_object_size(to);
11204-
11205 might_fault();
11206- if (likely(sz == -1 || sz >= n))
11207- n = _copy_from_user(to, from, n);
11208-#ifdef CONFIG_DEBUG_VM
11209- else
11210- WARN(1, "Buffer overflow detected!\n");
11211-#endif
11212+
11213+ if (access_ok(VERIFY_READ, from, n))
11214+ n = __copy_from_user(to, from, n);
11215+ else if (n < INT_MAX) {
11216+ if (!__builtin_constant_p(n))
11217+ check_object_size(to, n, false);
11218+ memset(to, 0, n);
11219+ }
11220 return n;
11221 }
11222
11223 static __always_inline __must_check
11224-int copy_to_user(void __user *dst, const void *src, unsigned size)
11225+int copy_to_user(void __user *dst, const void *src, unsigned long size)
11226 {
11227 might_fault();
11228
11229- return _copy_to_user(dst, src, size);
11230+ if (access_ok(VERIFY_WRITE, dst, size))
11231+ size = __copy_to_user(dst, src, size);
11232+ return size;
11233 }
11234
11235 static __always_inline __must_check
11236-int __copy_from_user(void *dst, const void __user *src, unsigned size)
11237+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
11238 {
11239- int ret = 0;
11240+ int sz = __compiletime_object_size(dst);
11241+ unsigned ret = 0;
11242
11243 might_fault();
11244- if (!__builtin_constant_p(size))
11245- return copy_user_generic(dst, (__force void *)src, size);
11246+
11247+ if (size > INT_MAX)
11248+ return size;
11249+
11250+#ifdef CONFIG_PAX_MEMORY_UDEREF
11251+ if (!__access_ok(VERIFY_READ, src, size))
11252+ return size;
11253+#endif
11254+
11255+ if (unlikely(sz != -1 && sz < size)) {
11256+#ifdef CONFIG_DEBUG_VM
11257+ WARN(1, "Buffer overflow detected!\n");
11258+#endif
11259+ return size;
11260+ }
11261+
11262+ if (!__builtin_constant_p(size)) {
11263+ check_object_size(dst, size, false);
11264+
11265+#ifdef CONFIG_PAX_MEMORY_UDEREF
11266+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11267+ src += PAX_USER_SHADOW_BASE;
11268+#endif
11269+
11270+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
11271+ }
11272 switch (size) {
11273- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
11274+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
11275 ret, "b", "b", "=q", 1);
11276 return ret;
11277- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
11278+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
11279 ret, "w", "w", "=r", 2);
11280 return ret;
11281- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
11282+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
11283 ret, "l", "k", "=r", 4);
11284 return ret;
11285- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
11286+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11287 ret, "q", "", "=r", 8);
11288 return ret;
11289 case 10:
11290- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
11291+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11292 ret, "q", "", "=r", 10);
11293 if (unlikely(ret))
11294 return ret;
11295 __get_user_asm(*(u16 *)(8 + (char *)dst),
11296- (u16 __user *)(8 + (char __user *)src),
11297+ (const u16 __user *)(8 + (const char __user *)src),
11298 ret, "w", "w", "=r", 2);
11299 return ret;
11300 case 16:
11301- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
11302+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11303 ret, "q", "", "=r", 16);
11304 if (unlikely(ret))
11305 return ret;
11306 __get_user_asm(*(u64 *)(8 + (char *)dst),
11307- (u64 __user *)(8 + (char __user *)src),
11308+ (const u64 __user *)(8 + (const char __user *)src),
11309 ret, "q", "", "=r", 8);
11310 return ret;
11311 default:
11312- return copy_user_generic(dst, (__force void *)src, size);
11313+
11314+#ifdef CONFIG_PAX_MEMORY_UDEREF
11315+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11316+ src += PAX_USER_SHADOW_BASE;
11317+#endif
11318+
11319+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
11320 }
11321 }
11322
11323 static __always_inline __must_check
11324-int __copy_to_user(void __user *dst, const void *src, unsigned size)
11325+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
11326 {
11327- int ret = 0;
11328+ int sz = __compiletime_object_size(src);
11329+ unsigned ret = 0;
11330
11331 might_fault();
11332- if (!__builtin_constant_p(size))
11333- return copy_user_generic((__force void *)dst, src, size);
11334+
11335+ if (size > INT_MAX)
11336+ return size;
11337+
11338+#ifdef CONFIG_PAX_MEMORY_UDEREF
11339+ if (!__access_ok(VERIFY_WRITE, dst, size))
11340+ return size;
11341+#endif
11342+
11343+ if (unlikely(sz != -1 && sz < size)) {
11344+#ifdef CONFIG_DEBUG_VM
11345+ WARN(1, "Buffer overflow detected!\n");
11346+#endif
11347+ return size;
11348+ }
11349+
11350+ if (!__builtin_constant_p(size)) {
11351+ check_object_size(src, size, true);
11352+
11353+#ifdef CONFIG_PAX_MEMORY_UDEREF
11354+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11355+ dst += PAX_USER_SHADOW_BASE;
11356+#endif
11357+
11358+ return copy_user_generic((__force_kernel void *)dst, src, size);
11359+ }
11360 switch (size) {
11361- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
11362+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
11363 ret, "b", "b", "iq", 1);
11364 return ret;
11365- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
11366+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
11367 ret, "w", "w", "ir", 2);
11368 return ret;
11369- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
11370+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
11371 ret, "l", "k", "ir", 4);
11372 return ret;
11373- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
11374+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11375 ret, "q", "", "er", 8);
11376 return ret;
11377 case 10:
11378- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11379+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11380 ret, "q", "", "er", 10);
11381 if (unlikely(ret))
11382 return ret;
11383 asm("":::"memory");
11384- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
11385+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
11386 ret, "w", "w", "ir", 2);
11387 return ret;
11388 case 16:
11389- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11390+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11391 ret, "q", "", "er", 16);
11392 if (unlikely(ret))
11393 return ret;
11394 asm("":::"memory");
11395- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
11396+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
11397 ret, "q", "", "er", 8);
11398 return ret;
11399 default:
11400- return copy_user_generic((__force void *)dst, src, size);
11401+
11402+#ifdef CONFIG_PAX_MEMORY_UDEREF
11403+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11404+ dst += PAX_USER_SHADOW_BASE;
11405+#endif
11406+
11407+ return copy_user_generic((__force_kernel void *)dst, src, size);
11408 }
11409 }
11410
11411 static __always_inline __must_check
11412-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11413+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
11414 {
11415- int ret = 0;
11416+ unsigned ret = 0;
11417
11418 might_fault();
11419- if (!__builtin_constant_p(size))
11420- return copy_user_generic((__force void *)dst,
11421- (__force void *)src, size);
11422+
11423+ if (size > INT_MAX)
11424+ return size;
11425+
11426+#ifdef CONFIG_PAX_MEMORY_UDEREF
11427+ if (!__access_ok(VERIFY_READ, src, size))
11428+ return size;
11429+ if (!__access_ok(VERIFY_WRITE, dst, size))
11430+ return size;
11431+#endif
11432+
11433+ if (!__builtin_constant_p(size)) {
11434+
11435+#ifdef CONFIG_PAX_MEMORY_UDEREF
11436+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11437+ src += PAX_USER_SHADOW_BASE;
11438+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11439+ dst += PAX_USER_SHADOW_BASE;
11440+#endif
11441+
11442+ return copy_user_generic((__force_kernel void *)dst,
11443+ (__force_kernel const void *)src, size);
11444+ }
11445 switch (size) {
11446 case 1: {
11447 u8 tmp;
11448- __get_user_asm(tmp, (u8 __user *)src,
11449+ __get_user_asm(tmp, (const u8 __user *)src,
11450 ret, "b", "b", "=q", 1);
11451 if (likely(!ret))
11452 __put_user_asm(tmp, (u8 __user *)dst,
11453@@ -176,7 +263,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11454 }
11455 case 2: {
11456 u16 tmp;
11457- __get_user_asm(tmp, (u16 __user *)src,
11458+ __get_user_asm(tmp, (const u16 __user *)src,
11459 ret, "w", "w", "=r", 2);
11460 if (likely(!ret))
11461 __put_user_asm(tmp, (u16 __user *)dst,
11462@@ -186,7 +273,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11463
11464 case 4: {
11465 u32 tmp;
11466- __get_user_asm(tmp, (u32 __user *)src,
11467+ __get_user_asm(tmp, (const u32 __user *)src,
11468 ret, "l", "k", "=r", 4);
11469 if (likely(!ret))
11470 __put_user_asm(tmp, (u32 __user *)dst,
11471@@ -195,7 +282,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11472 }
11473 case 8: {
11474 u64 tmp;
11475- __get_user_asm(tmp, (u64 __user *)src,
11476+ __get_user_asm(tmp, (const u64 __user *)src,
11477 ret, "q", "", "=r", 8);
11478 if (likely(!ret))
11479 __put_user_asm(tmp, (u64 __user *)dst,
11480@@ -203,8 +290,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11481 return ret;
11482 }
11483 default:
11484- return copy_user_generic((__force void *)dst,
11485- (__force void *)src, size);
11486+
11487+#ifdef CONFIG_PAX_MEMORY_UDEREF
11488+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11489+ src += PAX_USER_SHADOW_BASE;
11490+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11491+ dst += PAX_USER_SHADOW_BASE;
11492+#endif
11493+
11494+ return copy_user_generic((__force_kernel void *)dst,
11495+ (__force_kernel const void *)src, size);
11496 }
11497 }
11498
11499@@ -219,35 +314,72 @@ __must_check unsigned long clear_user(void __user *mem, unsigned long len);
11500 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
11501
11502 static __must_check __always_inline int
11503-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
11504+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
11505 {
11506- return copy_user_generic(dst, (__force const void *)src, size);
11507+ if (size > INT_MAX)
11508+ return size;
11509+
11510+#ifdef CONFIG_PAX_MEMORY_UDEREF
11511+ if (!__access_ok(VERIFY_READ, src, size))
11512+ return size;
11513+
11514+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11515+ src += PAX_USER_SHADOW_BASE;
11516+#endif
11517+
11518+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
11519 }
11520
11521-static __must_check __always_inline int
11522-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
11523+static __must_check __always_inline unsigned long
11524+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
11525 {
11526- return copy_user_generic((__force void *)dst, src, size);
11527+ if (size > INT_MAX)
11528+ return size;
11529+
11530+#ifdef CONFIG_PAX_MEMORY_UDEREF
11531+ if (!__access_ok(VERIFY_WRITE, dst, size))
11532+ return size;
11533+
11534+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11535+ dst += PAX_USER_SHADOW_BASE;
11536+#endif
11537+
11538+ return copy_user_generic((__force_kernel void *)dst, src, size);
11539 }
11540
11541-extern long __copy_user_nocache(void *dst, const void __user *src,
11542- unsigned size, int zerorest);
11543+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
11544+ unsigned long size, int zerorest);
11545
11546-static inline int
11547-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11548+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
11549 {
11550 might_sleep();
11551+
11552+ if (size > INT_MAX)
11553+ return size;
11554+
11555+#ifdef CONFIG_PAX_MEMORY_UDEREF
11556+ if (!__access_ok(VERIFY_READ, src, size))
11557+ return size;
11558+#endif
11559+
11560 return __copy_user_nocache(dst, src, size, 1);
11561 }
11562
11563-static inline int
11564-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11565- unsigned size)
11566+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11567+ unsigned long size)
11568 {
11569+ if (size > INT_MAX)
11570+ return size;
11571+
11572+#ifdef CONFIG_PAX_MEMORY_UDEREF
11573+ if (!__access_ok(VERIFY_READ, src, size))
11574+ return size;
11575+#endif
11576+
11577 return __copy_user_nocache(dst, src, size, 0);
11578 }
11579
11580-unsigned long
11581-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11582+extern unsigned long
11583+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
11584
11585 #endif /* _ASM_X86_UACCESS_64_H */
11586diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
11587index bb05228..d763d5b 100644
11588--- a/arch/x86/include/asm/vdso.h
11589+++ b/arch/x86/include/asm/vdso.h
11590@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
11591 #define VDSO32_SYMBOL(base, name) \
11592 ({ \
11593 extern const char VDSO32_##name[]; \
11594- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11595+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11596 })
11597 #endif
11598
11599diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
11600index 1971e65..1e3559b 100644
11601--- a/arch/x86/include/asm/x86_init.h
11602+++ b/arch/x86/include/asm/x86_init.h
11603@@ -28,7 +28,7 @@ struct x86_init_mpparse {
11604 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11605 void (*find_smp_config)(void);
11606 void (*get_smp_config)(unsigned int early);
11607-};
11608+} __no_const;
11609
11610 /**
11611 * struct x86_init_resources - platform specific resource related ops
11612@@ -42,7 +42,7 @@ struct x86_init_resources {
11613 void (*probe_roms)(void);
11614 void (*reserve_resources)(void);
11615 char *(*memory_setup)(void);
11616-};
11617+} __no_const;
11618
11619 /**
11620 * struct x86_init_irqs - platform specific interrupt setup
11621@@ -55,7 +55,7 @@ struct x86_init_irqs {
11622 void (*pre_vector_init)(void);
11623 void (*intr_init)(void);
11624 void (*trap_init)(void);
11625-};
11626+} __no_const;
11627
11628 /**
11629 * struct x86_init_oem - oem platform specific customizing functions
11630@@ -65,7 +65,7 @@ struct x86_init_irqs {
11631 struct x86_init_oem {
11632 void (*arch_setup)(void);
11633 void (*banner)(void);
11634-};
11635+} __no_const;
11636
11637 /**
11638 * struct x86_init_mapping - platform specific initial kernel pagetable setup
11639@@ -76,7 +76,7 @@ struct x86_init_oem {
11640 */
11641 struct x86_init_mapping {
11642 void (*pagetable_reserve)(u64 start, u64 end);
11643-};
11644+} __no_const;
11645
11646 /**
11647 * struct x86_init_paging - platform specific paging functions
11648@@ -86,7 +86,7 @@ struct x86_init_mapping {
11649 struct x86_init_paging {
11650 void (*pagetable_setup_start)(pgd_t *base);
11651 void (*pagetable_setup_done)(pgd_t *base);
11652-};
11653+} __no_const;
11654
11655 /**
11656 * struct x86_init_timers - platform specific timer setup
11657@@ -101,7 +101,7 @@ struct x86_init_timers {
11658 void (*tsc_pre_init)(void);
11659 void (*timer_init)(void);
11660 void (*wallclock_init)(void);
11661-};
11662+} __no_const;
11663
11664 /**
11665 * struct x86_init_iommu - platform specific iommu setup
11666@@ -109,7 +109,7 @@ struct x86_init_timers {
11667 */
11668 struct x86_init_iommu {
11669 int (*iommu_init)(void);
11670-};
11671+} __no_const;
11672
11673 /**
11674 * struct x86_init_pci - platform specific pci init functions
11675@@ -123,7 +123,7 @@ struct x86_init_pci {
11676 int (*init)(void);
11677 void (*init_irq)(void);
11678 void (*fixup_irqs)(void);
11679-};
11680+} __no_const;
11681
11682 /**
11683 * struct x86_init_ops - functions for platform specific setup
11684@@ -139,7 +139,7 @@ struct x86_init_ops {
11685 struct x86_init_timers timers;
11686 struct x86_init_iommu iommu;
11687 struct x86_init_pci pci;
11688-};
11689+} __no_const;
11690
11691 /**
11692 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11693@@ -147,7 +147,7 @@ struct x86_init_ops {
11694 */
11695 struct x86_cpuinit_ops {
11696 void (*setup_percpu_clockev)(void);
11697-};
11698+} __no_const;
11699
11700 /**
11701 * struct x86_platform_ops - platform specific runtime functions
11702@@ -169,7 +169,7 @@ struct x86_platform_ops {
11703 void (*nmi_init)(void);
11704 unsigned char (*get_nmi_reason)(void);
11705 int (*i8042_detect)(void);
11706-};
11707+} __no_const;
11708
11709 struct pci_dev;
11710
11711@@ -177,7 +177,7 @@ struct x86_msi_ops {
11712 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
11713 void (*teardown_msi_irq)(unsigned int irq);
11714 void (*teardown_msi_irqs)(struct pci_dev *dev);
11715-};
11716+} __no_const;
11717
11718 extern struct x86_init_ops x86_init;
11719 extern struct x86_cpuinit_ops x86_cpuinit;
11720diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
11721index c6ce245..ffbdab7 100644
11722--- a/arch/x86/include/asm/xsave.h
11723+++ b/arch/x86/include/asm/xsave.h
11724@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11725 {
11726 int err;
11727
11728+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11729+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11730+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11731+#endif
11732+
11733 /*
11734 * Clear the xsave header first, so that reserved fields are
11735 * initialized to zero.
11736@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11737 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
11738 {
11739 int err;
11740- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
11741+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
11742 u32 lmask = mask;
11743 u32 hmask = mask >> 32;
11744
11745+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11746+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11747+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11748+#endif
11749+
11750 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11751 "2:\n"
11752 ".section .fixup,\"ax\"\n"
11753diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
11754index 6a564ac..9b1340c 100644
11755--- a/arch/x86/kernel/acpi/realmode/Makefile
11756+++ b/arch/x86/kernel/acpi/realmode/Makefile
11757@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
11758 $(call cc-option, -fno-stack-protector) \
11759 $(call cc-option, -mpreferred-stack-boundary=2)
11760 KBUILD_CFLAGS += $(call cc-option, -m32)
11761+ifdef CONSTIFY_PLUGIN
11762+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11763+endif
11764 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11765 GCOV_PROFILE := n
11766
11767diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
11768index b4fd836..4358fe3 100644
11769--- a/arch/x86/kernel/acpi/realmode/wakeup.S
11770+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
11771@@ -108,6 +108,9 @@ wakeup_code:
11772 /* Do any other stuff... */
11773
11774 #ifndef CONFIG_64BIT
11775+ /* Recheck NX bit overrides (64bit path does this in trampoline */
11776+ call verify_cpu
11777+
11778 /* This could also be done in C code... */
11779 movl pmode_cr3, %eax
11780 movl %eax, %cr3
11781@@ -131,6 +134,7 @@ wakeup_code:
11782 movl pmode_cr0, %eax
11783 movl %eax, %cr0
11784 jmp pmode_return
11785+# include "../../verify_cpu.S"
11786 #else
11787 pushw $0
11788 pushw trampoline_segment
11789diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
11790index 103b6ab..2004d0a 100644
11791--- a/arch/x86/kernel/acpi/sleep.c
11792+++ b/arch/x86/kernel/acpi/sleep.c
11793@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
11794 header->trampoline_segment = trampoline_address() >> 4;
11795 #ifdef CONFIG_SMP
11796 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11797+
11798+ pax_open_kernel();
11799 early_gdt_descr.address =
11800 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11801+ pax_close_kernel();
11802+
11803 initial_gs = per_cpu_offset(smp_processor_id());
11804 #endif
11805 initial_code = (unsigned long)wakeup_long64;
11806diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
11807index 13ab720..95d5442 100644
11808--- a/arch/x86/kernel/acpi/wakeup_32.S
11809+++ b/arch/x86/kernel/acpi/wakeup_32.S
11810@@ -30,13 +30,11 @@ wakeup_pmode_return:
11811 # and restore the stack ... but you need gdt for this to work
11812 movl saved_context_esp, %esp
11813
11814- movl %cs:saved_magic, %eax
11815- cmpl $0x12345678, %eax
11816+ cmpl $0x12345678, saved_magic
11817 jne bogus_magic
11818
11819 # jump to place where we left off
11820- movl saved_eip, %eax
11821- jmp *%eax
11822+ jmp *(saved_eip)
11823
11824 bogus_magic:
11825 jmp bogus_magic
11826diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
11827index 1f84794..e23f862 100644
11828--- a/arch/x86/kernel/alternative.c
11829+++ b/arch/x86/kernel/alternative.c
11830@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
11831 */
11832 for (a = start; a < end; a++) {
11833 instr = (u8 *)&a->instr_offset + a->instr_offset;
11834+
11835+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11836+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11837+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
11838+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11839+#endif
11840+
11841 replacement = (u8 *)&a->repl_offset + a->repl_offset;
11842 BUG_ON(a->replacementlen > a->instrlen);
11843 BUG_ON(a->instrlen > sizeof(insnbuf));
11844@@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
11845 for (poff = start; poff < end; poff++) {
11846 u8 *ptr = (u8 *)poff + *poff;
11847
11848+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11849+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11850+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11851+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11852+#endif
11853+
11854 if (!*poff || ptr < text || ptr >= text_end)
11855 continue;
11856 /* turn DS segment override prefix into lock prefix */
11857- if (*ptr == 0x3e)
11858+ if (*ktla_ktva(ptr) == 0x3e)
11859 text_poke(ptr, ((unsigned char []){0xf0}), 1);
11860 };
11861 mutex_unlock(&text_mutex);
11862@@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
11863 for (poff = start; poff < end; poff++) {
11864 u8 *ptr = (u8 *)poff + *poff;
11865
11866+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11867+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11868+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11869+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11870+#endif
11871+
11872 if (!*poff || ptr < text || ptr >= text_end)
11873 continue;
11874 /* turn lock prefix into DS segment override prefix */
11875- if (*ptr == 0xf0)
11876+ if (*ktla_ktva(ptr) == 0xf0)
11877 text_poke(ptr, ((unsigned char []){0x3E}), 1);
11878 };
11879 mutex_unlock(&text_mutex);
11880@@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
11881
11882 BUG_ON(p->len > MAX_PATCH_LEN);
11883 /* prep the buffer with the original instructions */
11884- memcpy(insnbuf, p->instr, p->len);
11885+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11886 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11887 (unsigned long)p->instr, p->len);
11888
11889@@ -568,7 +587,7 @@ void __init alternative_instructions(void)
11890 if (smp_alt_once)
11891 free_init_pages("SMP alternatives",
11892 (unsigned long)__smp_locks,
11893- (unsigned long)__smp_locks_end);
11894+ PAGE_ALIGN((unsigned long)__smp_locks_end));
11895
11896 restart_nmi();
11897 }
11898@@ -585,13 +604,17 @@ void __init alternative_instructions(void)
11899 * instructions. And on the local CPU you need to be protected again NMI or MCE
11900 * handlers seeing an inconsistent instruction while you patch.
11901 */
11902-void *__init_or_module text_poke_early(void *addr, const void *opcode,
11903+void *__kprobes text_poke_early(void *addr, const void *opcode,
11904 size_t len)
11905 {
11906 unsigned long flags;
11907 local_irq_save(flags);
11908- memcpy(addr, opcode, len);
11909+
11910+ pax_open_kernel();
11911+ memcpy(ktla_ktva(addr), opcode, len);
11912 sync_core();
11913+ pax_close_kernel();
11914+
11915 local_irq_restore(flags);
11916 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11917 that causes hangs on some VIA CPUs. */
11918@@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
11919 */
11920 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11921 {
11922- unsigned long flags;
11923- char *vaddr;
11924+ unsigned char *vaddr = ktla_ktva(addr);
11925 struct page *pages[2];
11926- int i;
11927+ size_t i;
11928
11929 if (!core_kernel_text((unsigned long)addr)) {
11930- pages[0] = vmalloc_to_page(addr);
11931- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11932+ pages[0] = vmalloc_to_page(vaddr);
11933+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11934 } else {
11935- pages[0] = virt_to_page(addr);
11936+ pages[0] = virt_to_page(vaddr);
11937 WARN_ON(!PageReserved(pages[0]));
11938- pages[1] = virt_to_page(addr + PAGE_SIZE);
11939+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11940 }
11941 BUG_ON(!pages[0]);
11942- local_irq_save(flags);
11943- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11944- if (pages[1])
11945- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11946- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11947- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11948- clear_fixmap(FIX_TEXT_POKE0);
11949- if (pages[1])
11950- clear_fixmap(FIX_TEXT_POKE1);
11951- local_flush_tlb();
11952- sync_core();
11953- /* Could also do a CLFLUSH here to speed up CPU recovery; but
11954- that causes hangs on some VIA CPUs. */
11955+ text_poke_early(addr, opcode, len);
11956 for (i = 0; i < len; i++)
11957- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11958- local_irq_restore(flags);
11959+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11960 return addr;
11961 }
11962
11963diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
11964index f98d84c..e402a69 100644
11965--- a/arch/x86/kernel/apic/apic.c
11966+++ b/arch/x86/kernel/apic/apic.c
11967@@ -174,7 +174,7 @@ int first_system_vector = 0xfe;
11968 /*
11969 * Debug level, exported for io_apic.c
11970 */
11971-unsigned int apic_verbosity;
11972+int apic_verbosity;
11973
11974 int pic_mode;
11975
11976@@ -1853,7 +1853,7 @@ void smp_error_interrupt(struct pt_regs *regs)
11977 apic_write(APIC_ESR, 0);
11978 v1 = apic_read(APIC_ESR);
11979 ack_APIC_irq();
11980- atomic_inc(&irq_err_count);
11981+ atomic_inc_unchecked(&irq_err_count);
11982
11983 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
11984 smp_processor_id(), v0 , v1);
11985diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
11986index 6d939d7..0697fcc 100644
11987--- a/arch/x86/kernel/apic/io_apic.c
11988+++ b/arch/x86/kernel/apic/io_apic.c
11989@@ -1096,7 +1096,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
11990 }
11991 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11992
11993-void lock_vector_lock(void)
11994+void lock_vector_lock(void) __acquires(vector_lock)
11995 {
11996 /* Used to the online set of cpus does not change
11997 * during assign_irq_vector.
11998@@ -1104,7 +1104,7 @@ void lock_vector_lock(void)
11999 raw_spin_lock(&vector_lock);
12000 }
12001
12002-void unlock_vector_lock(void)
12003+void unlock_vector_lock(void) __releases(vector_lock)
12004 {
12005 raw_spin_unlock(&vector_lock);
12006 }
12007@@ -2510,7 +2510,7 @@ static void ack_apic_edge(struct irq_data *data)
12008 ack_APIC_irq();
12009 }
12010
12011-atomic_t irq_mis_count;
12012+atomic_unchecked_t irq_mis_count;
12013
12014 static void ack_apic_level(struct irq_data *data)
12015 {
12016@@ -2576,7 +2576,7 @@ static void ack_apic_level(struct irq_data *data)
12017 * at the cpu.
12018 */
12019 if (!(v & (1 << (i & 0x1f)))) {
12020- atomic_inc(&irq_mis_count);
12021+ atomic_inc_unchecked(&irq_mis_count);
12022
12023 eoi_ioapic_irq(irq, cfg);
12024 }
12025diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
12026index a46bd38..6b906d7 100644
12027--- a/arch/x86/kernel/apm_32.c
12028+++ b/arch/x86/kernel/apm_32.c
12029@@ -411,7 +411,7 @@ static DEFINE_MUTEX(apm_mutex);
12030 * This is for buggy BIOS's that refer to (real mode) segment 0x40
12031 * even though they are called in protected mode.
12032 */
12033-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
12034+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
12035 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
12036
12037 static const char driver_version[] = "1.16ac"; /* no spaces */
12038@@ -589,7 +589,10 @@ static long __apm_bios_call(void *_call)
12039 BUG_ON(cpu != 0);
12040 gdt = get_cpu_gdt_table(cpu);
12041 save_desc_40 = gdt[0x40 / 8];
12042+
12043+ pax_open_kernel();
12044 gdt[0x40 / 8] = bad_bios_desc;
12045+ pax_close_kernel();
12046
12047 apm_irq_save(flags);
12048 APM_DO_SAVE_SEGS;
12049@@ -598,7 +601,11 @@ static long __apm_bios_call(void *_call)
12050 &call->esi);
12051 APM_DO_RESTORE_SEGS;
12052 apm_irq_restore(flags);
12053+
12054+ pax_open_kernel();
12055 gdt[0x40 / 8] = save_desc_40;
12056+ pax_close_kernel();
12057+
12058 put_cpu();
12059
12060 return call->eax & 0xff;
12061@@ -665,7 +672,10 @@ static long __apm_bios_call_simple(void *_call)
12062 BUG_ON(cpu != 0);
12063 gdt = get_cpu_gdt_table(cpu);
12064 save_desc_40 = gdt[0x40 / 8];
12065+
12066+ pax_open_kernel();
12067 gdt[0x40 / 8] = bad_bios_desc;
12068+ pax_close_kernel();
12069
12070 apm_irq_save(flags);
12071 APM_DO_SAVE_SEGS;
12072@@ -673,7 +683,11 @@ static long __apm_bios_call_simple(void *_call)
12073 &call->eax);
12074 APM_DO_RESTORE_SEGS;
12075 apm_irq_restore(flags);
12076+
12077+ pax_open_kernel();
12078 gdt[0x40 / 8] = save_desc_40;
12079+ pax_close_kernel();
12080+
12081 put_cpu();
12082 return error;
12083 }
12084@@ -2347,12 +2361,15 @@ static int __init apm_init(void)
12085 * code to that CPU.
12086 */
12087 gdt = get_cpu_gdt_table(0);
12088+
12089+ pax_open_kernel();
12090 set_desc_base(&gdt[APM_CS >> 3],
12091 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
12092 set_desc_base(&gdt[APM_CS_16 >> 3],
12093 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
12094 set_desc_base(&gdt[APM_DS >> 3],
12095 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
12096+ pax_close_kernel();
12097
12098 proc_create("apm", 0, NULL, &apm_file_ops);
12099
12100diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
12101index 4f13faf..87db5d2 100644
12102--- a/arch/x86/kernel/asm-offsets.c
12103+++ b/arch/x86/kernel/asm-offsets.c
12104@@ -33,6 +33,8 @@ void common(void) {
12105 OFFSET(TI_status, thread_info, status);
12106 OFFSET(TI_addr_limit, thread_info, addr_limit);
12107 OFFSET(TI_preempt_count, thread_info, preempt_count);
12108+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
12109+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
12110
12111 BLANK();
12112 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
12113@@ -53,8 +55,26 @@ void common(void) {
12114 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
12115 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
12116 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
12117+
12118+#ifdef CONFIG_PAX_KERNEXEC
12119+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
12120 #endif
12121
12122+#ifdef CONFIG_PAX_MEMORY_UDEREF
12123+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
12124+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
12125+#ifdef CONFIG_X86_64
12126+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
12127+#endif
12128+#endif
12129+
12130+#endif
12131+
12132+ BLANK();
12133+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
12134+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
12135+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
12136+
12137 #ifdef CONFIG_XEN
12138 BLANK();
12139 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
12140diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
12141index e72a119..6e2955d 100644
12142--- a/arch/x86/kernel/asm-offsets_64.c
12143+++ b/arch/x86/kernel/asm-offsets_64.c
12144@@ -69,6 +69,7 @@ int main(void)
12145 BLANK();
12146 #undef ENTRY
12147
12148+ DEFINE(TSS_size, sizeof(struct tss_struct));
12149 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
12150 BLANK();
12151
12152diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
12153index 25f24dc..4094a7f 100644
12154--- a/arch/x86/kernel/cpu/Makefile
12155+++ b/arch/x86/kernel/cpu/Makefile
12156@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
12157 CFLAGS_REMOVE_perf_event.o = -pg
12158 endif
12159
12160-# Make sure load_percpu_segment has no stackprotector
12161-nostackp := $(call cc-option, -fno-stack-protector)
12162-CFLAGS_common.o := $(nostackp)
12163-
12164 obj-y := intel_cacheinfo.o scattered.o topology.o
12165 obj-y += proc.o capflags.o powerflags.o common.o
12166 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
12167diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
12168index 0bab2b1..d0a1bf8 100644
12169--- a/arch/x86/kernel/cpu/amd.c
12170+++ b/arch/x86/kernel/cpu/amd.c
12171@@ -664,7 +664,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
12172 unsigned int size)
12173 {
12174 /* AMD errata T13 (order #21922) */
12175- if ((c->x86 == 6)) {
12176+ if (c->x86 == 6) {
12177 /* Duron Rev A0 */
12178 if (c->x86_model == 3 && c->x86_mask == 0)
12179 size = 64;
12180diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
12181index aa003b1..47ea638 100644
12182--- a/arch/x86/kernel/cpu/common.c
12183+++ b/arch/x86/kernel/cpu/common.c
12184@@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
12185
12186 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
12187
12188-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
12189-#ifdef CONFIG_X86_64
12190- /*
12191- * We need valid kernel segments for data and code in long mode too
12192- * IRET will check the segment types kkeil 2000/10/28
12193- * Also sysret mandates a special GDT layout
12194- *
12195- * TLS descriptors are currently at a different place compared to i386.
12196- * Hopefully nobody expects them at a fixed place (Wine?)
12197- */
12198- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
12199- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
12200- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
12201- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
12202- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
12203- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
12204-#else
12205- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
12206- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12207- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
12208- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
12209- /*
12210- * Segments used for calling PnP BIOS have byte granularity.
12211- * They code segments and data segments have fixed 64k limits,
12212- * the transfer segment sizes are set at run time.
12213- */
12214- /* 32-bit code */
12215- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12216- /* 16-bit code */
12217- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12218- /* 16-bit data */
12219- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
12220- /* 16-bit data */
12221- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
12222- /* 16-bit data */
12223- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
12224- /*
12225- * The APM segments have byte granularity and their bases
12226- * are set at run time. All have 64k limits.
12227- */
12228- /* 32-bit code */
12229- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12230- /* 16-bit code */
12231- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12232- /* data */
12233- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
12234-
12235- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12236- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12237- GDT_STACK_CANARY_INIT
12238-#endif
12239-} };
12240-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
12241-
12242 static int __init x86_xsave_setup(char *s)
12243 {
12244 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
12245@@ -372,7 +318,7 @@ void switch_to_new_gdt(int cpu)
12246 {
12247 struct desc_ptr gdt_descr;
12248
12249- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
12250+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
12251 gdt_descr.size = GDT_SIZE - 1;
12252 load_gdt(&gdt_descr);
12253 /* Reload the per-cpu base */
12254@@ -844,6 +790,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
12255 /* Filter out anything that depends on CPUID levels we don't have */
12256 filter_cpuid_features(c, true);
12257
12258+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
12259+ setup_clear_cpu_cap(X86_FEATURE_SEP);
12260+#endif
12261+
12262 /* If the model name is still unset, do table lookup. */
12263 if (!c->x86_model_id[0]) {
12264 const char *p;
12265@@ -1024,6 +974,9 @@ static __init int setup_disablecpuid(char *arg)
12266 }
12267 __setup("clearcpuid=", setup_disablecpuid);
12268
12269+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
12270+EXPORT_PER_CPU_SYMBOL(current_tinfo);
12271+
12272 #ifdef CONFIG_X86_64
12273 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
12274
12275@@ -1039,7 +992,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
12276 EXPORT_PER_CPU_SYMBOL(current_task);
12277
12278 DEFINE_PER_CPU(unsigned long, kernel_stack) =
12279- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
12280+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
12281 EXPORT_PER_CPU_SYMBOL(kernel_stack);
12282
12283 DEFINE_PER_CPU(char *, irq_stack_ptr) =
12284@@ -1104,7 +1057,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
12285 {
12286 memset(regs, 0, sizeof(struct pt_regs));
12287 regs->fs = __KERNEL_PERCPU;
12288- regs->gs = __KERNEL_STACK_CANARY;
12289+ savesegment(gs, regs->gs);
12290
12291 return regs;
12292 }
12293@@ -1159,7 +1112,7 @@ void __cpuinit cpu_init(void)
12294 int i;
12295
12296 cpu = stack_smp_processor_id();
12297- t = &per_cpu(init_tss, cpu);
12298+ t = init_tss + cpu;
12299 oist = &per_cpu(orig_ist, cpu);
12300
12301 #ifdef CONFIG_NUMA
12302@@ -1185,7 +1138,7 @@ void __cpuinit cpu_init(void)
12303 switch_to_new_gdt(cpu);
12304 loadsegment(fs, 0);
12305
12306- load_idt((const struct desc_ptr *)&idt_descr);
12307+ load_idt(&idt_descr);
12308
12309 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
12310 syscall_init();
12311@@ -1194,7 +1147,6 @@ void __cpuinit cpu_init(void)
12312 wrmsrl(MSR_KERNEL_GS_BASE, 0);
12313 barrier();
12314
12315- x86_configure_nx();
12316 if (cpu != 0)
12317 enable_x2apic();
12318
12319@@ -1248,7 +1200,7 @@ void __cpuinit cpu_init(void)
12320 {
12321 int cpu = smp_processor_id();
12322 struct task_struct *curr = current;
12323- struct tss_struct *t = &per_cpu(init_tss, cpu);
12324+ struct tss_struct *t = init_tss + cpu;
12325 struct thread_struct *thread = &curr->thread;
12326
12327 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12328diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
12329index 5231312..a78a987 100644
12330--- a/arch/x86/kernel/cpu/intel.c
12331+++ b/arch/x86/kernel/cpu/intel.c
12332@@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
12333 * Update the IDT descriptor and reload the IDT so that
12334 * it uses the read-only mapped virtual address.
12335 */
12336- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12337+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12338 load_idt(&idt_descr);
12339 }
12340 #endif
12341diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
12342index 2af127d..8ff7ac0 100644
12343--- a/arch/x86/kernel/cpu/mcheck/mce.c
12344+++ b/arch/x86/kernel/cpu/mcheck/mce.c
12345@@ -42,6 +42,7 @@
12346 #include <asm/processor.h>
12347 #include <asm/mce.h>
12348 #include <asm/msr.h>
12349+#include <asm/local.h>
12350
12351 #include "mce-internal.h"
12352
12353@@ -202,7 +203,7 @@ static void print_mce(struct mce *m)
12354 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12355 m->cs, m->ip);
12356
12357- if (m->cs == __KERNEL_CS)
12358+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12359 print_symbol("{%s}", m->ip);
12360 pr_cont("\n");
12361 }
12362@@ -235,10 +236,10 @@ static void print_mce(struct mce *m)
12363
12364 #define PANIC_TIMEOUT 5 /* 5 seconds */
12365
12366-static atomic_t mce_paniced;
12367+static atomic_unchecked_t mce_paniced;
12368
12369 static int fake_panic;
12370-static atomic_t mce_fake_paniced;
12371+static atomic_unchecked_t mce_fake_paniced;
12372
12373 /* Panic in progress. Enable interrupts and wait for final IPI */
12374 static void wait_for_panic(void)
12375@@ -262,7 +263,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12376 /*
12377 * Make sure only one CPU runs in machine check panic
12378 */
12379- if (atomic_inc_return(&mce_paniced) > 1)
12380+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12381 wait_for_panic();
12382 barrier();
12383
12384@@ -270,7 +271,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12385 console_verbose();
12386 } else {
12387 /* Don't log too much for fake panic */
12388- if (atomic_inc_return(&mce_fake_paniced) > 1)
12389+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12390 return;
12391 }
12392 /* First print corrected ones that are still unlogged */
12393@@ -610,7 +611,7 @@ static int mce_timed_out(u64 *t)
12394 * might have been modified by someone else.
12395 */
12396 rmb();
12397- if (atomic_read(&mce_paniced))
12398+ if (atomic_read_unchecked(&mce_paniced))
12399 wait_for_panic();
12400 if (!monarch_timeout)
12401 goto out;
12402@@ -1398,7 +1399,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
12403 }
12404
12405 /* Call the installed machine check handler for this CPU setup. */
12406-void (*machine_check_vector)(struct pt_regs *, long error_code) =
12407+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
12408 unexpected_machine_check;
12409
12410 /*
12411@@ -1421,7 +1422,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12412 return;
12413 }
12414
12415+ pax_open_kernel();
12416 machine_check_vector = do_machine_check;
12417+ pax_close_kernel();
12418
12419 __mcheck_cpu_init_generic();
12420 __mcheck_cpu_init_vendor(c);
12421@@ -1435,7 +1438,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12422 */
12423
12424 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
12425-static int mce_chrdev_open_count; /* #times opened */
12426+static local_t mce_chrdev_open_count; /* #times opened */
12427 static int mce_chrdev_open_exclu; /* already open exclusive? */
12428
12429 static int mce_chrdev_open(struct inode *inode, struct file *file)
12430@@ -1443,7 +1446,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12431 spin_lock(&mce_chrdev_state_lock);
12432
12433 if (mce_chrdev_open_exclu ||
12434- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
12435+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
12436 spin_unlock(&mce_chrdev_state_lock);
12437
12438 return -EBUSY;
12439@@ -1451,7 +1454,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12440
12441 if (file->f_flags & O_EXCL)
12442 mce_chrdev_open_exclu = 1;
12443- mce_chrdev_open_count++;
12444+ local_inc(&mce_chrdev_open_count);
12445
12446 spin_unlock(&mce_chrdev_state_lock);
12447
12448@@ -1462,7 +1465,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
12449 {
12450 spin_lock(&mce_chrdev_state_lock);
12451
12452- mce_chrdev_open_count--;
12453+ local_dec(&mce_chrdev_open_count);
12454 mce_chrdev_open_exclu = 0;
12455
12456 spin_unlock(&mce_chrdev_state_lock);
12457@@ -2171,7 +2174,7 @@ struct dentry *mce_get_debugfs_dir(void)
12458 static void mce_reset(void)
12459 {
12460 cpu_missing = 0;
12461- atomic_set(&mce_fake_paniced, 0);
12462+ atomic_set_unchecked(&mce_fake_paniced, 0);
12463 atomic_set(&mce_executing, 0);
12464 atomic_set(&mce_callin, 0);
12465 atomic_set(&global_nwo, 0);
12466diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
12467index 5c0e653..0882b0a 100644
12468--- a/arch/x86/kernel/cpu/mcheck/p5.c
12469+++ b/arch/x86/kernel/cpu/mcheck/p5.c
12470@@ -12,6 +12,7 @@
12471 #include <asm/system.h>
12472 #include <asm/mce.h>
12473 #include <asm/msr.h>
12474+#include <asm/pgtable.h>
12475
12476 /* By default disabled */
12477 int mce_p5_enabled __read_mostly;
12478@@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
12479 if (!cpu_has(c, X86_FEATURE_MCE))
12480 return;
12481
12482+ pax_open_kernel();
12483 machine_check_vector = pentium_machine_check;
12484+ pax_close_kernel();
12485 /* Make sure the vector pointer is visible before we enable MCEs: */
12486 wmb();
12487
12488diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
12489index 54060f5..c1a7577 100644
12490--- a/arch/x86/kernel/cpu/mcheck/winchip.c
12491+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
12492@@ -11,6 +11,7 @@
12493 #include <asm/system.h>
12494 #include <asm/mce.h>
12495 #include <asm/msr.h>
12496+#include <asm/pgtable.h>
12497
12498 /* Machine check handler for WinChip C6: */
12499 static void winchip_machine_check(struct pt_regs *regs, long error_code)
12500@@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
12501 {
12502 u32 lo, hi;
12503
12504+ pax_open_kernel();
12505 machine_check_vector = winchip_machine_check;
12506+ pax_close_kernel();
12507 /* Make sure the vector pointer is visible before we enable MCEs: */
12508 wmb();
12509
12510diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
12511index 6b96110..0da73eb 100644
12512--- a/arch/x86/kernel/cpu/mtrr/main.c
12513+++ b/arch/x86/kernel/cpu/mtrr/main.c
12514@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
12515 u64 size_or_mask, size_and_mask;
12516 static bool mtrr_aps_delayed_init;
12517
12518-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12519+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12520
12521 const struct mtrr_ops *mtrr_if;
12522
12523diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
12524index df5e41f..816c719 100644
12525--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
12526+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
12527@@ -25,7 +25,7 @@ struct mtrr_ops {
12528 int (*validate_add_page)(unsigned long base, unsigned long size,
12529 unsigned int type);
12530 int (*have_wrcomb)(void);
12531-};
12532+} __do_const;
12533
12534 extern int generic_get_free_region(unsigned long base, unsigned long size,
12535 int replace_reg);
12536diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
12537index 2bda212..78cc605 100644
12538--- a/arch/x86/kernel/cpu/perf_event.c
12539+++ b/arch/x86/kernel/cpu/perf_event.c
12540@@ -1529,7 +1529,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
12541 break;
12542
12543 perf_callchain_store(entry, frame.return_address);
12544- fp = frame.next_frame;
12545+ fp = (const void __force_user *)frame.next_frame;
12546 }
12547 }
12548
12549diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
12550index 13ad899..f642b9a 100644
12551--- a/arch/x86/kernel/crash.c
12552+++ b/arch/x86/kernel/crash.c
12553@@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
12554 {
12555 #ifdef CONFIG_X86_32
12556 struct pt_regs fixed_regs;
12557-#endif
12558
12559-#ifdef CONFIG_X86_32
12560- if (!user_mode_vm(regs)) {
12561+ if (!user_mode(regs)) {
12562 crash_fixup_ss_esp(&fixed_regs, regs);
12563 regs = &fixed_regs;
12564 }
12565diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
12566index 37250fe..bf2ec74 100644
12567--- a/arch/x86/kernel/doublefault_32.c
12568+++ b/arch/x86/kernel/doublefault_32.c
12569@@ -11,7 +11,7 @@
12570
12571 #define DOUBLEFAULT_STACKSIZE (1024)
12572 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12573-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12574+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12575
12576 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12577
12578@@ -21,7 +21,7 @@ static void doublefault_fn(void)
12579 unsigned long gdt, tss;
12580
12581 store_gdt(&gdt_desc);
12582- gdt = gdt_desc.address;
12583+ gdt = (unsigned long)gdt_desc.address;
12584
12585 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12586
12587@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
12588 /* 0x2 bit is always set */
12589 .flags = X86_EFLAGS_SF | 0x2,
12590 .sp = STACK_START,
12591- .es = __USER_DS,
12592+ .es = __KERNEL_DS,
12593 .cs = __KERNEL_CS,
12594 .ss = __KERNEL_DS,
12595- .ds = __USER_DS,
12596+ .ds = __KERNEL_DS,
12597 .fs = __KERNEL_PERCPU,
12598
12599 .__cr3 = __pa_nodebug(swapper_pg_dir),
12600diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
12601index 1aae78f..aab3a3d 100644
12602--- a/arch/x86/kernel/dumpstack.c
12603+++ b/arch/x86/kernel/dumpstack.c
12604@@ -2,6 +2,9 @@
12605 * Copyright (C) 1991, 1992 Linus Torvalds
12606 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12607 */
12608+#ifdef CONFIG_GRKERNSEC_HIDESYM
12609+#define __INCLUDED_BY_HIDESYM 1
12610+#endif
12611 #include <linux/kallsyms.h>
12612 #include <linux/kprobes.h>
12613 #include <linux/uaccess.h>
12614@@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
12615 static void
12616 print_ftrace_graph_addr(unsigned long addr, void *data,
12617 const struct stacktrace_ops *ops,
12618- struct thread_info *tinfo, int *graph)
12619+ struct task_struct *task, int *graph)
12620 {
12621- struct task_struct *task = tinfo->task;
12622 unsigned long ret_addr;
12623 int index = task->curr_ret_stack;
12624
12625@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12626 static inline void
12627 print_ftrace_graph_addr(unsigned long addr, void *data,
12628 const struct stacktrace_ops *ops,
12629- struct thread_info *tinfo, int *graph)
12630+ struct task_struct *task, int *graph)
12631 { }
12632 #endif
12633
12634@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12635 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12636 */
12637
12638-static inline int valid_stack_ptr(struct thread_info *tinfo,
12639- void *p, unsigned int size, void *end)
12640+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12641 {
12642- void *t = tinfo;
12643 if (end) {
12644 if (p < end && p >= (end-THREAD_SIZE))
12645 return 1;
12646@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
12647 }
12648
12649 unsigned long
12650-print_context_stack(struct thread_info *tinfo,
12651+print_context_stack(struct task_struct *task, void *stack_start,
12652 unsigned long *stack, unsigned long bp,
12653 const struct stacktrace_ops *ops, void *data,
12654 unsigned long *end, int *graph)
12655 {
12656 struct stack_frame *frame = (struct stack_frame *)bp;
12657
12658- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12659+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12660 unsigned long addr;
12661
12662 addr = *stack;
12663@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
12664 } else {
12665 ops->address(data, addr, 0);
12666 }
12667- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12668+ print_ftrace_graph_addr(addr, data, ops, task, graph);
12669 }
12670 stack++;
12671 }
12672@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
12673 EXPORT_SYMBOL_GPL(print_context_stack);
12674
12675 unsigned long
12676-print_context_stack_bp(struct thread_info *tinfo,
12677+print_context_stack_bp(struct task_struct *task, void *stack_start,
12678 unsigned long *stack, unsigned long bp,
12679 const struct stacktrace_ops *ops, void *data,
12680 unsigned long *end, int *graph)
12681@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12682 struct stack_frame *frame = (struct stack_frame *)bp;
12683 unsigned long *ret_addr = &frame->return_address;
12684
12685- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
12686+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
12687 unsigned long addr = *ret_addr;
12688
12689 if (!__kernel_text_address(addr))
12690@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12691 ops->address(data, addr, 1);
12692 frame = frame->next_frame;
12693 ret_addr = &frame->return_address;
12694- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12695+ print_ftrace_graph_addr(addr, data, ops, task, graph);
12696 }
12697
12698 return (unsigned long)frame;
12699@@ -186,7 +186,7 @@ void dump_stack(void)
12700
12701 bp = stack_frame(current, NULL);
12702 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12703- current->pid, current->comm, print_tainted(),
12704+ task_pid_nr(current), current->comm, print_tainted(),
12705 init_utsname()->release,
12706 (int)strcspn(init_utsname()->version, " "),
12707 init_utsname()->version);
12708@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
12709 }
12710 EXPORT_SYMBOL_GPL(oops_begin);
12711
12712+extern void gr_handle_kernel_exploit(void);
12713+
12714 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12715 {
12716 if (regs && kexec_should_crash(current))
12717@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12718 panic("Fatal exception in interrupt");
12719 if (panic_on_oops)
12720 panic("Fatal exception");
12721- do_exit(signr);
12722+
12723+ gr_handle_kernel_exploit();
12724+
12725+ do_group_exit(signr);
12726 }
12727
12728 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12729@@ -269,7 +274,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12730
12731 show_registers(regs);
12732 #ifdef CONFIG_X86_32
12733- if (user_mode_vm(regs)) {
12734+ if (user_mode(regs)) {
12735 sp = regs->sp;
12736 ss = regs->ss & 0xffff;
12737 } else {
12738@@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs *regs, long err)
12739 unsigned long flags = oops_begin();
12740 int sig = SIGSEGV;
12741
12742- if (!user_mode_vm(regs))
12743+ if (!user_mode(regs))
12744 report_bug(regs->ip, regs);
12745
12746 if (__die(str, regs, err))
12747diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
12748index c99f9ed..2a15d80 100644
12749--- a/arch/x86/kernel/dumpstack_32.c
12750+++ b/arch/x86/kernel/dumpstack_32.c
12751@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12752 bp = stack_frame(task, regs);
12753
12754 for (;;) {
12755- struct thread_info *context;
12756+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12757
12758- context = (struct thread_info *)
12759- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12760- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
12761+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12762
12763- stack = (unsigned long *)context->previous_esp;
12764- if (!stack)
12765+ if (stack_start == task_stack_page(task))
12766 break;
12767+ stack = *(unsigned long **)stack_start;
12768 if (ops->stack(data, "IRQ") < 0)
12769 break;
12770 touch_nmi_watchdog();
12771@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
12772 * When in-kernel, we also print out the stack and code at the
12773 * time of the fault..
12774 */
12775- if (!user_mode_vm(regs)) {
12776+ if (!user_mode(regs)) {
12777 unsigned int code_prologue = code_bytes * 43 / 64;
12778 unsigned int code_len = code_bytes;
12779 unsigned char c;
12780 u8 *ip;
12781+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12782
12783 printk(KERN_EMERG "Stack:\n");
12784 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
12785
12786 printk(KERN_EMERG "Code: ");
12787
12788- ip = (u8 *)regs->ip - code_prologue;
12789+ ip = (u8 *)regs->ip - code_prologue + cs_base;
12790 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12791 /* try starting at IP */
12792- ip = (u8 *)regs->ip;
12793+ ip = (u8 *)regs->ip + cs_base;
12794 code_len = code_len - code_prologue + 1;
12795 }
12796 for (i = 0; i < code_len; i++, ip++) {
12797@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
12798 printk(KERN_CONT " Bad EIP value.");
12799 break;
12800 }
12801- if (ip == (u8 *)regs->ip)
12802+ if (ip == (u8 *)regs->ip + cs_base)
12803 printk(KERN_CONT "<%02x> ", c);
12804 else
12805 printk(KERN_CONT "%02x ", c);
12806@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
12807 {
12808 unsigned short ud2;
12809
12810+ ip = ktla_ktva(ip);
12811 if (ip < PAGE_OFFSET)
12812 return 0;
12813 if (probe_kernel_address((unsigned short *)ip, ud2))
12814@@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
12815
12816 return ud2 == 0x0b0f;
12817 }
12818+
12819+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12820+void pax_check_alloca(unsigned long size)
12821+{
12822+ unsigned long sp = (unsigned long)&sp, stack_left;
12823+
12824+ /* all kernel stacks are of the same size */
12825+ stack_left = sp & (THREAD_SIZE - 1);
12826+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12827+}
12828+EXPORT_SYMBOL(pax_check_alloca);
12829+#endif
12830diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
12831index 6d728d9..279514e 100644
12832--- a/arch/x86/kernel/dumpstack_64.c
12833+++ b/arch/x86/kernel/dumpstack_64.c
12834@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12835 unsigned long *irq_stack_end =
12836 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12837 unsigned used = 0;
12838- struct thread_info *tinfo;
12839 int graph = 0;
12840 unsigned long dummy;
12841+ void *stack_start;
12842
12843 if (!task)
12844 task = current;
12845@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12846 * current stack address. If the stacks consist of nested
12847 * exceptions
12848 */
12849- tinfo = task_thread_info(task);
12850 for (;;) {
12851 char *id;
12852 unsigned long *estack_end;
12853+
12854 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12855 &used, &id);
12856
12857@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12858 if (ops->stack(data, id) < 0)
12859 break;
12860
12861- bp = ops->walk_stack(tinfo, stack, bp, ops,
12862+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12863 data, estack_end, &graph);
12864 ops->stack(data, "<EOE>");
12865 /*
12866@@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12867 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
12868 if (ops->stack(data, "IRQ") < 0)
12869 break;
12870- bp = ops->walk_stack(tinfo, stack, bp,
12871+ bp = ops->walk_stack(task, irq_stack, stack, bp,
12872 ops, data, irq_stack_end, &graph);
12873 /*
12874 * We link to the next stack (which would be
12875@@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12876 /*
12877 * This handles the process stack:
12878 */
12879- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12880+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12881+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12882 put_cpu();
12883 }
12884 EXPORT_SYMBOL(dump_trace);
12885@@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
12886
12887 return ud2 == 0x0b0f;
12888 }
12889+
12890+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12891+void pax_check_alloca(unsigned long size)
12892+{
12893+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
12894+ unsigned cpu, used;
12895+ char *id;
12896+
12897+ /* check the process stack first */
12898+ stack_start = (unsigned long)task_stack_page(current);
12899+ stack_end = stack_start + THREAD_SIZE;
12900+ if (likely(stack_start <= sp && sp < stack_end)) {
12901+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
12902+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12903+ return;
12904+ }
12905+
12906+ cpu = get_cpu();
12907+
12908+ /* check the irq stacks */
12909+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
12910+ stack_start = stack_end - IRQ_STACK_SIZE;
12911+ if (stack_start <= sp && sp < stack_end) {
12912+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
12913+ put_cpu();
12914+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12915+ return;
12916+ }
12917+
12918+ /* check the exception stacks */
12919+ used = 0;
12920+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
12921+ stack_start = stack_end - EXCEPTION_STKSZ;
12922+ if (stack_end && stack_start <= sp && sp < stack_end) {
12923+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
12924+ put_cpu();
12925+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12926+ return;
12927+ }
12928+
12929+ put_cpu();
12930+
12931+ /* unknown stack */
12932+ BUG();
12933+}
12934+EXPORT_SYMBOL(pax_check_alloca);
12935+#endif
12936diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
12937index cd28a35..c72ed9a 100644
12938--- a/arch/x86/kernel/early_printk.c
12939+++ b/arch/x86/kernel/early_printk.c
12940@@ -7,6 +7,7 @@
12941 #include <linux/pci_regs.h>
12942 #include <linux/pci_ids.h>
12943 #include <linux/errno.h>
12944+#include <linux/sched.h>
12945 #include <asm/io.h>
12946 #include <asm/processor.h>
12947 #include <asm/fcntl.h>
12948diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
12949index f3f6f53..0841b66 100644
12950--- a/arch/x86/kernel/entry_32.S
12951+++ b/arch/x86/kernel/entry_32.S
12952@@ -186,13 +186,146 @@
12953 /*CFI_REL_OFFSET gs, PT_GS*/
12954 .endm
12955 .macro SET_KERNEL_GS reg
12956+
12957+#ifdef CONFIG_CC_STACKPROTECTOR
12958 movl $(__KERNEL_STACK_CANARY), \reg
12959+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12960+ movl $(__USER_DS), \reg
12961+#else
12962+ xorl \reg, \reg
12963+#endif
12964+
12965 movl \reg, %gs
12966 .endm
12967
12968 #endif /* CONFIG_X86_32_LAZY_GS */
12969
12970-.macro SAVE_ALL
12971+.macro pax_enter_kernel
12972+#ifdef CONFIG_PAX_KERNEXEC
12973+ call pax_enter_kernel
12974+#endif
12975+.endm
12976+
12977+.macro pax_exit_kernel
12978+#ifdef CONFIG_PAX_KERNEXEC
12979+ call pax_exit_kernel
12980+#endif
12981+.endm
12982+
12983+#ifdef CONFIG_PAX_KERNEXEC
12984+ENTRY(pax_enter_kernel)
12985+#ifdef CONFIG_PARAVIRT
12986+ pushl %eax
12987+ pushl %ecx
12988+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
12989+ mov %eax, %esi
12990+#else
12991+ mov %cr0, %esi
12992+#endif
12993+ bts $16, %esi
12994+ jnc 1f
12995+ mov %cs, %esi
12996+ cmp $__KERNEL_CS, %esi
12997+ jz 3f
12998+ ljmp $__KERNEL_CS, $3f
12999+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
13000+2:
13001+#ifdef CONFIG_PARAVIRT
13002+ mov %esi, %eax
13003+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
13004+#else
13005+ mov %esi, %cr0
13006+#endif
13007+3:
13008+#ifdef CONFIG_PARAVIRT
13009+ popl %ecx
13010+ popl %eax
13011+#endif
13012+ ret
13013+ENDPROC(pax_enter_kernel)
13014+
13015+ENTRY(pax_exit_kernel)
13016+#ifdef CONFIG_PARAVIRT
13017+ pushl %eax
13018+ pushl %ecx
13019+#endif
13020+ mov %cs, %esi
13021+ cmp $__KERNEXEC_KERNEL_CS, %esi
13022+ jnz 2f
13023+#ifdef CONFIG_PARAVIRT
13024+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
13025+ mov %eax, %esi
13026+#else
13027+ mov %cr0, %esi
13028+#endif
13029+ btr $16, %esi
13030+ ljmp $__KERNEL_CS, $1f
13031+1:
13032+#ifdef CONFIG_PARAVIRT
13033+ mov %esi, %eax
13034+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
13035+#else
13036+ mov %esi, %cr0
13037+#endif
13038+2:
13039+#ifdef CONFIG_PARAVIRT
13040+ popl %ecx
13041+ popl %eax
13042+#endif
13043+ ret
13044+ENDPROC(pax_exit_kernel)
13045+#endif
13046+
13047+.macro pax_erase_kstack
13048+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13049+ call pax_erase_kstack
13050+#endif
13051+.endm
13052+
13053+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13054+/*
13055+ * ebp: thread_info
13056+ * ecx, edx: can be clobbered
13057+ */
13058+ENTRY(pax_erase_kstack)
13059+ pushl %edi
13060+ pushl %eax
13061+
13062+ mov TI_lowest_stack(%ebp), %edi
13063+ mov $-0xBEEF, %eax
13064+ std
13065+
13066+1: mov %edi, %ecx
13067+ and $THREAD_SIZE_asm - 1, %ecx
13068+ shr $2, %ecx
13069+ repne scasl
13070+ jecxz 2f
13071+
13072+ cmp $2*16, %ecx
13073+ jc 2f
13074+
13075+ mov $2*16, %ecx
13076+ repe scasl
13077+ jecxz 2f
13078+ jne 1b
13079+
13080+2: cld
13081+ mov %esp, %ecx
13082+ sub %edi, %ecx
13083+ shr $2, %ecx
13084+ rep stosl
13085+
13086+ mov TI_task_thread_sp0(%ebp), %edi
13087+ sub $128, %edi
13088+ mov %edi, TI_lowest_stack(%ebp)
13089+
13090+ popl %eax
13091+ popl %edi
13092+ ret
13093+ENDPROC(pax_erase_kstack)
13094+#endif
13095+
13096+.macro __SAVE_ALL _DS
13097 cld
13098 PUSH_GS
13099 pushl_cfi %fs
13100@@ -215,7 +348,7 @@
13101 CFI_REL_OFFSET ecx, 0
13102 pushl_cfi %ebx
13103 CFI_REL_OFFSET ebx, 0
13104- movl $(__USER_DS), %edx
13105+ movl $\_DS, %edx
13106 movl %edx, %ds
13107 movl %edx, %es
13108 movl $(__KERNEL_PERCPU), %edx
13109@@ -223,6 +356,15 @@
13110 SET_KERNEL_GS %edx
13111 .endm
13112
13113+.macro SAVE_ALL
13114+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
13115+ __SAVE_ALL __KERNEL_DS
13116+ pax_enter_kernel
13117+#else
13118+ __SAVE_ALL __USER_DS
13119+#endif
13120+.endm
13121+
13122 .macro RESTORE_INT_REGS
13123 popl_cfi %ebx
13124 CFI_RESTORE ebx
13125@@ -308,7 +450,7 @@ ENTRY(ret_from_fork)
13126 popfl_cfi
13127 jmp syscall_exit
13128 CFI_ENDPROC
13129-END(ret_from_fork)
13130+ENDPROC(ret_from_fork)
13131
13132 /*
13133 * Interrupt exit functions should be protected against kprobes
13134@@ -333,7 +475,15 @@ check_userspace:
13135 movb PT_CS(%esp), %al
13136 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
13137 cmpl $USER_RPL, %eax
13138+
13139+#ifdef CONFIG_PAX_KERNEXEC
13140+ jae resume_userspace
13141+
13142+ PAX_EXIT_KERNEL
13143+ jmp resume_kernel
13144+#else
13145 jb resume_kernel # not returning to v8086 or userspace
13146+#endif
13147
13148 ENTRY(resume_userspace)
13149 LOCKDEP_SYS_EXIT
13150@@ -345,8 +495,8 @@ ENTRY(resume_userspace)
13151 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
13152 # int/exception return?
13153 jne work_pending
13154- jmp restore_all
13155-END(ret_from_exception)
13156+ jmp restore_all_pax
13157+ENDPROC(ret_from_exception)
13158
13159 #ifdef CONFIG_PREEMPT
13160 ENTRY(resume_kernel)
13161@@ -361,7 +511,7 @@ need_resched:
13162 jz restore_all
13163 call preempt_schedule_irq
13164 jmp need_resched
13165-END(resume_kernel)
13166+ENDPROC(resume_kernel)
13167 #endif
13168 CFI_ENDPROC
13169 /*
13170@@ -395,23 +545,34 @@ sysenter_past_esp:
13171 /*CFI_REL_OFFSET cs, 0*/
13172 /*
13173 * Push current_thread_info()->sysenter_return to the stack.
13174- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
13175- * pushed above; +8 corresponds to copy_thread's esp0 setting.
13176 */
13177- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
13178+ pushl_cfi $0
13179 CFI_REL_OFFSET eip, 0
13180
13181 pushl_cfi %eax
13182 SAVE_ALL
13183+ GET_THREAD_INFO(%ebp)
13184+ movl TI_sysenter_return(%ebp),%ebp
13185+ movl %ebp,PT_EIP(%esp)
13186 ENABLE_INTERRUPTS(CLBR_NONE)
13187
13188 /*
13189 * Load the potential sixth argument from user stack.
13190 * Careful about security.
13191 */
13192+ movl PT_OLDESP(%esp),%ebp
13193+
13194+#ifdef CONFIG_PAX_MEMORY_UDEREF
13195+ mov PT_OLDSS(%esp),%ds
13196+1: movl %ds:(%ebp),%ebp
13197+ push %ss
13198+ pop %ds
13199+#else
13200 cmpl $__PAGE_OFFSET-3,%ebp
13201 jae syscall_fault
13202 1: movl (%ebp),%ebp
13203+#endif
13204+
13205 movl %ebp,PT_EBP(%esp)
13206 .section __ex_table,"a"
13207 .align 4
13208@@ -434,12 +595,24 @@ sysenter_do_call:
13209 testl $_TIF_ALLWORK_MASK, %ecx
13210 jne sysexit_audit
13211 sysenter_exit:
13212+
13213+#ifdef CONFIG_PAX_RANDKSTACK
13214+ pushl_cfi %eax
13215+ movl %esp, %eax
13216+ call pax_randomize_kstack
13217+ popl_cfi %eax
13218+#endif
13219+
13220+ pax_erase_kstack
13221+
13222 /* if something modifies registers it must also disable sysexit */
13223 movl PT_EIP(%esp), %edx
13224 movl PT_OLDESP(%esp), %ecx
13225 xorl %ebp,%ebp
13226 TRACE_IRQS_ON
13227 1: mov PT_FS(%esp), %fs
13228+2: mov PT_DS(%esp), %ds
13229+3: mov PT_ES(%esp), %es
13230 PTGS_TO_GS
13231 ENABLE_INTERRUPTS_SYSEXIT
13232
13233@@ -456,6 +629,9 @@ sysenter_audit:
13234 movl %eax,%edx /* 2nd arg: syscall number */
13235 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
13236 call audit_syscall_entry
13237+
13238+ pax_erase_kstack
13239+
13240 pushl_cfi %ebx
13241 movl PT_EAX(%esp),%eax /* reload syscall number */
13242 jmp sysenter_do_call
13243@@ -482,11 +658,17 @@ sysexit_audit:
13244
13245 CFI_ENDPROC
13246 .pushsection .fixup,"ax"
13247-2: movl $0,PT_FS(%esp)
13248+4: movl $0,PT_FS(%esp)
13249+ jmp 1b
13250+5: movl $0,PT_DS(%esp)
13251+ jmp 1b
13252+6: movl $0,PT_ES(%esp)
13253 jmp 1b
13254 .section __ex_table,"a"
13255 .align 4
13256- .long 1b,2b
13257+ .long 1b,4b
13258+ .long 2b,5b
13259+ .long 3b,6b
13260 .popsection
13261 PTGS_TO_GS_EX
13262 ENDPROC(ia32_sysenter_target)
13263@@ -519,6 +701,15 @@ syscall_exit:
13264 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13265 jne syscall_exit_work
13266
13267+restore_all_pax:
13268+
13269+#ifdef CONFIG_PAX_RANDKSTACK
13270+ movl %esp, %eax
13271+ call pax_randomize_kstack
13272+#endif
13273+
13274+ pax_erase_kstack
13275+
13276 restore_all:
13277 TRACE_IRQS_IRET
13278 restore_all_notrace:
13279@@ -578,14 +769,34 @@ ldt_ss:
13280 * compensating for the offset by changing to the ESPFIX segment with
13281 * a base address that matches for the difference.
13282 */
13283-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
13284+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
13285 mov %esp, %edx /* load kernel esp */
13286 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13287 mov %dx, %ax /* eax: new kernel esp */
13288 sub %eax, %edx /* offset (low word is 0) */
13289+#ifdef CONFIG_SMP
13290+ movl PER_CPU_VAR(cpu_number), %ebx
13291+ shll $PAGE_SHIFT_asm, %ebx
13292+ addl $cpu_gdt_table, %ebx
13293+#else
13294+ movl $cpu_gdt_table, %ebx
13295+#endif
13296 shr $16, %edx
13297- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
13298- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
13299+
13300+#ifdef CONFIG_PAX_KERNEXEC
13301+ mov %cr0, %esi
13302+ btr $16, %esi
13303+ mov %esi, %cr0
13304+#endif
13305+
13306+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
13307+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
13308+
13309+#ifdef CONFIG_PAX_KERNEXEC
13310+ bts $16, %esi
13311+ mov %esi, %cr0
13312+#endif
13313+
13314 pushl_cfi $__ESPFIX_SS
13315 pushl_cfi %eax /* new kernel esp */
13316 /* Disable interrupts, but do not irqtrace this section: we
13317@@ -614,34 +825,28 @@ work_resched:
13318 movl TI_flags(%ebp), %ecx
13319 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13320 # than syscall tracing?
13321- jz restore_all
13322+ jz restore_all_pax
13323 testb $_TIF_NEED_RESCHED, %cl
13324 jnz work_resched
13325
13326 work_notifysig: # deal with pending signals and
13327 # notify-resume requests
13328+ movl %esp, %eax
13329 #ifdef CONFIG_VM86
13330 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13331- movl %esp, %eax
13332- jne work_notifysig_v86 # returning to kernel-space or
13333+ jz 1f # returning to kernel-space or
13334 # vm86-space
13335- xorl %edx, %edx
13336- call do_notify_resume
13337- jmp resume_userspace_sig
13338
13339- ALIGN
13340-work_notifysig_v86:
13341 pushl_cfi %ecx # save ti_flags for do_notify_resume
13342 call save_v86_state # %eax contains pt_regs pointer
13343 popl_cfi %ecx
13344 movl %eax, %esp
13345-#else
13346- movl %esp, %eax
13347+1:
13348 #endif
13349 xorl %edx, %edx
13350 call do_notify_resume
13351 jmp resume_userspace_sig
13352-END(work_pending)
13353+ENDPROC(work_pending)
13354
13355 # perform syscall exit tracing
13356 ALIGN
13357@@ -649,11 +854,14 @@ syscall_trace_entry:
13358 movl $-ENOSYS,PT_EAX(%esp)
13359 movl %esp, %eax
13360 call syscall_trace_enter
13361+
13362+ pax_erase_kstack
13363+
13364 /* What it returned is what we'll actually use. */
13365 cmpl $(nr_syscalls), %eax
13366 jnae syscall_call
13367 jmp syscall_exit
13368-END(syscall_trace_entry)
13369+ENDPROC(syscall_trace_entry)
13370
13371 # perform syscall exit tracing
13372 ALIGN
13373@@ -666,20 +874,24 @@ syscall_exit_work:
13374 movl %esp, %eax
13375 call syscall_trace_leave
13376 jmp resume_userspace
13377-END(syscall_exit_work)
13378+ENDPROC(syscall_exit_work)
13379 CFI_ENDPROC
13380
13381 RING0_INT_FRAME # can't unwind into user space anyway
13382 syscall_fault:
13383+#ifdef CONFIG_PAX_MEMORY_UDEREF
13384+ push %ss
13385+ pop %ds
13386+#endif
13387 GET_THREAD_INFO(%ebp)
13388 movl $-EFAULT,PT_EAX(%esp)
13389 jmp resume_userspace
13390-END(syscall_fault)
13391+ENDPROC(syscall_fault)
13392
13393 syscall_badsys:
13394 movl $-ENOSYS,PT_EAX(%esp)
13395 jmp resume_userspace
13396-END(syscall_badsys)
13397+ENDPROC(syscall_badsys)
13398 CFI_ENDPROC
13399 /*
13400 * End of kprobes section
13401@@ -753,6 +965,36 @@ ptregs_clone:
13402 CFI_ENDPROC
13403 ENDPROC(ptregs_clone)
13404
13405+ ALIGN;
13406+ENTRY(kernel_execve)
13407+ CFI_STARTPROC
13408+ pushl_cfi %ebp
13409+ sub $PT_OLDSS+4,%esp
13410+ pushl_cfi %edi
13411+ pushl_cfi %ecx
13412+ pushl_cfi %eax
13413+ lea 3*4(%esp),%edi
13414+ mov $PT_OLDSS/4+1,%ecx
13415+ xorl %eax,%eax
13416+ rep stosl
13417+ popl_cfi %eax
13418+ popl_cfi %ecx
13419+ popl_cfi %edi
13420+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13421+ pushl_cfi %esp
13422+ call sys_execve
13423+ add $4,%esp
13424+ CFI_ADJUST_CFA_OFFSET -4
13425+ GET_THREAD_INFO(%ebp)
13426+ test %eax,%eax
13427+ jz syscall_exit
13428+ add $PT_OLDSS+4,%esp
13429+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
13430+ popl_cfi %ebp
13431+ ret
13432+ CFI_ENDPROC
13433+ENDPROC(kernel_execve)
13434+
13435 .macro FIXUP_ESPFIX_STACK
13436 /*
13437 * Switch back for ESPFIX stack to the normal zerobased stack
13438@@ -762,8 +1004,15 @@ ENDPROC(ptregs_clone)
13439 * normal stack and adjusts ESP with the matching offset.
13440 */
13441 /* fixup the stack */
13442- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
13443- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
13444+#ifdef CONFIG_SMP
13445+ movl PER_CPU_VAR(cpu_number), %ebx
13446+ shll $PAGE_SHIFT_asm, %ebx
13447+ addl $cpu_gdt_table, %ebx
13448+#else
13449+ movl $cpu_gdt_table, %ebx
13450+#endif
13451+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
13452+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
13453 shl $16, %eax
13454 addl %esp, %eax /* the adjusted stack pointer */
13455 pushl_cfi $__KERNEL_DS
13456@@ -816,7 +1065,7 @@ vector=vector+1
13457 .endr
13458 2: jmp common_interrupt
13459 .endr
13460-END(irq_entries_start)
13461+ENDPROC(irq_entries_start)
13462
13463 .previous
13464 END(interrupt)
13465@@ -864,7 +1113,7 @@ ENTRY(coprocessor_error)
13466 pushl_cfi $do_coprocessor_error
13467 jmp error_code
13468 CFI_ENDPROC
13469-END(coprocessor_error)
13470+ENDPROC(coprocessor_error)
13471
13472 ENTRY(simd_coprocessor_error)
13473 RING0_INT_FRAME
13474@@ -885,7 +1134,7 @@ ENTRY(simd_coprocessor_error)
13475 #endif
13476 jmp error_code
13477 CFI_ENDPROC
13478-END(simd_coprocessor_error)
13479+ENDPROC(simd_coprocessor_error)
13480
13481 ENTRY(device_not_available)
13482 RING0_INT_FRAME
13483@@ -893,7 +1142,7 @@ ENTRY(device_not_available)
13484 pushl_cfi $do_device_not_available
13485 jmp error_code
13486 CFI_ENDPROC
13487-END(device_not_available)
13488+ENDPROC(device_not_available)
13489
13490 #ifdef CONFIG_PARAVIRT
13491 ENTRY(native_iret)
13492@@ -902,12 +1151,12 @@ ENTRY(native_iret)
13493 .align 4
13494 .long native_iret, iret_exc
13495 .previous
13496-END(native_iret)
13497+ENDPROC(native_iret)
13498
13499 ENTRY(native_irq_enable_sysexit)
13500 sti
13501 sysexit
13502-END(native_irq_enable_sysexit)
13503+ENDPROC(native_irq_enable_sysexit)
13504 #endif
13505
13506 ENTRY(overflow)
13507@@ -916,7 +1165,7 @@ ENTRY(overflow)
13508 pushl_cfi $do_overflow
13509 jmp error_code
13510 CFI_ENDPROC
13511-END(overflow)
13512+ENDPROC(overflow)
13513
13514 ENTRY(bounds)
13515 RING0_INT_FRAME
13516@@ -924,7 +1173,7 @@ ENTRY(bounds)
13517 pushl_cfi $do_bounds
13518 jmp error_code
13519 CFI_ENDPROC
13520-END(bounds)
13521+ENDPROC(bounds)
13522
13523 ENTRY(invalid_op)
13524 RING0_INT_FRAME
13525@@ -932,7 +1181,7 @@ ENTRY(invalid_op)
13526 pushl_cfi $do_invalid_op
13527 jmp error_code
13528 CFI_ENDPROC
13529-END(invalid_op)
13530+ENDPROC(invalid_op)
13531
13532 ENTRY(coprocessor_segment_overrun)
13533 RING0_INT_FRAME
13534@@ -940,35 +1189,35 @@ ENTRY(coprocessor_segment_overrun)
13535 pushl_cfi $do_coprocessor_segment_overrun
13536 jmp error_code
13537 CFI_ENDPROC
13538-END(coprocessor_segment_overrun)
13539+ENDPROC(coprocessor_segment_overrun)
13540
13541 ENTRY(invalid_TSS)
13542 RING0_EC_FRAME
13543 pushl_cfi $do_invalid_TSS
13544 jmp error_code
13545 CFI_ENDPROC
13546-END(invalid_TSS)
13547+ENDPROC(invalid_TSS)
13548
13549 ENTRY(segment_not_present)
13550 RING0_EC_FRAME
13551 pushl_cfi $do_segment_not_present
13552 jmp error_code
13553 CFI_ENDPROC
13554-END(segment_not_present)
13555+ENDPROC(segment_not_present)
13556
13557 ENTRY(stack_segment)
13558 RING0_EC_FRAME
13559 pushl_cfi $do_stack_segment
13560 jmp error_code
13561 CFI_ENDPROC
13562-END(stack_segment)
13563+ENDPROC(stack_segment)
13564
13565 ENTRY(alignment_check)
13566 RING0_EC_FRAME
13567 pushl_cfi $do_alignment_check
13568 jmp error_code
13569 CFI_ENDPROC
13570-END(alignment_check)
13571+ENDPROC(alignment_check)
13572
13573 ENTRY(divide_error)
13574 RING0_INT_FRAME
13575@@ -976,7 +1225,7 @@ ENTRY(divide_error)
13576 pushl_cfi $do_divide_error
13577 jmp error_code
13578 CFI_ENDPROC
13579-END(divide_error)
13580+ENDPROC(divide_error)
13581
13582 #ifdef CONFIG_X86_MCE
13583 ENTRY(machine_check)
13584@@ -985,7 +1234,7 @@ ENTRY(machine_check)
13585 pushl_cfi machine_check_vector
13586 jmp error_code
13587 CFI_ENDPROC
13588-END(machine_check)
13589+ENDPROC(machine_check)
13590 #endif
13591
13592 ENTRY(spurious_interrupt_bug)
13593@@ -994,7 +1243,7 @@ ENTRY(spurious_interrupt_bug)
13594 pushl_cfi $do_spurious_interrupt_bug
13595 jmp error_code
13596 CFI_ENDPROC
13597-END(spurious_interrupt_bug)
13598+ENDPROC(spurious_interrupt_bug)
13599 /*
13600 * End of kprobes section
13601 */
13602@@ -1109,7 +1358,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
13603
13604 ENTRY(mcount)
13605 ret
13606-END(mcount)
13607+ENDPROC(mcount)
13608
13609 ENTRY(ftrace_caller)
13610 cmpl $0, function_trace_stop
13611@@ -1138,7 +1387,7 @@ ftrace_graph_call:
13612 .globl ftrace_stub
13613 ftrace_stub:
13614 ret
13615-END(ftrace_caller)
13616+ENDPROC(ftrace_caller)
13617
13618 #else /* ! CONFIG_DYNAMIC_FTRACE */
13619
13620@@ -1174,7 +1423,7 @@ trace:
13621 popl %ecx
13622 popl %eax
13623 jmp ftrace_stub
13624-END(mcount)
13625+ENDPROC(mcount)
13626 #endif /* CONFIG_DYNAMIC_FTRACE */
13627 #endif /* CONFIG_FUNCTION_TRACER */
13628
13629@@ -1195,7 +1444,7 @@ ENTRY(ftrace_graph_caller)
13630 popl %ecx
13631 popl %eax
13632 ret
13633-END(ftrace_graph_caller)
13634+ENDPROC(ftrace_graph_caller)
13635
13636 .globl return_to_handler
13637 return_to_handler:
13638@@ -1209,7 +1458,6 @@ return_to_handler:
13639 jmp *%ecx
13640 #endif
13641
13642-.section .rodata,"a"
13643 #include "syscall_table_32.S"
13644
13645 syscall_table_size=(.-sys_call_table)
13646@@ -1255,15 +1503,18 @@ error_code:
13647 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13648 REG_TO_PTGS %ecx
13649 SET_KERNEL_GS %ecx
13650- movl $(__USER_DS), %ecx
13651+ movl $(__KERNEL_DS), %ecx
13652 movl %ecx, %ds
13653 movl %ecx, %es
13654+
13655+ pax_enter_kernel
13656+
13657 TRACE_IRQS_OFF
13658 movl %esp,%eax # pt_regs pointer
13659 call *%edi
13660 jmp ret_from_exception
13661 CFI_ENDPROC
13662-END(page_fault)
13663+ENDPROC(page_fault)
13664
13665 /*
13666 * Debug traps and NMI can happen at the one SYSENTER instruction
13667@@ -1305,7 +1556,7 @@ debug_stack_correct:
13668 call do_debug
13669 jmp ret_from_exception
13670 CFI_ENDPROC
13671-END(debug)
13672+ENDPROC(debug)
13673
13674 /*
13675 * NMI is doubly nasty. It can happen _while_ we're handling
13676@@ -1342,6 +1593,9 @@ nmi_stack_correct:
13677 xorl %edx,%edx # zero error code
13678 movl %esp,%eax # pt_regs pointer
13679 call do_nmi
13680+
13681+ pax_exit_kernel
13682+
13683 jmp restore_all_notrace
13684 CFI_ENDPROC
13685
13686@@ -1378,12 +1632,15 @@ nmi_espfix_stack:
13687 FIXUP_ESPFIX_STACK # %eax == %esp
13688 xorl %edx,%edx # zero error code
13689 call do_nmi
13690+
13691+ pax_exit_kernel
13692+
13693 RESTORE_REGS
13694 lss 12+4(%esp), %esp # back to espfix stack
13695 CFI_ADJUST_CFA_OFFSET -24
13696 jmp irq_return
13697 CFI_ENDPROC
13698-END(nmi)
13699+ENDPROC(nmi)
13700
13701 ENTRY(int3)
13702 RING0_INT_FRAME
13703@@ -1395,14 +1652,14 @@ ENTRY(int3)
13704 call do_int3
13705 jmp ret_from_exception
13706 CFI_ENDPROC
13707-END(int3)
13708+ENDPROC(int3)
13709
13710 ENTRY(general_protection)
13711 RING0_EC_FRAME
13712 pushl_cfi $do_general_protection
13713 jmp error_code
13714 CFI_ENDPROC
13715-END(general_protection)
13716+ENDPROC(general_protection)
13717
13718 #ifdef CONFIG_KVM_GUEST
13719 ENTRY(async_page_fault)
13720@@ -1410,7 +1667,7 @@ ENTRY(async_page_fault)
13721 pushl_cfi $do_async_page_fault
13722 jmp error_code
13723 CFI_ENDPROC
13724-END(async_page_fault)
13725+ENDPROC(async_page_fault)
13726 #endif
13727
13728 /*
13729diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
13730index faf8d5e..4f16a68 100644
13731--- a/arch/x86/kernel/entry_64.S
13732+++ b/arch/x86/kernel/entry_64.S
13733@@ -55,6 +55,8 @@
13734 #include <asm/paravirt.h>
13735 #include <asm/ftrace.h>
13736 #include <asm/percpu.h>
13737+#include <asm/pgtable.h>
13738+#include <asm/alternative-asm.h>
13739
13740 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13741 #include <linux/elf-em.h>
13742@@ -68,8 +70,9 @@
13743 #ifdef CONFIG_FUNCTION_TRACER
13744 #ifdef CONFIG_DYNAMIC_FTRACE
13745 ENTRY(mcount)
13746+ pax_force_retaddr
13747 retq
13748-END(mcount)
13749+ENDPROC(mcount)
13750
13751 ENTRY(ftrace_caller)
13752 cmpl $0, function_trace_stop
13753@@ -92,8 +95,9 @@ GLOBAL(ftrace_graph_call)
13754 #endif
13755
13756 GLOBAL(ftrace_stub)
13757+ pax_force_retaddr
13758 retq
13759-END(ftrace_caller)
13760+ENDPROC(ftrace_caller)
13761
13762 #else /* ! CONFIG_DYNAMIC_FTRACE */
13763 ENTRY(mcount)
13764@@ -112,6 +116,7 @@ ENTRY(mcount)
13765 #endif
13766
13767 GLOBAL(ftrace_stub)
13768+ pax_force_retaddr
13769 retq
13770
13771 trace:
13772@@ -121,12 +126,13 @@ trace:
13773 movq 8(%rbp), %rsi
13774 subq $MCOUNT_INSN_SIZE, %rdi
13775
13776+ pax_force_fptr ftrace_trace_function
13777 call *ftrace_trace_function
13778
13779 MCOUNT_RESTORE_FRAME
13780
13781 jmp ftrace_stub
13782-END(mcount)
13783+ENDPROC(mcount)
13784 #endif /* CONFIG_DYNAMIC_FTRACE */
13785 #endif /* CONFIG_FUNCTION_TRACER */
13786
13787@@ -146,8 +152,9 @@ ENTRY(ftrace_graph_caller)
13788
13789 MCOUNT_RESTORE_FRAME
13790
13791+ pax_force_retaddr
13792 retq
13793-END(ftrace_graph_caller)
13794+ENDPROC(ftrace_graph_caller)
13795
13796 GLOBAL(return_to_handler)
13797 subq $24, %rsp
13798@@ -163,6 +170,7 @@ GLOBAL(return_to_handler)
13799 movq 8(%rsp), %rdx
13800 movq (%rsp), %rax
13801 addq $24, %rsp
13802+ pax_force_fptr %rdi
13803 jmp *%rdi
13804 #endif
13805
13806@@ -178,6 +186,282 @@ ENTRY(native_usergs_sysret64)
13807 ENDPROC(native_usergs_sysret64)
13808 #endif /* CONFIG_PARAVIRT */
13809
13810+ .macro ljmpq sel, off
13811+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13812+ .byte 0x48; ljmp *1234f(%rip)
13813+ .pushsection .rodata
13814+ .align 16
13815+ 1234: .quad \off; .word \sel
13816+ .popsection
13817+#else
13818+ pushq $\sel
13819+ pushq $\off
13820+ lretq
13821+#endif
13822+ .endm
13823+
13824+ .macro pax_enter_kernel
13825+ pax_set_fptr_mask
13826+#ifdef CONFIG_PAX_KERNEXEC
13827+ call pax_enter_kernel
13828+#endif
13829+ .endm
13830+
13831+ .macro pax_exit_kernel
13832+#ifdef CONFIG_PAX_KERNEXEC
13833+ call pax_exit_kernel
13834+#endif
13835+ .endm
13836+
13837+#ifdef CONFIG_PAX_KERNEXEC
13838+ENTRY(pax_enter_kernel)
13839+ pushq %rdi
13840+
13841+#ifdef CONFIG_PARAVIRT
13842+ PV_SAVE_REGS(CLBR_RDI)
13843+#endif
13844+
13845+ GET_CR0_INTO_RDI
13846+ bts $16,%rdi
13847+ jnc 3f
13848+ mov %cs,%edi
13849+ cmp $__KERNEL_CS,%edi
13850+ jnz 2f
13851+1:
13852+
13853+#ifdef CONFIG_PARAVIRT
13854+ PV_RESTORE_REGS(CLBR_RDI)
13855+#endif
13856+
13857+ popq %rdi
13858+ pax_force_retaddr
13859+ retq
13860+
13861+2: ljmpq __KERNEL_CS,1f
13862+3: ljmpq __KERNEXEC_KERNEL_CS,4f
13863+4: SET_RDI_INTO_CR0
13864+ jmp 1b
13865+ENDPROC(pax_enter_kernel)
13866+
13867+ENTRY(pax_exit_kernel)
13868+ pushq %rdi
13869+
13870+#ifdef CONFIG_PARAVIRT
13871+ PV_SAVE_REGS(CLBR_RDI)
13872+#endif
13873+
13874+ mov %cs,%rdi
13875+ cmp $__KERNEXEC_KERNEL_CS,%edi
13876+ jz 2f
13877+1:
13878+
13879+#ifdef CONFIG_PARAVIRT
13880+ PV_RESTORE_REGS(CLBR_RDI);
13881+#endif
13882+
13883+ popq %rdi
13884+ pax_force_retaddr
13885+ retq
13886+
13887+2: GET_CR0_INTO_RDI
13888+ btr $16,%rdi
13889+ ljmpq __KERNEL_CS,3f
13890+3: SET_RDI_INTO_CR0
13891+ jmp 1b
13892+#ifdef CONFIG_PARAVIRT
13893+ PV_RESTORE_REGS(CLBR_RDI);
13894+#endif
13895+
13896+ popq %rdi
13897+ pax_force_retaddr
13898+ retq
13899+ENDPROC(pax_exit_kernel)
13900+#endif
13901+
13902+ .macro pax_enter_kernel_user
13903+ pax_set_fptr_mask
13904+#ifdef CONFIG_PAX_MEMORY_UDEREF
13905+ call pax_enter_kernel_user
13906+#endif
13907+ .endm
13908+
13909+ .macro pax_exit_kernel_user
13910+#ifdef CONFIG_PAX_MEMORY_UDEREF
13911+ call pax_exit_kernel_user
13912+#endif
13913+#ifdef CONFIG_PAX_RANDKSTACK
13914+ pushq %rax
13915+ call pax_randomize_kstack
13916+ popq %rax
13917+#endif
13918+ .endm
13919+
13920+#ifdef CONFIG_PAX_MEMORY_UDEREF
13921+ENTRY(pax_enter_kernel_user)
13922+ pushq %rdi
13923+ pushq %rbx
13924+
13925+#ifdef CONFIG_PARAVIRT
13926+ PV_SAVE_REGS(CLBR_RDI)
13927+#endif
13928+
13929+ GET_CR3_INTO_RDI
13930+ mov %rdi,%rbx
13931+ add $__START_KERNEL_map,%rbx
13932+ sub phys_base(%rip),%rbx
13933+
13934+#ifdef CONFIG_PARAVIRT
13935+ pushq %rdi
13936+ cmpl $0, pv_info+PARAVIRT_enabled
13937+ jz 1f
13938+ i = 0
13939+ .rept USER_PGD_PTRS
13940+ mov i*8(%rbx),%rsi
13941+ mov $0,%sil
13942+ lea i*8(%rbx),%rdi
13943+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13944+ i = i + 1
13945+ .endr
13946+ jmp 2f
13947+1:
13948+#endif
13949+
13950+ i = 0
13951+ .rept USER_PGD_PTRS
13952+ movb $0,i*8(%rbx)
13953+ i = i + 1
13954+ .endr
13955+
13956+#ifdef CONFIG_PARAVIRT
13957+2: popq %rdi
13958+#endif
13959+ SET_RDI_INTO_CR3
13960+
13961+#ifdef CONFIG_PAX_KERNEXEC
13962+ GET_CR0_INTO_RDI
13963+ bts $16,%rdi
13964+ SET_RDI_INTO_CR0
13965+#endif
13966+
13967+#ifdef CONFIG_PARAVIRT
13968+ PV_RESTORE_REGS(CLBR_RDI)
13969+#endif
13970+
13971+ popq %rbx
13972+ popq %rdi
13973+ pax_force_retaddr
13974+ retq
13975+ENDPROC(pax_enter_kernel_user)
13976+
13977+ENTRY(pax_exit_kernel_user)
13978+ push %rdi
13979+
13980+#ifdef CONFIG_PARAVIRT
13981+ pushq %rbx
13982+ PV_SAVE_REGS(CLBR_RDI)
13983+#endif
13984+
13985+#ifdef CONFIG_PAX_KERNEXEC
13986+ GET_CR0_INTO_RDI
13987+ btr $16,%rdi
13988+ SET_RDI_INTO_CR0
13989+#endif
13990+
13991+ GET_CR3_INTO_RDI
13992+ add $__START_KERNEL_map,%rdi
13993+ sub phys_base(%rip),%rdi
13994+
13995+#ifdef CONFIG_PARAVIRT
13996+ cmpl $0, pv_info+PARAVIRT_enabled
13997+ jz 1f
13998+ mov %rdi,%rbx
13999+ i = 0
14000+ .rept USER_PGD_PTRS
14001+ mov i*8(%rbx),%rsi
14002+ mov $0x67,%sil
14003+ lea i*8(%rbx),%rdi
14004+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
14005+ i = i + 1
14006+ .endr
14007+ jmp 2f
14008+1:
14009+#endif
14010+
14011+ i = 0
14012+ .rept USER_PGD_PTRS
14013+ movb $0x67,i*8(%rdi)
14014+ i = i + 1
14015+ .endr
14016+
14017+#ifdef CONFIG_PARAVIRT
14018+2: PV_RESTORE_REGS(CLBR_RDI)
14019+ popq %rbx
14020+#endif
14021+
14022+ popq %rdi
14023+ pax_force_retaddr
14024+ retq
14025+ENDPROC(pax_exit_kernel_user)
14026+#endif
14027+
14028+.macro pax_erase_kstack
14029+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14030+ call pax_erase_kstack
14031+#endif
14032+.endm
14033+
14034+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14035+/*
14036+ * r11: thread_info
14037+ * rcx, rdx: can be clobbered
14038+ */
14039+ENTRY(pax_erase_kstack)
14040+ pushq %rdi
14041+ pushq %rax
14042+ pushq %r11
14043+
14044+ GET_THREAD_INFO(%r11)
14045+ mov TI_lowest_stack(%r11), %rdi
14046+ mov $-0xBEEF, %rax
14047+ std
14048+
14049+1: mov %edi, %ecx
14050+ and $THREAD_SIZE_asm - 1, %ecx
14051+ shr $3, %ecx
14052+ repne scasq
14053+ jecxz 2f
14054+
14055+ cmp $2*8, %ecx
14056+ jc 2f
14057+
14058+ mov $2*8, %ecx
14059+ repe scasq
14060+ jecxz 2f
14061+ jne 1b
14062+
14063+2: cld
14064+ mov %esp, %ecx
14065+ sub %edi, %ecx
14066+
14067+ cmp $THREAD_SIZE_asm, %rcx
14068+ jb 3f
14069+ ud2
14070+3:
14071+
14072+ shr $3, %ecx
14073+ rep stosq
14074+
14075+ mov TI_task_thread_sp0(%r11), %rdi
14076+ sub $256, %rdi
14077+ mov %rdi, TI_lowest_stack(%r11)
14078+
14079+ popq %r11
14080+ popq %rax
14081+ popq %rdi
14082+ pax_force_retaddr
14083+ ret
14084+ENDPROC(pax_erase_kstack)
14085+#endif
14086
14087 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
14088 #ifdef CONFIG_TRACE_IRQFLAGS
14089@@ -231,8 +515,8 @@ ENDPROC(native_usergs_sysret64)
14090 .endm
14091
14092 .macro UNFAKE_STACK_FRAME
14093- addq $8*6, %rsp
14094- CFI_ADJUST_CFA_OFFSET -(6*8)
14095+ addq $8*6 + ARG_SKIP, %rsp
14096+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
14097 .endm
14098
14099 /*
14100@@ -319,7 +603,7 @@ ENDPROC(native_usergs_sysret64)
14101 movq %rsp, %rsi
14102
14103 leaq -RBP(%rsp),%rdi /* arg1 for handler */
14104- testl $3, CS(%rdi)
14105+ testb $3, CS(%rdi)
14106 je 1f
14107 SWAPGS
14108 /*
14109@@ -355,9 +639,10 @@ ENTRY(save_rest)
14110 movq_cfi r15, R15+16
14111 movq %r11, 8(%rsp) /* return address */
14112 FIXUP_TOP_OF_STACK %r11, 16
14113+ pax_force_retaddr
14114 ret
14115 CFI_ENDPROC
14116-END(save_rest)
14117+ENDPROC(save_rest)
14118
14119 /* save complete stack frame */
14120 .pushsection .kprobes.text, "ax"
14121@@ -386,9 +671,10 @@ ENTRY(save_paranoid)
14122 js 1f /* negative -> in kernel */
14123 SWAPGS
14124 xorl %ebx,%ebx
14125-1: ret
14126+1: pax_force_retaddr_bts
14127+ ret
14128 CFI_ENDPROC
14129-END(save_paranoid)
14130+ENDPROC(save_paranoid)
14131 .popsection
14132
14133 /*
14134@@ -410,7 +696,7 @@ ENTRY(ret_from_fork)
14135
14136 RESTORE_REST
14137
14138- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
14139+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
14140 je int_ret_from_sys_call
14141
14142 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
14143@@ -420,7 +706,7 @@ ENTRY(ret_from_fork)
14144 jmp ret_from_sys_call # go to the SYSRET fastpath
14145
14146 CFI_ENDPROC
14147-END(ret_from_fork)
14148+ENDPROC(ret_from_fork)
14149
14150 /*
14151 * System call entry. Up to 6 arguments in registers are supported.
14152@@ -456,7 +742,7 @@ END(ret_from_fork)
14153 ENTRY(system_call)
14154 CFI_STARTPROC simple
14155 CFI_SIGNAL_FRAME
14156- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14157+ CFI_DEF_CFA rsp,0
14158 CFI_REGISTER rip,rcx
14159 /*CFI_REGISTER rflags,r11*/
14160 SWAPGS_UNSAFE_STACK
14161@@ -469,12 +755,13 @@ ENTRY(system_call_after_swapgs)
14162
14163 movq %rsp,PER_CPU_VAR(old_rsp)
14164 movq PER_CPU_VAR(kernel_stack),%rsp
14165+ SAVE_ARGS 8*6,0
14166+ pax_enter_kernel_user
14167 /*
14168 * No need to follow this irqs off/on section - it's straight
14169 * and short:
14170 */
14171 ENABLE_INTERRUPTS(CLBR_NONE)
14172- SAVE_ARGS 8,0
14173 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14174 movq %rcx,RIP-ARGOFFSET(%rsp)
14175 CFI_REL_OFFSET rip,RIP-ARGOFFSET
14176@@ -484,7 +771,7 @@ ENTRY(system_call_after_swapgs)
14177 system_call_fastpath:
14178 cmpq $__NR_syscall_max,%rax
14179 ja badsys
14180- movq %r10,%rcx
14181+ movq R10-ARGOFFSET(%rsp),%rcx
14182 call *sys_call_table(,%rax,8) # XXX: rip relative
14183 movq %rax,RAX-ARGOFFSET(%rsp)
14184 /*
14185@@ -503,6 +790,8 @@ sysret_check:
14186 andl %edi,%edx
14187 jnz sysret_careful
14188 CFI_REMEMBER_STATE
14189+ pax_exit_kernel_user
14190+ pax_erase_kstack
14191 /*
14192 * sysretq will re-enable interrupts:
14193 */
14194@@ -554,14 +843,18 @@ badsys:
14195 * jump back to the normal fast path.
14196 */
14197 auditsys:
14198- movq %r10,%r9 /* 6th arg: 4th syscall arg */
14199+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
14200 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
14201 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
14202 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
14203 movq %rax,%rsi /* 2nd arg: syscall number */
14204 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
14205 call audit_syscall_entry
14206+
14207+ pax_erase_kstack
14208+
14209 LOAD_ARGS 0 /* reload call-clobbered registers */
14210+ pax_set_fptr_mask
14211 jmp system_call_fastpath
14212
14213 /*
14214@@ -591,16 +884,20 @@ tracesys:
14215 FIXUP_TOP_OF_STACK %rdi
14216 movq %rsp,%rdi
14217 call syscall_trace_enter
14218+
14219+ pax_erase_kstack
14220+
14221 /*
14222 * Reload arg registers from stack in case ptrace changed them.
14223 * We don't reload %rax because syscall_trace_enter() returned
14224 * the value it wants us to use in the table lookup.
14225 */
14226 LOAD_ARGS ARGOFFSET, 1
14227+ pax_set_fptr_mask
14228 RESTORE_REST
14229 cmpq $__NR_syscall_max,%rax
14230 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
14231- movq %r10,%rcx /* fixup for C */
14232+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
14233 call *sys_call_table(,%rax,8)
14234 movq %rax,RAX-ARGOFFSET(%rsp)
14235 /* Use IRET because user could have changed frame */
14236@@ -612,7 +909,7 @@ tracesys:
14237 GLOBAL(int_ret_from_sys_call)
14238 DISABLE_INTERRUPTS(CLBR_NONE)
14239 TRACE_IRQS_OFF
14240- testl $3,CS-ARGOFFSET(%rsp)
14241+ testb $3,CS-ARGOFFSET(%rsp)
14242 je retint_restore_args
14243 movl $_TIF_ALLWORK_MASK,%edi
14244 /* edi: mask to check */
14245@@ -623,6 +920,7 @@ GLOBAL(int_with_check)
14246 andl %edi,%edx
14247 jnz int_careful
14248 andl $~TS_COMPAT,TI_status(%rcx)
14249+ pax_erase_kstack
14250 jmp retint_swapgs
14251
14252 /* Either reschedule or signal or syscall exit tracking needed. */
14253@@ -669,7 +967,7 @@ int_restore_rest:
14254 TRACE_IRQS_OFF
14255 jmp int_with_check
14256 CFI_ENDPROC
14257-END(system_call)
14258+ENDPROC(system_call)
14259
14260 /*
14261 * Certain special system calls that need to save a complete full stack frame.
14262@@ -685,7 +983,7 @@ ENTRY(\label)
14263 call \func
14264 jmp ptregscall_common
14265 CFI_ENDPROC
14266-END(\label)
14267+ENDPROC(\label)
14268 .endm
14269
14270 PTREGSCALL stub_clone, sys_clone, %r8
14271@@ -703,9 +1001,10 @@ ENTRY(ptregscall_common)
14272 movq_cfi_restore R12+8, r12
14273 movq_cfi_restore RBP+8, rbp
14274 movq_cfi_restore RBX+8, rbx
14275+ pax_force_retaddr
14276 ret $REST_SKIP /* pop extended registers */
14277 CFI_ENDPROC
14278-END(ptregscall_common)
14279+ENDPROC(ptregscall_common)
14280
14281 ENTRY(stub_execve)
14282 CFI_STARTPROC
14283@@ -720,7 +1019,7 @@ ENTRY(stub_execve)
14284 RESTORE_REST
14285 jmp int_ret_from_sys_call
14286 CFI_ENDPROC
14287-END(stub_execve)
14288+ENDPROC(stub_execve)
14289
14290 /*
14291 * sigreturn is special because it needs to restore all registers on return.
14292@@ -738,7 +1037,7 @@ ENTRY(stub_rt_sigreturn)
14293 RESTORE_REST
14294 jmp int_ret_from_sys_call
14295 CFI_ENDPROC
14296-END(stub_rt_sigreturn)
14297+ENDPROC(stub_rt_sigreturn)
14298
14299 /*
14300 * Build the entry stubs and pointer table with some assembler magic.
14301@@ -773,7 +1072,7 @@ vector=vector+1
14302 2: jmp common_interrupt
14303 .endr
14304 CFI_ENDPROC
14305-END(irq_entries_start)
14306+ENDPROC(irq_entries_start)
14307
14308 .previous
14309 END(interrupt)
14310@@ -793,6 +1092,16 @@ END(interrupt)
14311 subq $ORIG_RAX-RBP, %rsp
14312 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
14313 SAVE_ARGS_IRQ
14314+#ifdef CONFIG_PAX_MEMORY_UDEREF
14315+ testb $3, CS(%rdi)
14316+ jnz 1f
14317+ pax_enter_kernel
14318+ jmp 2f
14319+1: pax_enter_kernel_user
14320+2:
14321+#else
14322+ pax_enter_kernel
14323+#endif
14324 call \func
14325 .endm
14326
14327@@ -824,7 +1133,7 @@ ret_from_intr:
14328
14329 exit_intr:
14330 GET_THREAD_INFO(%rcx)
14331- testl $3,CS-ARGOFFSET(%rsp)
14332+ testb $3,CS-ARGOFFSET(%rsp)
14333 je retint_kernel
14334
14335 /* Interrupt came from user space */
14336@@ -846,12 +1155,15 @@ retint_swapgs: /* return to user-space */
14337 * The iretq could re-enable interrupts:
14338 */
14339 DISABLE_INTERRUPTS(CLBR_ANY)
14340+ pax_exit_kernel_user
14341 TRACE_IRQS_IRETQ
14342 SWAPGS
14343 jmp restore_args
14344
14345 retint_restore_args: /* return to kernel space */
14346 DISABLE_INTERRUPTS(CLBR_ANY)
14347+ pax_exit_kernel
14348+ pax_force_retaddr RIP-ARGOFFSET
14349 /*
14350 * The iretq could re-enable interrupts:
14351 */
14352@@ -940,7 +1252,7 @@ ENTRY(retint_kernel)
14353 #endif
14354
14355 CFI_ENDPROC
14356-END(common_interrupt)
14357+ENDPROC(common_interrupt)
14358 /*
14359 * End of kprobes section
14360 */
14361@@ -956,7 +1268,7 @@ ENTRY(\sym)
14362 interrupt \do_sym
14363 jmp ret_from_intr
14364 CFI_ENDPROC
14365-END(\sym)
14366+ENDPROC(\sym)
14367 .endm
14368
14369 #ifdef CONFIG_SMP
14370@@ -1021,12 +1333,22 @@ ENTRY(\sym)
14371 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14372 call error_entry
14373 DEFAULT_FRAME 0
14374+#ifdef CONFIG_PAX_MEMORY_UDEREF
14375+ testb $3, CS(%rsp)
14376+ jnz 1f
14377+ pax_enter_kernel
14378+ jmp 2f
14379+1: pax_enter_kernel_user
14380+2:
14381+#else
14382+ pax_enter_kernel
14383+#endif
14384 movq %rsp,%rdi /* pt_regs pointer */
14385 xorl %esi,%esi /* no error code */
14386 call \do_sym
14387 jmp error_exit /* %ebx: no swapgs flag */
14388 CFI_ENDPROC
14389-END(\sym)
14390+ENDPROC(\sym)
14391 .endm
14392
14393 .macro paranoidzeroentry sym do_sym
14394@@ -1038,15 +1360,25 @@ ENTRY(\sym)
14395 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14396 call save_paranoid
14397 TRACE_IRQS_OFF
14398+#ifdef CONFIG_PAX_MEMORY_UDEREF
14399+ testb $3, CS(%rsp)
14400+ jnz 1f
14401+ pax_enter_kernel
14402+ jmp 2f
14403+1: pax_enter_kernel_user
14404+2:
14405+#else
14406+ pax_enter_kernel
14407+#endif
14408 movq %rsp,%rdi /* pt_regs pointer */
14409 xorl %esi,%esi /* no error code */
14410 call \do_sym
14411 jmp paranoid_exit /* %ebx: no swapgs flag */
14412 CFI_ENDPROC
14413-END(\sym)
14414+ENDPROC(\sym)
14415 .endm
14416
14417-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
14418+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
14419 .macro paranoidzeroentry_ist sym do_sym ist
14420 ENTRY(\sym)
14421 INTR_FRAME
14422@@ -1056,14 +1388,30 @@ ENTRY(\sym)
14423 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14424 call save_paranoid
14425 TRACE_IRQS_OFF
14426+#ifdef CONFIG_PAX_MEMORY_UDEREF
14427+ testb $3, CS(%rsp)
14428+ jnz 1f
14429+ pax_enter_kernel
14430+ jmp 2f
14431+1: pax_enter_kernel_user
14432+2:
14433+#else
14434+ pax_enter_kernel
14435+#endif
14436 movq %rsp,%rdi /* pt_regs pointer */
14437 xorl %esi,%esi /* no error code */
14438+#ifdef CONFIG_SMP
14439+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
14440+ lea init_tss(%r12), %r12
14441+#else
14442+ lea init_tss(%rip), %r12
14443+#endif
14444 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14445 call \do_sym
14446 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14447 jmp paranoid_exit /* %ebx: no swapgs flag */
14448 CFI_ENDPROC
14449-END(\sym)
14450+ENDPROC(\sym)
14451 .endm
14452
14453 .macro errorentry sym do_sym
14454@@ -1074,13 +1422,23 @@ ENTRY(\sym)
14455 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14456 call error_entry
14457 DEFAULT_FRAME 0
14458+#ifdef CONFIG_PAX_MEMORY_UDEREF
14459+ testb $3, CS(%rsp)
14460+ jnz 1f
14461+ pax_enter_kernel
14462+ jmp 2f
14463+1: pax_enter_kernel_user
14464+2:
14465+#else
14466+ pax_enter_kernel
14467+#endif
14468 movq %rsp,%rdi /* pt_regs pointer */
14469 movq ORIG_RAX(%rsp),%rsi /* get error code */
14470 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14471 call \do_sym
14472 jmp error_exit /* %ebx: no swapgs flag */
14473 CFI_ENDPROC
14474-END(\sym)
14475+ENDPROC(\sym)
14476 .endm
14477
14478 /* error code is on the stack already */
14479@@ -1093,13 +1451,23 @@ ENTRY(\sym)
14480 call save_paranoid
14481 DEFAULT_FRAME 0
14482 TRACE_IRQS_OFF
14483+#ifdef CONFIG_PAX_MEMORY_UDEREF
14484+ testb $3, CS(%rsp)
14485+ jnz 1f
14486+ pax_enter_kernel
14487+ jmp 2f
14488+1: pax_enter_kernel_user
14489+2:
14490+#else
14491+ pax_enter_kernel
14492+#endif
14493 movq %rsp,%rdi /* pt_regs pointer */
14494 movq ORIG_RAX(%rsp),%rsi /* get error code */
14495 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14496 call \do_sym
14497 jmp paranoid_exit /* %ebx: no swapgs flag */
14498 CFI_ENDPROC
14499-END(\sym)
14500+ENDPROC(\sym)
14501 .endm
14502
14503 zeroentry divide_error do_divide_error
14504@@ -1129,9 +1497,10 @@ gs_change:
14505 2: mfence /* workaround */
14506 SWAPGS
14507 popfq_cfi
14508+ pax_force_retaddr
14509 ret
14510 CFI_ENDPROC
14511-END(native_load_gs_index)
14512+ENDPROC(native_load_gs_index)
14513
14514 .section __ex_table,"a"
14515 .align 8
14516@@ -1153,13 +1522,14 @@ ENTRY(kernel_thread_helper)
14517 * Here we are in the child and the registers are set as they were
14518 * at kernel_thread() invocation in the parent.
14519 */
14520+ pax_force_fptr %rsi
14521 call *%rsi
14522 # exit
14523 mov %eax, %edi
14524 call do_exit
14525 ud2 # padding for call trace
14526 CFI_ENDPROC
14527-END(kernel_thread_helper)
14528+ENDPROC(kernel_thread_helper)
14529
14530 /*
14531 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
14532@@ -1186,11 +1556,11 @@ ENTRY(kernel_execve)
14533 RESTORE_REST
14534 testq %rax,%rax
14535 je int_ret_from_sys_call
14536- RESTORE_ARGS
14537 UNFAKE_STACK_FRAME
14538+ pax_force_retaddr
14539 ret
14540 CFI_ENDPROC
14541-END(kernel_execve)
14542+ENDPROC(kernel_execve)
14543
14544 /* Call softirq on interrupt stack. Interrupts are off. */
14545 ENTRY(call_softirq)
14546@@ -1208,9 +1578,10 @@ ENTRY(call_softirq)
14547 CFI_DEF_CFA_REGISTER rsp
14548 CFI_ADJUST_CFA_OFFSET -8
14549 decl PER_CPU_VAR(irq_count)
14550+ pax_force_retaddr
14551 ret
14552 CFI_ENDPROC
14553-END(call_softirq)
14554+ENDPROC(call_softirq)
14555
14556 #ifdef CONFIG_XEN
14557 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
14558@@ -1248,7 +1619,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
14559 decl PER_CPU_VAR(irq_count)
14560 jmp error_exit
14561 CFI_ENDPROC
14562-END(xen_do_hypervisor_callback)
14563+ENDPROC(xen_do_hypervisor_callback)
14564
14565 /*
14566 * Hypervisor uses this for application faults while it executes.
14567@@ -1307,7 +1678,7 @@ ENTRY(xen_failsafe_callback)
14568 SAVE_ALL
14569 jmp error_exit
14570 CFI_ENDPROC
14571-END(xen_failsafe_callback)
14572+ENDPROC(xen_failsafe_callback)
14573
14574 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
14575 xen_hvm_callback_vector xen_evtchn_do_upcall
14576@@ -1356,16 +1727,31 @@ ENTRY(paranoid_exit)
14577 TRACE_IRQS_OFF
14578 testl %ebx,%ebx /* swapgs needed? */
14579 jnz paranoid_restore
14580- testl $3,CS(%rsp)
14581+ testb $3,CS(%rsp)
14582 jnz paranoid_userspace
14583+#ifdef CONFIG_PAX_MEMORY_UDEREF
14584+ pax_exit_kernel
14585+ TRACE_IRQS_IRETQ 0
14586+ SWAPGS_UNSAFE_STACK
14587+ RESTORE_ALL 8
14588+ pax_force_retaddr_bts
14589+ jmp irq_return
14590+#endif
14591 paranoid_swapgs:
14592+#ifdef CONFIG_PAX_MEMORY_UDEREF
14593+ pax_exit_kernel_user
14594+#else
14595+ pax_exit_kernel
14596+#endif
14597 TRACE_IRQS_IRETQ 0
14598 SWAPGS_UNSAFE_STACK
14599 RESTORE_ALL 8
14600 jmp irq_return
14601 paranoid_restore:
14602+ pax_exit_kernel
14603 TRACE_IRQS_IRETQ 0
14604 RESTORE_ALL 8
14605+ pax_force_retaddr_bts
14606 jmp irq_return
14607 paranoid_userspace:
14608 GET_THREAD_INFO(%rcx)
14609@@ -1394,7 +1780,7 @@ paranoid_schedule:
14610 TRACE_IRQS_OFF
14611 jmp paranoid_userspace
14612 CFI_ENDPROC
14613-END(paranoid_exit)
14614+ENDPROC(paranoid_exit)
14615
14616 /*
14617 * Exception entry point. This expects an error code/orig_rax on the stack.
14618@@ -1421,12 +1807,13 @@ ENTRY(error_entry)
14619 movq_cfi r14, R14+8
14620 movq_cfi r15, R15+8
14621 xorl %ebx,%ebx
14622- testl $3,CS+8(%rsp)
14623+ testb $3,CS+8(%rsp)
14624 je error_kernelspace
14625 error_swapgs:
14626 SWAPGS
14627 error_sti:
14628 TRACE_IRQS_OFF
14629+ pax_force_retaddr_bts
14630 ret
14631
14632 /*
14633@@ -1453,7 +1840,7 @@ bstep_iret:
14634 movq %rcx,RIP+8(%rsp)
14635 jmp error_swapgs
14636 CFI_ENDPROC
14637-END(error_entry)
14638+ENDPROC(error_entry)
14639
14640
14641 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
14642@@ -1473,7 +1860,7 @@ ENTRY(error_exit)
14643 jnz retint_careful
14644 jmp retint_swapgs
14645 CFI_ENDPROC
14646-END(error_exit)
14647+ENDPROC(error_exit)
14648
14649
14650 /* runs on exception stack */
14651@@ -1485,6 +1872,16 @@ ENTRY(nmi)
14652 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14653 call save_paranoid
14654 DEFAULT_FRAME 0
14655+#ifdef CONFIG_PAX_MEMORY_UDEREF
14656+ testb $3, CS(%rsp)
14657+ jnz 1f
14658+ pax_enter_kernel
14659+ jmp 2f
14660+1: pax_enter_kernel_user
14661+2:
14662+#else
14663+ pax_enter_kernel
14664+#endif
14665 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14666 movq %rsp,%rdi
14667 movq $-1,%rsi
14668@@ -1495,12 +1892,28 @@ ENTRY(nmi)
14669 DISABLE_INTERRUPTS(CLBR_NONE)
14670 testl %ebx,%ebx /* swapgs needed? */
14671 jnz nmi_restore
14672- testl $3,CS(%rsp)
14673+ testb $3,CS(%rsp)
14674 jnz nmi_userspace
14675+#ifdef CONFIG_PAX_MEMORY_UDEREF
14676+ pax_exit_kernel
14677+ SWAPGS_UNSAFE_STACK
14678+ RESTORE_ALL 8
14679+ pax_force_retaddr_bts
14680+ jmp irq_return
14681+#endif
14682 nmi_swapgs:
14683+#ifdef CONFIG_PAX_MEMORY_UDEREF
14684+ pax_exit_kernel_user
14685+#else
14686+ pax_exit_kernel
14687+#endif
14688 SWAPGS_UNSAFE_STACK
14689+ RESTORE_ALL 8
14690+ jmp irq_return
14691 nmi_restore:
14692+ pax_exit_kernel
14693 RESTORE_ALL 8
14694+ pax_force_retaddr_bts
14695 jmp irq_return
14696 nmi_userspace:
14697 GET_THREAD_INFO(%rcx)
14698@@ -1529,14 +1942,14 @@ nmi_schedule:
14699 jmp paranoid_exit
14700 CFI_ENDPROC
14701 #endif
14702-END(nmi)
14703+ENDPROC(nmi)
14704
14705 ENTRY(ignore_sysret)
14706 CFI_STARTPROC
14707 mov $-ENOSYS,%eax
14708 sysret
14709 CFI_ENDPROC
14710-END(ignore_sysret)
14711+ENDPROC(ignore_sysret)
14712
14713 /*
14714 * End of kprobes section
14715diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
14716index c9a281f..ce2f317 100644
14717--- a/arch/x86/kernel/ftrace.c
14718+++ b/arch/x86/kernel/ftrace.c
14719@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
14720 static const void *mod_code_newcode; /* holds the text to write to the IP */
14721
14722 static unsigned nmi_wait_count;
14723-static atomic_t nmi_update_count = ATOMIC_INIT(0);
14724+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14725
14726 int ftrace_arch_read_dyn_info(char *buf, int size)
14727 {
14728@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
14729
14730 r = snprintf(buf, size, "%u %u",
14731 nmi_wait_count,
14732- atomic_read(&nmi_update_count));
14733+ atomic_read_unchecked(&nmi_update_count));
14734 return r;
14735 }
14736
14737@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
14738
14739 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14740 smp_rmb();
14741+ pax_open_kernel();
14742 ftrace_mod_code();
14743- atomic_inc(&nmi_update_count);
14744+ pax_close_kernel();
14745+ atomic_inc_unchecked(&nmi_update_count);
14746 }
14747 /* Must have previous changes seen before executions */
14748 smp_mb();
14749@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
14750 {
14751 unsigned char replaced[MCOUNT_INSN_SIZE];
14752
14753+ ip = ktla_ktva(ip);
14754+
14755 /*
14756 * Note: Due to modules and __init, code can
14757 * disappear and change, we need to protect against faulting
14758@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
14759 unsigned char old[MCOUNT_INSN_SIZE], *new;
14760 int ret;
14761
14762- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14763+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14764 new = ftrace_call_replace(ip, (unsigned long)func);
14765 ret = ftrace_modify_code(ip, old, new);
14766
14767@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
14768 {
14769 unsigned char code[MCOUNT_INSN_SIZE];
14770
14771+ ip = ktla_ktva(ip);
14772+
14773 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14774 return -EFAULT;
14775
14776diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
14777index 3bb0850..55a56f4 100644
14778--- a/arch/x86/kernel/head32.c
14779+++ b/arch/x86/kernel/head32.c
14780@@ -19,6 +19,7 @@
14781 #include <asm/io_apic.h>
14782 #include <asm/bios_ebda.h>
14783 #include <asm/tlbflush.h>
14784+#include <asm/boot.h>
14785
14786 static void __init i386_default_early_setup(void)
14787 {
14788@@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
14789 {
14790 memblock_init();
14791
14792- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14793+ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14794
14795 #ifdef CONFIG_BLK_DEV_INITRD
14796 /* Reserve INITRD */
14797diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
14798index ce0be7c..c41476e 100644
14799--- a/arch/x86/kernel/head_32.S
14800+++ b/arch/x86/kernel/head_32.S
14801@@ -25,6 +25,12 @@
14802 /* Physical address */
14803 #define pa(X) ((X) - __PAGE_OFFSET)
14804
14805+#ifdef CONFIG_PAX_KERNEXEC
14806+#define ta(X) (X)
14807+#else
14808+#define ta(X) ((X) - __PAGE_OFFSET)
14809+#endif
14810+
14811 /*
14812 * References to members of the new_cpu_data structure.
14813 */
14814@@ -54,11 +60,7 @@
14815 * and small than max_low_pfn, otherwise will waste some page table entries
14816 */
14817
14818-#if PTRS_PER_PMD > 1
14819-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14820-#else
14821-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14822-#endif
14823+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14824
14825 /* Number of possible pages in the lowmem region */
14826 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
14827@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
14828 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14829
14830 /*
14831+ * Real beginning of normal "text" segment
14832+ */
14833+ENTRY(stext)
14834+ENTRY(_stext)
14835+
14836+/*
14837 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14838 * %esi points to the real-mode code as a 32-bit pointer.
14839 * CS and DS must be 4 GB flat segments, but we don't depend on
14840@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14841 * can.
14842 */
14843 __HEAD
14844+
14845+#ifdef CONFIG_PAX_KERNEXEC
14846+ jmp startup_32
14847+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14848+.fill PAGE_SIZE-5,1,0xcc
14849+#endif
14850+
14851 ENTRY(startup_32)
14852 movl pa(stack_start),%ecx
14853
14854@@ -105,6 +120,57 @@ ENTRY(startup_32)
14855 2:
14856 leal -__PAGE_OFFSET(%ecx),%esp
14857
14858+#ifdef CONFIG_SMP
14859+ movl $pa(cpu_gdt_table),%edi
14860+ movl $__per_cpu_load,%eax
14861+ movw %ax,__KERNEL_PERCPU + 2(%edi)
14862+ rorl $16,%eax
14863+ movb %al,__KERNEL_PERCPU + 4(%edi)
14864+ movb %ah,__KERNEL_PERCPU + 7(%edi)
14865+ movl $__per_cpu_end - 1,%eax
14866+ subl $__per_cpu_start,%eax
14867+ movw %ax,__KERNEL_PERCPU + 0(%edi)
14868+#endif
14869+
14870+#ifdef CONFIG_PAX_MEMORY_UDEREF
14871+ movl $NR_CPUS,%ecx
14872+ movl $pa(cpu_gdt_table),%edi
14873+1:
14874+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14875+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14876+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14877+ addl $PAGE_SIZE_asm,%edi
14878+ loop 1b
14879+#endif
14880+
14881+#ifdef CONFIG_PAX_KERNEXEC
14882+ movl $pa(boot_gdt),%edi
14883+ movl $__LOAD_PHYSICAL_ADDR,%eax
14884+ movw %ax,__BOOT_CS + 2(%edi)
14885+ rorl $16,%eax
14886+ movb %al,__BOOT_CS + 4(%edi)
14887+ movb %ah,__BOOT_CS + 7(%edi)
14888+ rorl $16,%eax
14889+
14890+ ljmp $(__BOOT_CS),$1f
14891+1:
14892+
14893+ movl $NR_CPUS,%ecx
14894+ movl $pa(cpu_gdt_table),%edi
14895+ addl $__PAGE_OFFSET,%eax
14896+1:
14897+ movw %ax,__KERNEL_CS + 2(%edi)
14898+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14899+ rorl $16,%eax
14900+ movb %al,__KERNEL_CS + 4(%edi)
14901+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14902+ movb %ah,__KERNEL_CS + 7(%edi)
14903+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14904+ rorl $16,%eax
14905+ addl $PAGE_SIZE_asm,%edi
14906+ loop 1b
14907+#endif
14908+
14909 /*
14910 * Clear BSS first so that there are no surprises...
14911 */
14912@@ -195,8 +261,11 @@ ENTRY(startup_32)
14913 movl %eax, pa(max_pfn_mapped)
14914
14915 /* Do early initialization of the fixmap area */
14916- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14917- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
14918+#ifdef CONFIG_COMPAT_VDSO
14919+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
14920+#else
14921+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
14922+#endif
14923 #else /* Not PAE */
14924
14925 page_pde_offset = (__PAGE_OFFSET >> 20);
14926@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14927 movl %eax, pa(max_pfn_mapped)
14928
14929 /* Do early initialization of the fixmap area */
14930- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14931- movl %eax,pa(initial_page_table+0xffc)
14932+#ifdef CONFIG_COMPAT_VDSO
14933+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
14934+#else
14935+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
14936+#endif
14937 #endif
14938
14939 #ifdef CONFIG_PARAVIRT
14940@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14941 cmpl $num_subarch_entries, %eax
14942 jae bad_subarch
14943
14944- movl pa(subarch_entries)(,%eax,4), %eax
14945- subl $__PAGE_OFFSET, %eax
14946- jmp *%eax
14947+ jmp *pa(subarch_entries)(,%eax,4)
14948
14949 bad_subarch:
14950 WEAK(lguest_entry)
14951@@ -255,10 +325,10 @@ WEAK(xen_entry)
14952 __INITDATA
14953
14954 subarch_entries:
14955- .long default_entry /* normal x86/PC */
14956- .long lguest_entry /* lguest hypervisor */
14957- .long xen_entry /* Xen hypervisor */
14958- .long default_entry /* Moorestown MID */
14959+ .long ta(default_entry) /* normal x86/PC */
14960+ .long ta(lguest_entry) /* lguest hypervisor */
14961+ .long ta(xen_entry) /* Xen hypervisor */
14962+ .long ta(default_entry) /* Moorestown MID */
14963 num_subarch_entries = (. - subarch_entries) / 4
14964 .previous
14965 #else
14966@@ -312,6 +382,7 @@ default_entry:
14967 orl %edx,%eax
14968 movl %eax,%cr4
14969
14970+#ifdef CONFIG_X86_PAE
14971 testb $X86_CR4_PAE, %al # check if PAE is enabled
14972 jz 6f
14973
14974@@ -340,6 +411,9 @@ default_entry:
14975 /* Make changes effective */
14976 wrmsr
14977
14978+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14979+#endif
14980+
14981 6:
14982
14983 /*
14984@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
14985 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14986 movl %eax,%ss # after changing gdt.
14987
14988- movl $(__USER_DS),%eax # DS/ES contains default USER segment
14989+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14990 movl %eax,%ds
14991 movl %eax,%es
14992
14993@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
14994 */
14995 cmpb $0,ready
14996 jne 1f
14997- movl $gdt_page,%eax
14998+ movl $cpu_gdt_table,%eax
14999 movl $stack_canary,%ecx
15000+#ifdef CONFIG_SMP
15001+ addl $__per_cpu_load,%ecx
15002+#endif
15003 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
15004 shrl $16, %ecx
15005 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
15006 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
15007 1:
15008-#endif
15009 movl $(__KERNEL_STACK_CANARY),%eax
15010+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
15011+ movl $(__USER_DS),%eax
15012+#else
15013+ xorl %eax,%eax
15014+#endif
15015 movl %eax,%gs
15016
15017 xorl %eax,%eax # Clear LDT
15018@@ -558,22 +639,22 @@ early_page_fault:
15019 jmp early_fault
15020
15021 early_fault:
15022- cld
15023 #ifdef CONFIG_PRINTK
15024+ cmpl $1,%ss:early_recursion_flag
15025+ je hlt_loop
15026+ incl %ss:early_recursion_flag
15027+ cld
15028 pusha
15029 movl $(__KERNEL_DS),%eax
15030 movl %eax,%ds
15031 movl %eax,%es
15032- cmpl $2,early_recursion_flag
15033- je hlt_loop
15034- incl early_recursion_flag
15035 movl %cr2,%eax
15036 pushl %eax
15037 pushl %edx /* trapno */
15038 pushl $fault_msg
15039 call printk
15040+; call dump_stack
15041 #endif
15042- call dump_stack
15043 hlt_loop:
15044 hlt
15045 jmp hlt_loop
15046@@ -581,8 +662,11 @@ hlt_loop:
15047 /* This is the default interrupt "handler" :-) */
15048 ALIGN
15049 ignore_int:
15050- cld
15051 #ifdef CONFIG_PRINTK
15052+ cmpl $2,%ss:early_recursion_flag
15053+ je hlt_loop
15054+ incl %ss:early_recursion_flag
15055+ cld
15056 pushl %eax
15057 pushl %ecx
15058 pushl %edx
15059@@ -591,9 +675,6 @@ ignore_int:
15060 movl $(__KERNEL_DS),%eax
15061 movl %eax,%ds
15062 movl %eax,%es
15063- cmpl $2,early_recursion_flag
15064- je hlt_loop
15065- incl early_recursion_flag
15066 pushl 16(%esp)
15067 pushl 24(%esp)
15068 pushl 32(%esp)
15069@@ -622,29 +703,43 @@ ENTRY(initial_code)
15070 /*
15071 * BSS section
15072 */
15073-__PAGE_ALIGNED_BSS
15074- .align PAGE_SIZE
15075 #ifdef CONFIG_X86_PAE
15076+.section .initial_pg_pmd,"a",@progbits
15077 initial_pg_pmd:
15078 .fill 1024*KPMDS,4,0
15079 #else
15080+.section .initial_page_table,"a",@progbits
15081 ENTRY(initial_page_table)
15082 .fill 1024,4,0
15083 #endif
15084+.section .initial_pg_fixmap,"a",@progbits
15085 initial_pg_fixmap:
15086 .fill 1024,4,0
15087+.section .empty_zero_page,"a",@progbits
15088 ENTRY(empty_zero_page)
15089 .fill 4096,1,0
15090+.section .swapper_pg_dir,"a",@progbits
15091 ENTRY(swapper_pg_dir)
15092+#ifdef CONFIG_X86_PAE
15093+ .fill 4,8,0
15094+#else
15095 .fill 1024,4,0
15096+#endif
15097+
15098+/*
15099+ * The IDT has to be page-aligned to simplify the Pentium
15100+ * F0 0F bug workaround.. We have a special link segment
15101+ * for this.
15102+ */
15103+.section .idt,"a",@progbits
15104+ENTRY(idt_table)
15105+ .fill 256,8,0
15106
15107 /*
15108 * This starts the data section.
15109 */
15110 #ifdef CONFIG_X86_PAE
15111-__PAGE_ALIGNED_DATA
15112- /* Page-aligned for the benefit of paravirt? */
15113- .align PAGE_SIZE
15114+.section .initial_page_table,"a",@progbits
15115 ENTRY(initial_page_table)
15116 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
15117 # if KPMDS == 3
15118@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
15119 # error "Kernel PMDs should be 1, 2 or 3"
15120 # endif
15121 .align PAGE_SIZE /* needs to be page-sized too */
15122+
15123+#ifdef CONFIG_PAX_PER_CPU_PGD
15124+ENTRY(cpu_pgd)
15125+ .rept NR_CPUS
15126+ .fill 4,8,0
15127+ .endr
15128+#endif
15129+
15130 #endif
15131
15132 .data
15133 .balign 4
15134 ENTRY(stack_start)
15135- .long init_thread_union+THREAD_SIZE
15136+ .long init_thread_union+THREAD_SIZE-8
15137
15138+ready: .byte 0
15139+
15140+.section .rodata,"a",@progbits
15141 early_recursion_flag:
15142 .long 0
15143
15144-ready: .byte 0
15145-
15146 int_msg:
15147 .asciz "Unknown interrupt or fault at: %p %p %p\n"
15148
15149@@ -707,7 +811,7 @@ fault_msg:
15150 .word 0 # 32 bit align gdt_desc.address
15151 boot_gdt_descr:
15152 .word __BOOT_DS+7
15153- .long boot_gdt - __PAGE_OFFSET
15154+ .long pa(boot_gdt)
15155
15156 .word 0 # 32-bit align idt_desc.address
15157 idt_descr:
15158@@ -718,7 +822,7 @@ idt_descr:
15159 .word 0 # 32 bit align gdt_desc.address
15160 ENTRY(early_gdt_descr)
15161 .word GDT_ENTRIES*8-1
15162- .long gdt_page /* Overwritten for secondary CPUs */
15163+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
15164
15165 /*
15166 * The boot_gdt must mirror the equivalent in setup.S and is
15167@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
15168 .align L1_CACHE_BYTES
15169 ENTRY(boot_gdt)
15170 .fill GDT_ENTRY_BOOT_CS,8,0
15171- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
15172- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
15173+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
15174+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
15175+
15176+ .align PAGE_SIZE_asm
15177+ENTRY(cpu_gdt_table)
15178+ .rept NR_CPUS
15179+ .quad 0x0000000000000000 /* NULL descriptor */
15180+ .quad 0x0000000000000000 /* 0x0b reserved */
15181+ .quad 0x0000000000000000 /* 0x13 reserved */
15182+ .quad 0x0000000000000000 /* 0x1b reserved */
15183+
15184+#ifdef CONFIG_PAX_KERNEXEC
15185+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
15186+#else
15187+ .quad 0x0000000000000000 /* 0x20 unused */
15188+#endif
15189+
15190+ .quad 0x0000000000000000 /* 0x28 unused */
15191+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
15192+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
15193+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
15194+ .quad 0x0000000000000000 /* 0x4b reserved */
15195+ .quad 0x0000000000000000 /* 0x53 reserved */
15196+ .quad 0x0000000000000000 /* 0x5b reserved */
15197+
15198+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
15199+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
15200+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
15201+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
15202+
15203+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
15204+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
15205+
15206+ /*
15207+ * Segments used for calling PnP BIOS have byte granularity.
15208+ * The code segments and data segments have fixed 64k limits,
15209+ * the transfer segment sizes are set at run time.
15210+ */
15211+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
15212+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
15213+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
15214+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
15215+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
15216+
15217+ /*
15218+ * The APM segments have byte granularity and their bases
15219+ * are set at run time. All have 64k limits.
15220+ */
15221+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
15222+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
15223+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
15224+
15225+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
15226+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
15227+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
15228+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
15229+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
15230+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
15231+
15232+ /* Be sure this is zeroed to avoid false validations in Xen */
15233+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
15234+ .endr
15235diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
15236index e11e394..9aebc5d 100644
15237--- a/arch/x86/kernel/head_64.S
15238+++ b/arch/x86/kernel/head_64.S
15239@@ -19,6 +19,8 @@
15240 #include <asm/cache.h>
15241 #include <asm/processor-flags.h>
15242 #include <asm/percpu.h>
15243+#include <asm/cpufeature.h>
15244+#include <asm/alternative-asm.h>
15245
15246 #ifdef CONFIG_PARAVIRT
15247 #include <asm/asm-offsets.h>
15248@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
15249 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
15250 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
15251 L3_START_KERNEL = pud_index(__START_KERNEL_map)
15252+L4_VMALLOC_START = pgd_index(VMALLOC_START)
15253+L3_VMALLOC_START = pud_index(VMALLOC_START)
15254+L4_VMALLOC_END = pgd_index(VMALLOC_END)
15255+L3_VMALLOC_END = pud_index(VMALLOC_END)
15256+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
15257+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
15258
15259 .text
15260 __HEAD
15261@@ -85,35 +93,23 @@ startup_64:
15262 */
15263 addq %rbp, init_level4_pgt + 0(%rip)
15264 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
15265+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
15266+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
15267+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
15268 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
15269
15270 addq %rbp, level3_ident_pgt + 0(%rip)
15271+#ifndef CONFIG_XEN
15272+ addq %rbp, level3_ident_pgt + 8(%rip)
15273+#endif
15274
15275- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
15276- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
15277+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
15278+
15279+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
15280+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
15281
15282 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
15283-
15284- /* Add an Identity mapping if I am above 1G */
15285- leaq _text(%rip), %rdi
15286- andq $PMD_PAGE_MASK, %rdi
15287-
15288- movq %rdi, %rax
15289- shrq $PUD_SHIFT, %rax
15290- andq $(PTRS_PER_PUD - 1), %rax
15291- jz ident_complete
15292-
15293- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
15294- leaq level3_ident_pgt(%rip), %rbx
15295- movq %rdx, 0(%rbx, %rax, 8)
15296-
15297- movq %rdi, %rax
15298- shrq $PMD_SHIFT, %rax
15299- andq $(PTRS_PER_PMD - 1), %rax
15300- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
15301- leaq level2_spare_pgt(%rip), %rbx
15302- movq %rdx, 0(%rbx, %rax, 8)
15303-ident_complete:
15304+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
15305
15306 /*
15307 * Fixup the kernel text+data virtual addresses. Note that
15308@@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
15309 * after the boot processor executes this code.
15310 */
15311
15312- /* Enable PAE mode and PGE */
15313- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
15314+ /* Enable PAE mode and PSE/PGE */
15315+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15316 movq %rax, %cr4
15317
15318 /* Setup early boot stage 4 level pagetables. */
15319@@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
15320 movl $MSR_EFER, %ecx
15321 rdmsr
15322 btsl $_EFER_SCE, %eax /* Enable System Call */
15323- btl $20,%edi /* No Execute supported? */
15324+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
15325 jnc 1f
15326 btsl $_EFER_NX, %eax
15327+ leaq init_level4_pgt(%rip), %rdi
15328+#ifndef CONFIG_EFI
15329+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
15330+#endif
15331+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
15332+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
15333+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
15334+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
15335 1: wrmsr /* Make changes effective */
15336
15337 /* Setup cr0 */
15338@@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
15339 * jump. In addition we need to ensure %cs is set so we make this
15340 * a far return.
15341 */
15342+ pax_set_fptr_mask
15343 movq initial_code(%rip),%rax
15344 pushq $0 # fake return address to stop unwinder
15345 pushq $__KERNEL_CS # set correct cs
15346@@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
15347 bad_address:
15348 jmp bad_address
15349
15350- .section ".init.text","ax"
15351+ __INIT
15352 #ifdef CONFIG_EARLY_PRINTK
15353 .globl early_idt_handlers
15354 early_idt_handlers:
15355@@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
15356 #endif /* EARLY_PRINTK */
15357 1: hlt
15358 jmp 1b
15359+ .previous
15360
15361 #ifdef CONFIG_EARLY_PRINTK
15362+ __INITDATA
15363 early_recursion_flag:
15364 .long 0
15365+ .previous
15366
15367+ .section .rodata,"a",@progbits
15368 early_idt_msg:
15369 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
15370 early_idt_ripmsg:
15371 .asciz "RIP %s\n"
15372+ .previous
15373 #endif /* CONFIG_EARLY_PRINTK */
15374- .previous
15375
15376+ .section .rodata,"a",@progbits
15377 #define NEXT_PAGE(name) \
15378 .balign PAGE_SIZE; \
15379 ENTRY(name)
15380@@ -338,7 +348,6 @@ ENTRY(name)
15381 i = i + 1 ; \
15382 .endr
15383
15384- .data
15385 /*
15386 * This default setting generates an ident mapping at address 0x100000
15387 * and a mapping for the kernel that precisely maps virtual address
15388@@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
15389 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15390 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
15391 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15392+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
15393+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
15394+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
15395+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
15396+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
15397+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15398 .org init_level4_pgt + L4_START_KERNEL*8, 0
15399 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
15400 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
15401
15402+#ifdef CONFIG_PAX_PER_CPU_PGD
15403+NEXT_PAGE(cpu_pgd)
15404+ .rept NR_CPUS
15405+ .fill 512,8,0
15406+ .endr
15407+#endif
15408+
15409 NEXT_PAGE(level3_ident_pgt)
15410 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15411+#ifdef CONFIG_XEN
15412 .fill 511,8,0
15413+#else
15414+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
15415+ .fill 510,8,0
15416+#endif
15417+
15418+NEXT_PAGE(level3_vmalloc_start_pgt)
15419+ .fill 512,8,0
15420+
15421+NEXT_PAGE(level3_vmalloc_end_pgt)
15422+ .fill 512,8,0
15423+
15424+NEXT_PAGE(level3_vmemmap_pgt)
15425+ .fill L3_VMEMMAP_START,8,0
15426+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15427
15428 NEXT_PAGE(level3_kernel_pgt)
15429 .fill L3_START_KERNEL,8,0
15430@@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
15431 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
15432 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15433
15434+NEXT_PAGE(level2_vmemmap_pgt)
15435+ .fill 512,8,0
15436+
15437 NEXT_PAGE(level2_fixmap_pgt)
15438- .fill 506,8,0
15439- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15440- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
15441- .fill 5,8,0
15442+ .fill 507,8,0
15443+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
15444+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
15445+ .fill 4,8,0
15446
15447-NEXT_PAGE(level1_fixmap_pgt)
15448+NEXT_PAGE(level1_vsyscall_pgt)
15449 .fill 512,8,0
15450
15451-NEXT_PAGE(level2_ident_pgt)
15452- /* Since I easily can, map the first 1G.
15453+ /* Since I easily can, map the first 2G.
15454 * Don't set NX because code runs from these pages.
15455 */
15456- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
15457+NEXT_PAGE(level2_ident_pgt)
15458+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
15459
15460 NEXT_PAGE(level2_kernel_pgt)
15461 /*
15462@@ -389,33 +429,55 @@ NEXT_PAGE(level2_kernel_pgt)
15463 * If you want to increase this then increase MODULES_VADDR
15464 * too.)
15465 */
15466- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
15467- KERNEL_IMAGE_SIZE/PMD_SIZE)
15468-
15469-NEXT_PAGE(level2_spare_pgt)
15470- .fill 512, 8, 0
15471+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
15472
15473 #undef PMDS
15474 #undef NEXT_PAGE
15475
15476- .data
15477+ .align PAGE_SIZE
15478+ENTRY(cpu_gdt_table)
15479+ .rept NR_CPUS
15480+ .quad 0x0000000000000000 /* NULL descriptor */
15481+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
15482+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
15483+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
15484+ .quad 0x00cffb000000ffff /* __USER32_CS */
15485+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
15486+ .quad 0x00affb000000ffff /* __USER_CS */
15487+
15488+#ifdef CONFIG_PAX_KERNEXEC
15489+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
15490+#else
15491+ .quad 0x0 /* unused */
15492+#endif
15493+
15494+ .quad 0,0 /* TSS */
15495+ .quad 0,0 /* LDT */
15496+ .quad 0,0,0 /* three TLS descriptors */
15497+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
15498+ /* asm/segment.h:GDT_ENTRIES must match this */
15499+
15500+ /* zero the remaining page */
15501+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
15502+ .endr
15503+
15504 .align 16
15505 .globl early_gdt_descr
15506 early_gdt_descr:
15507 .word GDT_ENTRIES*8-1
15508 early_gdt_descr_base:
15509- .quad INIT_PER_CPU_VAR(gdt_page)
15510+ .quad cpu_gdt_table
15511
15512 ENTRY(phys_base)
15513 /* This must match the first entry in level2_kernel_pgt */
15514 .quad 0x0000000000000000
15515
15516 #include "../../x86/xen/xen-head.S"
15517-
15518- .section .bss, "aw", @nobits
15519+
15520+ .section .rodata,"a",@progbits
15521 .align L1_CACHE_BYTES
15522 ENTRY(idt_table)
15523- .skip IDT_ENTRIES * 16
15524+ .fill 512,8,0
15525
15526 __PAGE_ALIGNED_BSS
15527 .align PAGE_SIZE
15528diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
15529index 9c3bd4a..e1d9b35 100644
15530--- a/arch/x86/kernel/i386_ksyms_32.c
15531+++ b/arch/x86/kernel/i386_ksyms_32.c
15532@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15533 EXPORT_SYMBOL(cmpxchg8b_emu);
15534 #endif
15535
15536+EXPORT_SYMBOL_GPL(cpu_gdt_table);
15537+
15538 /* Networking helper routines. */
15539 EXPORT_SYMBOL(csum_partial_copy_generic);
15540+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15541+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15542
15543 EXPORT_SYMBOL(__get_user_1);
15544 EXPORT_SYMBOL(__get_user_2);
15545@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15546
15547 EXPORT_SYMBOL(csum_partial);
15548 EXPORT_SYMBOL(empty_zero_page);
15549+
15550+#ifdef CONFIG_PAX_KERNEXEC
15551+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15552+#endif
15553diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
15554index 6104852..6114160 100644
15555--- a/arch/x86/kernel/i8259.c
15556+++ b/arch/x86/kernel/i8259.c
15557@@ -210,7 +210,7 @@ spurious_8259A_irq:
15558 "spurious 8259A interrupt: IRQ%d.\n", irq);
15559 spurious_irq_mask |= irqmask;
15560 }
15561- atomic_inc(&irq_err_count);
15562+ atomic_inc_unchecked(&irq_err_count);
15563 /*
15564 * Theoretically we do not have to handle this IRQ,
15565 * but in Linux this does not cause problems and is
15566diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
15567index 43e9ccf..44ccf6f 100644
15568--- a/arch/x86/kernel/init_task.c
15569+++ b/arch/x86/kernel/init_task.c
15570@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
15571 * way process stacks are handled. This is done by having a special
15572 * "init_task" linker map entry..
15573 */
15574-union thread_union init_thread_union __init_task_data =
15575- { INIT_THREAD_INFO(init_task) };
15576+union thread_union init_thread_union __init_task_data;
15577
15578 /*
15579 * Initial task structure.
15580@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15581 * section. Since TSS's are completely CPU-local, we want them
15582 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15583 */
15584-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15585-
15586+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15587+EXPORT_SYMBOL(init_tss);
15588diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
15589index 8c96897..be66bfa 100644
15590--- a/arch/x86/kernel/ioport.c
15591+++ b/arch/x86/kernel/ioport.c
15592@@ -6,6 +6,7 @@
15593 #include <linux/sched.h>
15594 #include <linux/kernel.h>
15595 #include <linux/capability.h>
15596+#include <linux/security.h>
15597 #include <linux/errno.h>
15598 #include <linux/types.h>
15599 #include <linux/ioport.h>
15600@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15601
15602 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15603 return -EINVAL;
15604+#ifdef CONFIG_GRKERNSEC_IO
15605+ if (turn_on && grsec_disable_privio) {
15606+ gr_handle_ioperm();
15607+ return -EPERM;
15608+ }
15609+#endif
15610 if (turn_on && !capable(CAP_SYS_RAWIO))
15611 return -EPERM;
15612
15613@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15614 * because the ->io_bitmap_max value must match the bitmap
15615 * contents:
15616 */
15617- tss = &per_cpu(init_tss, get_cpu());
15618+ tss = init_tss + get_cpu();
15619
15620 if (turn_on)
15621 bitmap_clear(t->io_bitmap_ptr, from, num);
15622@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
15623 return -EINVAL;
15624 /* Trying to gain more privileges? */
15625 if (level > old) {
15626+#ifdef CONFIG_GRKERNSEC_IO
15627+ if (grsec_disable_privio) {
15628+ gr_handle_iopl();
15629+ return -EPERM;
15630+ }
15631+#endif
15632 if (!capable(CAP_SYS_RAWIO))
15633 return -EPERM;
15634 }
15635diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
15636index 429e0c9..17b3ece 100644
15637--- a/arch/x86/kernel/irq.c
15638+++ b/arch/x86/kernel/irq.c
15639@@ -18,7 +18,7 @@
15640 #include <asm/mce.h>
15641 #include <asm/hw_irq.h>
15642
15643-atomic_t irq_err_count;
15644+atomic_unchecked_t irq_err_count;
15645
15646 /* Function pointer for generic interrupt vector handling */
15647 void (*x86_platform_ipi_callback)(void) = NULL;
15648@@ -117,9 +117,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
15649 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15650 seq_printf(p, " Machine check polls\n");
15651 #endif
15652- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15653+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15654 #if defined(CONFIG_X86_IO_APIC)
15655- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15656+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15657 #endif
15658 return 0;
15659 }
15660@@ -159,10 +159,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15661
15662 u64 arch_irq_stat(void)
15663 {
15664- u64 sum = atomic_read(&irq_err_count);
15665+ u64 sum = atomic_read_unchecked(&irq_err_count);
15666
15667 #ifdef CONFIG_X86_IO_APIC
15668- sum += atomic_read(&irq_mis_count);
15669+ sum += atomic_read_unchecked(&irq_mis_count);
15670 #endif
15671 return sum;
15672 }
15673diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
15674index 7209070..cbcd71a 100644
15675--- a/arch/x86/kernel/irq_32.c
15676+++ b/arch/x86/kernel/irq_32.c
15677@@ -36,7 +36,7 @@ static int check_stack_overflow(void)
15678 __asm__ __volatile__("andl %%esp,%0" :
15679 "=r" (sp) : "0" (THREAD_SIZE - 1));
15680
15681- return sp < (sizeof(struct thread_info) + STACK_WARN);
15682+ return sp < STACK_WARN;
15683 }
15684
15685 static void print_stack_overflow(void)
15686@@ -54,8 +54,8 @@ static inline void print_stack_overflow(void) { }
15687 * per-CPU IRQ handling contexts (thread information and stack)
15688 */
15689 union irq_ctx {
15690- struct thread_info tinfo;
15691- u32 stack[THREAD_SIZE/sizeof(u32)];
15692+ unsigned long previous_esp;
15693+ u32 stack[THREAD_SIZE/sizeof(u32)];
15694 } __attribute__((aligned(THREAD_SIZE)));
15695
15696 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15697@@ -75,10 +75,9 @@ static void call_on_stack(void *func, void *stack)
15698 static inline int
15699 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15700 {
15701- union irq_ctx *curctx, *irqctx;
15702+ union irq_ctx *irqctx;
15703 u32 *isp, arg1, arg2;
15704
15705- curctx = (union irq_ctx *) current_thread_info();
15706 irqctx = __this_cpu_read(hardirq_ctx);
15707
15708 /*
15709@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15710 * handler) we can't do that and just have to keep using the
15711 * current stack (which is the irq stack already after all)
15712 */
15713- if (unlikely(curctx == irqctx))
15714+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15715 return 0;
15716
15717 /* build the stack frame on the IRQ stack */
15718- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15719- irqctx->tinfo.task = curctx->tinfo.task;
15720- irqctx->tinfo.previous_esp = current_stack_pointer;
15721+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15722+ irqctx->previous_esp = current_stack_pointer;
15723
15724- /*
15725- * Copy the softirq bits in preempt_count so that the
15726- * softirq checks work in the hardirq context.
15727- */
15728- irqctx->tinfo.preempt_count =
15729- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15730- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15731+#ifdef CONFIG_PAX_MEMORY_UDEREF
15732+ __set_fs(MAKE_MM_SEG(0));
15733+#endif
15734
15735 if (unlikely(overflow))
15736 call_on_stack(print_stack_overflow, isp);
15737@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15738 : "0" (irq), "1" (desc), "2" (isp),
15739 "D" (desc->handle_irq)
15740 : "memory", "cc", "ecx");
15741+
15742+#ifdef CONFIG_PAX_MEMORY_UDEREF
15743+ __set_fs(current_thread_info()->addr_limit);
15744+#endif
15745+
15746 return 1;
15747 }
15748
15749@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15750 */
15751 void __cpuinit irq_ctx_init(int cpu)
15752 {
15753- union irq_ctx *irqctx;
15754-
15755 if (per_cpu(hardirq_ctx, cpu))
15756 return;
15757
15758- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15759- THREAD_FLAGS,
15760- THREAD_ORDER));
15761- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15762- irqctx->tinfo.cpu = cpu;
15763- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15764- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15765-
15766- per_cpu(hardirq_ctx, cpu) = irqctx;
15767-
15768- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15769- THREAD_FLAGS,
15770- THREAD_ORDER));
15771- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15772- irqctx->tinfo.cpu = cpu;
15773- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15774-
15775- per_cpu(softirq_ctx, cpu) = irqctx;
15776+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15777+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15778
15779 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15780 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15781@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
15782 asmlinkage void do_softirq(void)
15783 {
15784 unsigned long flags;
15785- struct thread_info *curctx;
15786 union irq_ctx *irqctx;
15787 u32 *isp;
15788
15789@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
15790 local_irq_save(flags);
15791
15792 if (local_softirq_pending()) {
15793- curctx = current_thread_info();
15794 irqctx = __this_cpu_read(softirq_ctx);
15795- irqctx->tinfo.task = curctx->task;
15796- irqctx->tinfo.previous_esp = current_stack_pointer;
15797+ irqctx->previous_esp = current_stack_pointer;
15798
15799 /* build the stack frame on the softirq stack */
15800- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15801+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15802+
15803+#ifdef CONFIG_PAX_MEMORY_UDEREF
15804+ __set_fs(MAKE_MM_SEG(0));
15805+#endif
15806
15807 call_on_stack(__do_softirq, isp);
15808+
15809+#ifdef CONFIG_PAX_MEMORY_UDEREF
15810+ __set_fs(current_thread_info()->addr_limit);
15811+#endif
15812+
15813 /*
15814 * Shouldn't happen, we returned above if in_interrupt():
15815 */
15816diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
15817index 69bca46..0bac999 100644
15818--- a/arch/x86/kernel/irq_64.c
15819+++ b/arch/x86/kernel/irq_64.c
15820@@ -38,7 +38,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
15821 #ifdef CONFIG_DEBUG_STACKOVERFLOW
15822 u64 curbase = (u64)task_stack_page(current);
15823
15824- if (user_mode_vm(regs))
15825+ if (user_mode(regs))
15826 return;
15827
15828 WARN_ONCE(regs->sp >= curbase &&
15829diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
15830index faba577..93b9e71 100644
15831--- a/arch/x86/kernel/kgdb.c
15832+++ b/arch/x86/kernel/kgdb.c
15833@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
15834 #ifdef CONFIG_X86_32
15835 switch (regno) {
15836 case GDB_SS:
15837- if (!user_mode_vm(regs))
15838+ if (!user_mode(regs))
15839 *(unsigned long *)mem = __KERNEL_DS;
15840 break;
15841 case GDB_SP:
15842- if (!user_mode_vm(regs))
15843+ if (!user_mode(regs))
15844 *(unsigned long *)mem = kernel_stack_pointer(regs);
15845 break;
15846 case GDB_GS:
15847@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
15848 case 'k':
15849 /* clear the trace bit */
15850 linux_regs->flags &= ~X86_EFLAGS_TF;
15851- atomic_set(&kgdb_cpu_doing_single_step, -1);
15852+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15853
15854 /* set the trace bit if we're stepping */
15855 if (remcomInBuffer[0] == 's') {
15856 linux_regs->flags |= X86_EFLAGS_TF;
15857- atomic_set(&kgdb_cpu_doing_single_step,
15858+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15859 raw_smp_processor_id());
15860 }
15861
15862@@ -543,7 +543,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
15863
15864 switch (cmd) {
15865 case DIE_DEBUG:
15866- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
15867+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
15868 if (user_mode(regs))
15869 return single_step_cont(regs, args);
15870 break;
15871diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
15872index 7da647d..56fe348 100644
15873--- a/arch/x86/kernel/kprobes.c
15874+++ b/arch/x86/kernel/kprobes.c
15875@@ -118,8 +118,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
15876 } __attribute__((packed)) *insn;
15877
15878 insn = (struct __arch_relative_insn *)from;
15879+
15880+ pax_open_kernel();
15881 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
15882 insn->op = op;
15883+ pax_close_kernel();
15884 }
15885
15886 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
15887@@ -156,7 +159,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
15888 kprobe_opcode_t opcode;
15889 kprobe_opcode_t *orig_opcodes = opcodes;
15890
15891- if (search_exception_tables((unsigned long)opcodes))
15892+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15893 return 0; /* Page fault may occur on this address. */
15894
15895 retry:
15896@@ -317,7 +320,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15897 }
15898 }
15899 insn_get_length(&insn);
15900+ pax_open_kernel();
15901 memcpy(dest, insn.kaddr, insn.length);
15902+ pax_close_kernel();
15903
15904 #ifdef CONFIG_X86_64
15905 if (insn_rip_relative(&insn)) {
15906@@ -341,7 +346,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15907 (u8 *) dest;
15908 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
15909 disp = (u8 *) dest + insn_offset_displacement(&insn);
15910+ pax_open_kernel();
15911 *(s32 *) disp = (s32) newdisp;
15912+ pax_close_kernel();
15913 }
15914 #endif
15915 return insn.length;
15916@@ -355,12 +362,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
15917 */
15918 __copy_instruction(p->ainsn.insn, p->addr, 0);
15919
15920- if (can_boost(p->addr))
15921+ if (can_boost(ktla_ktva(p->addr)))
15922 p->ainsn.boostable = 0;
15923 else
15924 p->ainsn.boostable = -1;
15925
15926- p->opcode = *p->addr;
15927+ p->opcode = *(ktla_ktva(p->addr));
15928 }
15929
15930 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15931@@ -477,7 +484,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15932 * nor set current_kprobe, because it doesn't use single
15933 * stepping.
15934 */
15935- regs->ip = (unsigned long)p->ainsn.insn;
15936+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15937 preempt_enable_no_resched();
15938 return;
15939 }
15940@@ -496,7 +503,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15941 if (p->opcode == BREAKPOINT_INSTRUCTION)
15942 regs->ip = (unsigned long)p->addr;
15943 else
15944- regs->ip = (unsigned long)p->ainsn.insn;
15945+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15946 }
15947
15948 /*
15949@@ -575,7 +582,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
15950 setup_singlestep(p, regs, kcb, 0);
15951 return 1;
15952 }
15953- } else if (*addr != BREAKPOINT_INSTRUCTION) {
15954+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15955 /*
15956 * The breakpoint instruction was removed right
15957 * after we hit it. Another cpu has removed
15958@@ -683,6 +690,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
15959 " movq %rax, 152(%rsp)\n"
15960 RESTORE_REGS_STRING
15961 " popfq\n"
15962+#ifdef KERNEXEC_PLUGIN
15963+ " btsq $63,(%rsp)\n"
15964+#endif
15965 #else
15966 " pushf\n"
15967 SAVE_REGS_STRING
15968@@ -820,7 +830,7 @@ static void __kprobes resume_execution(struct kprobe *p,
15969 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15970 {
15971 unsigned long *tos = stack_addr(regs);
15972- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15973+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15974 unsigned long orig_ip = (unsigned long)p->addr;
15975 kprobe_opcode_t *insn = p->ainsn.insn;
15976
15977@@ -1002,7 +1012,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
15978 struct die_args *args = data;
15979 int ret = NOTIFY_DONE;
15980
15981- if (args->regs && user_mode_vm(args->regs))
15982+ if (args->regs && user_mode(args->regs))
15983 return ret;
15984
15985 switch (val) {
15986@@ -1384,7 +1394,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15987 * Verify if the address gap is in 2GB range, because this uses
15988 * a relative jump.
15989 */
15990- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
15991+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
15992 if (abs(rel) > 0x7fffffff)
15993 return -ERANGE;
15994
15995@@ -1405,11 +1415,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15996 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
15997
15998 /* Set probe function call */
15999- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
16000+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
16001
16002 /* Set returning jmp instruction at the tail of out-of-line buffer */
16003 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
16004- (u8 *)op->kp.addr + op->optinsn.size);
16005+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
16006
16007 flush_icache_range((unsigned long) buf,
16008 (unsigned long) buf + TMPL_END_IDX +
16009@@ -1431,7 +1441,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
16010 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
16011
16012 /* Backup instructions which will be replaced by jump address */
16013- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
16014+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
16015 RELATIVE_ADDR_SIZE);
16016
16017 insn_buf[0] = RELATIVEJUMP_OPCODE;
16018diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
16019index a9c2116..a52d4fc 100644
16020--- a/arch/x86/kernel/kvm.c
16021+++ b/arch/x86/kernel/kvm.c
16022@@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(void)
16023 pv_mmu_ops.set_pud = kvm_set_pud;
16024 #if PAGETABLE_LEVELS == 4
16025 pv_mmu_ops.set_pgd = kvm_set_pgd;
16026+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
16027 #endif
16028 #endif
16029 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
16030diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
16031index ea69726..604d066 100644
16032--- a/arch/x86/kernel/ldt.c
16033+++ b/arch/x86/kernel/ldt.c
16034@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
16035 if (reload) {
16036 #ifdef CONFIG_SMP
16037 preempt_disable();
16038- load_LDT(pc);
16039+ load_LDT_nolock(pc);
16040 if (!cpumask_equal(mm_cpumask(current->mm),
16041 cpumask_of(smp_processor_id())))
16042 smp_call_function(flush_ldt, current->mm, 1);
16043 preempt_enable();
16044 #else
16045- load_LDT(pc);
16046+ load_LDT_nolock(pc);
16047 #endif
16048 }
16049 if (oldsize) {
16050@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
16051 return err;
16052
16053 for (i = 0; i < old->size; i++)
16054- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
16055+ write_ldt_entry(new->ldt, i, old->ldt + i);
16056 return 0;
16057 }
16058
16059@@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
16060 retval = copy_ldt(&mm->context, &old_mm->context);
16061 mutex_unlock(&old_mm->context.lock);
16062 }
16063+
16064+ if (tsk == current) {
16065+ mm->context.vdso = 0;
16066+
16067+#ifdef CONFIG_X86_32
16068+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
16069+ mm->context.user_cs_base = 0UL;
16070+ mm->context.user_cs_limit = ~0UL;
16071+
16072+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
16073+ cpus_clear(mm->context.cpu_user_cs_mask);
16074+#endif
16075+
16076+#endif
16077+#endif
16078+
16079+ }
16080+
16081 return retval;
16082 }
16083
16084@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
16085 }
16086 }
16087
16088+#ifdef CONFIG_PAX_SEGMEXEC
16089+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
16090+ error = -EINVAL;
16091+ goto out_unlock;
16092+ }
16093+#endif
16094+
16095 fill_ldt(&ldt, &ldt_info);
16096 if (oldmode)
16097 ldt.avl = 0;
16098diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
16099index a3fa43b..8966f4c 100644
16100--- a/arch/x86/kernel/machine_kexec_32.c
16101+++ b/arch/x86/kernel/machine_kexec_32.c
16102@@ -27,7 +27,7 @@
16103 #include <asm/cacheflush.h>
16104 #include <asm/debugreg.h>
16105
16106-static void set_idt(void *newidt, __u16 limit)
16107+static void set_idt(struct desc_struct *newidt, __u16 limit)
16108 {
16109 struct desc_ptr curidt;
16110
16111@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
16112 }
16113
16114
16115-static void set_gdt(void *newgdt, __u16 limit)
16116+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
16117 {
16118 struct desc_ptr curgdt;
16119
16120@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
16121 }
16122
16123 control_page = page_address(image->control_code_page);
16124- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
16125+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
16126
16127 relocate_kernel_ptr = control_page;
16128 page_list[PA_CONTROL_PAGE] = __pa(control_page);
16129diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
16130index 3ca42d0..7cff8cc 100644
16131--- a/arch/x86/kernel/microcode_intel.c
16132+++ b/arch/x86/kernel/microcode_intel.c
16133@@ -436,13 +436,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
16134
16135 static int get_ucode_user(void *to, const void *from, size_t n)
16136 {
16137- return copy_from_user(to, from, n);
16138+ return copy_from_user(to, (const void __force_user *)from, n);
16139 }
16140
16141 static enum ucode_state
16142 request_microcode_user(int cpu, const void __user *buf, size_t size)
16143 {
16144- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
16145+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
16146 }
16147
16148 static void microcode_fini_cpu(int cpu)
16149diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
16150index 925179f..267ac7a 100644
16151--- a/arch/x86/kernel/module.c
16152+++ b/arch/x86/kernel/module.c
16153@@ -36,15 +36,60 @@
16154 #define DEBUGP(fmt...)
16155 #endif
16156
16157-void *module_alloc(unsigned long size)
16158+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
16159 {
16160- if (PAGE_ALIGN(size) > MODULES_LEN)
16161+ if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
16162 return NULL;
16163 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
16164- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
16165+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
16166 -1, __builtin_return_address(0));
16167 }
16168
16169+void *module_alloc(unsigned long size)
16170+{
16171+
16172+#ifdef CONFIG_PAX_KERNEXEC
16173+ return __module_alloc(size, PAGE_KERNEL);
16174+#else
16175+ return __module_alloc(size, PAGE_KERNEL_EXEC);
16176+#endif
16177+
16178+}
16179+
16180+#ifdef CONFIG_PAX_KERNEXEC
16181+#ifdef CONFIG_X86_32
16182+void *module_alloc_exec(unsigned long size)
16183+{
16184+ struct vm_struct *area;
16185+
16186+ if (size == 0)
16187+ return NULL;
16188+
16189+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
16190+ return area ? area->addr : NULL;
16191+}
16192+EXPORT_SYMBOL(module_alloc_exec);
16193+
16194+void module_free_exec(struct module *mod, void *module_region)
16195+{
16196+ vunmap(module_region);
16197+}
16198+EXPORT_SYMBOL(module_free_exec);
16199+#else
16200+void module_free_exec(struct module *mod, void *module_region)
16201+{
16202+ module_free(mod, module_region);
16203+}
16204+EXPORT_SYMBOL(module_free_exec);
16205+
16206+void *module_alloc_exec(unsigned long size)
16207+{
16208+ return __module_alloc(size, PAGE_KERNEL_RX);
16209+}
16210+EXPORT_SYMBOL(module_alloc_exec);
16211+#endif
16212+#endif
16213+
16214 #ifdef CONFIG_X86_32
16215 int apply_relocate(Elf32_Shdr *sechdrs,
16216 const char *strtab,
16217@@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
16218 unsigned int i;
16219 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
16220 Elf32_Sym *sym;
16221- uint32_t *location;
16222+ uint32_t *plocation, location;
16223
16224 DEBUGP("Applying relocate section %u to %u\n", relsec,
16225 sechdrs[relsec].sh_info);
16226 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
16227 /* This is where to make the change */
16228- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
16229- + rel[i].r_offset;
16230+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
16231+ location = (uint32_t)plocation;
16232+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
16233+ plocation = ktla_ktva((void *)plocation);
16234 /* This is the symbol it is referring to. Note that all
16235 undefined symbols have been resolved. */
16236 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
16237@@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
16238 switch (ELF32_R_TYPE(rel[i].r_info)) {
16239 case R_386_32:
16240 /* We add the value into the location given */
16241- *location += sym->st_value;
16242+ pax_open_kernel();
16243+ *plocation += sym->st_value;
16244+ pax_close_kernel();
16245 break;
16246 case R_386_PC32:
16247 /* Add the value, subtract its postition */
16248- *location += sym->st_value - (uint32_t)location;
16249+ pax_open_kernel();
16250+ *plocation += sym->st_value - location;
16251+ pax_close_kernel();
16252 break;
16253 default:
16254 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
16255@@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
16256 case R_X86_64_NONE:
16257 break;
16258 case R_X86_64_64:
16259+ pax_open_kernel();
16260 *(u64 *)loc = val;
16261+ pax_close_kernel();
16262 break;
16263 case R_X86_64_32:
16264+ pax_open_kernel();
16265 *(u32 *)loc = val;
16266+ pax_close_kernel();
16267 if (val != *(u32 *)loc)
16268 goto overflow;
16269 break;
16270 case R_X86_64_32S:
16271+ pax_open_kernel();
16272 *(s32 *)loc = val;
16273+ pax_close_kernel();
16274 if ((s64)val != *(s32 *)loc)
16275 goto overflow;
16276 break;
16277 case R_X86_64_PC32:
16278 val -= (u64)loc;
16279+ pax_open_kernel();
16280 *(u32 *)loc = val;
16281+ pax_close_kernel();
16282+
16283 #if 0
16284 if ((s64)val != *(s32 *)loc)
16285 goto overflow;
16286diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
16287index e88f37b..1353db6 100644
16288--- a/arch/x86/kernel/nmi.c
16289+++ b/arch/x86/kernel/nmi.c
16290@@ -408,6 +408,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
16291 dotraplinkage notrace __kprobes void
16292 do_nmi(struct pt_regs *regs, long error_code)
16293 {
16294+
16295+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16296+ if (!user_mode(regs)) {
16297+ unsigned long cs = regs->cs & 0xFFFF;
16298+ unsigned long ip = ktva_ktla(regs->ip);
16299+
16300+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
16301+ regs->ip = ip;
16302+ }
16303+#endif
16304+
16305 nmi_enter();
16306
16307 inc_irq_stat(__nmi_count);
16308diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
16309index 676b8c7..870ba04 100644
16310--- a/arch/x86/kernel/paravirt-spinlocks.c
16311+++ b/arch/x86/kernel/paravirt-spinlocks.c
16312@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
16313 arch_spin_lock(lock);
16314 }
16315
16316-struct pv_lock_ops pv_lock_ops = {
16317+struct pv_lock_ops pv_lock_ops __read_only = {
16318 #ifdef CONFIG_SMP
16319 .spin_is_locked = __ticket_spin_is_locked,
16320 .spin_is_contended = __ticket_spin_is_contended,
16321diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
16322index d90272e..6bb013b 100644
16323--- a/arch/x86/kernel/paravirt.c
16324+++ b/arch/x86/kernel/paravirt.c
16325@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
16326 {
16327 return x;
16328 }
16329+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16330+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
16331+#endif
16332
16333 void __init default_banner(void)
16334 {
16335@@ -145,15 +148,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
16336 if (opfunc == NULL)
16337 /* If there's no function, patch it with a ud2a (BUG) */
16338 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
16339- else if (opfunc == _paravirt_nop)
16340+ else if (opfunc == (void *)_paravirt_nop)
16341 /* If the operation is a nop, then nop the callsite */
16342 ret = paravirt_patch_nop();
16343
16344 /* identity functions just return their single argument */
16345- else if (opfunc == _paravirt_ident_32)
16346+ else if (opfunc == (void *)_paravirt_ident_32)
16347 ret = paravirt_patch_ident_32(insnbuf, len);
16348- else if (opfunc == _paravirt_ident_64)
16349+ else if (opfunc == (void *)_paravirt_ident_64)
16350 ret = paravirt_patch_ident_64(insnbuf, len);
16351+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16352+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
16353+ ret = paravirt_patch_ident_64(insnbuf, len);
16354+#endif
16355
16356 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
16357 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
16358@@ -178,7 +185,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
16359 if (insn_len > len || start == NULL)
16360 insn_len = len;
16361 else
16362- memcpy(insnbuf, start, insn_len);
16363+ memcpy(insnbuf, ktla_ktva(start), insn_len);
16364
16365 return insn_len;
16366 }
16367@@ -302,7 +309,7 @@ void arch_flush_lazy_mmu_mode(void)
16368 preempt_enable();
16369 }
16370
16371-struct pv_info pv_info = {
16372+struct pv_info pv_info __read_only = {
16373 .name = "bare hardware",
16374 .paravirt_enabled = 0,
16375 .kernel_rpl = 0,
16376@@ -313,16 +320,16 @@ struct pv_info pv_info = {
16377 #endif
16378 };
16379
16380-struct pv_init_ops pv_init_ops = {
16381+struct pv_init_ops pv_init_ops __read_only = {
16382 .patch = native_patch,
16383 };
16384
16385-struct pv_time_ops pv_time_ops = {
16386+struct pv_time_ops pv_time_ops __read_only = {
16387 .sched_clock = native_sched_clock,
16388 .steal_clock = native_steal_clock,
16389 };
16390
16391-struct pv_irq_ops pv_irq_ops = {
16392+struct pv_irq_ops pv_irq_ops __read_only = {
16393 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
16394 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
16395 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
16396@@ -334,7 +341,7 @@ struct pv_irq_ops pv_irq_ops = {
16397 #endif
16398 };
16399
16400-struct pv_cpu_ops pv_cpu_ops = {
16401+struct pv_cpu_ops pv_cpu_ops __read_only = {
16402 .cpuid = native_cpuid,
16403 .get_debugreg = native_get_debugreg,
16404 .set_debugreg = native_set_debugreg,
16405@@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
16406 .end_context_switch = paravirt_nop,
16407 };
16408
16409-struct pv_apic_ops pv_apic_ops = {
16410+struct pv_apic_ops pv_apic_ops __read_only = {
16411 #ifdef CONFIG_X86_LOCAL_APIC
16412 .startup_ipi_hook = paravirt_nop,
16413 #endif
16414 };
16415
16416-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
16417+#ifdef CONFIG_X86_32
16418+#ifdef CONFIG_X86_PAE
16419+/* 64-bit pagetable entries */
16420+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
16421+#else
16422 /* 32-bit pagetable entries */
16423 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
16424+#endif
16425 #else
16426 /* 64-bit pagetable entries */
16427 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
16428 #endif
16429
16430-struct pv_mmu_ops pv_mmu_ops = {
16431+struct pv_mmu_ops pv_mmu_ops __read_only = {
16432
16433 .read_cr2 = native_read_cr2,
16434 .write_cr2 = native_write_cr2,
16435@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
16436 .make_pud = PTE_IDENT,
16437
16438 .set_pgd = native_set_pgd,
16439+ .set_pgd_batched = native_set_pgd_batched,
16440 #endif
16441 #endif /* PAGETABLE_LEVELS >= 3 */
16442
16443@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
16444 },
16445
16446 .set_fixmap = native_set_fixmap,
16447+
16448+#ifdef CONFIG_PAX_KERNEXEC
16449+ .pax_open_kernel = native_pax_open_kernel,
16450+ .pax_close_kernel = native_pax_close_kernel,
16451+#endif
16452+
16453 };
16454
16455 EXPORT_SYMBOL_GPL(pv_time_ops);
16456diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
16457index 35ccf75..7a15747 100644
16458--- a/arch/x86/kernel/pci-iommu_table.c
16459+++ b/arch/x86/kernel/pci-iommu_table.c
16460@@ -2,7 +2,7 @@
16461 #include <asm/iommu_table.h>
16462 #include <linux/string.h>
16463 #include <linux/kallsyms.h>
16464-
16465+#include <linux/sched.h>
16466
16467 #define DEBUG 1
16468
16469diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
16470index ee5d4fb..426649b 100644
16471--- a/arch/x86/kernel/process.c
16472+++ b/arch/x86/kernel/process.c
16473@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
16474
16475 void free_thread_info(struct thread_info *ti)
16476 {
16477- free_thread_xstate(ti->task);
16478 free_pages((unsigned long)ti, THREAD_ORDER);
16479 }
16480
16481+static struct kmem_cache *task_struct_cachep;
16482+
16483 void arch_task_cache_init(void)
16484 {
16485- task_xstate_cachep =
16486- kmem_cache_create("task_xstate", xstate_size,
16487+ /* create a slab on which task_structs can be allocated */
16488+ task_struct_cachep =
16489+ kmem_cache_create("task_struct", sizeof(struct task_struct),
16490+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16491+
16492+ task_xstate_cachep =
16493+ kmem_cache_create("task_xstate", xstate_size,
16494 __alignof__(union thread_xstate),
16495- SLAB_PANIC | SLAB_NOTRACK, NULL);
16496+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16497+}
16498+
16499+struct task_struct *alloc_task_struct_node(int node)
16500+{
16501+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
16502+}
16503+
16504+void free_task_struct(struct task_struct *task)
16505+{
16506+ free_thread_xstate(task);
16507+ kmem_cache_free(task_struct_cachep, task);
16508 }
16509
16510 /*
16511@@ -70,7 +87,7 @@ void exit_thread(void)
16512 unsigned long *bp = t->io_bitmap_ptr;
16513
16514 if (bp) {
16515- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16516+ struct tss_struct *tss = init_tss + get_cpu();
16517
16518 t->io_bitmap_ptr = NULL;
16519 clear_thread_flag(TIF_IO_BITMAP);
16520@@ -106,7 +123,7 @@ void show_regs_common(void)
16521
16522 printk(KERN_CONT "\n");
16523 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
16524- current->pid, current->comm, print_tainted(),
16525+ task_pid_nr(current), current->comm, print_tainted(),
16526 init_utsname()->release,
16527 (int)strcspn(init_utsname()->version, " "),
16528 init_utsname()->version);
16529@@ -120,6 +137,9 @@ void flush_thread(void)
16530 {
16531 struct task_struct *tsk = current;
16532
16533+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16534+ loadsegment(gs, 0);
16535+#endif
16536 flush_ptrace_hw_breakpoint(tsk);
16537 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
16538 /*
16539@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
16540 regs.di = (unsigned long) arg;
16541
16542 #ifdef CONFIG_X86_32
16543- regs.ds = __USER_DS;
16544- regs.es = __USER_DS;
16545+ regs.ds = __KERNEL_DS;
16546+ regs.es = __KERNEL_DS;
16547 regs.fs = __KERNEL_PERCPU;
16548- regs.gs = __KERNEL_STACK_CANARY;
16549+ savesegment(gs, regs.gs);
16550 #else
16551 regs.ss = __KERNEL_DS;
16552 #endif
16553@@ -411,7 +431,7 @@ bool set_pm_idle_to_default(void)
16554
16555 return ret;
16556 }
16557-void stop_this_cpu(void *dummy)
16558+__noreturn void stop_this_cpu(void *dummy)
16559 {
16560 local_irq_disable();
16561 /*
16562@@ -653,16 +673,37 @@ static int __init idle_setup(char *str)
16563 }
16564 early_param("idle", idle_setup);
16565
16566-unsigned long arch_align_stack(unsigned long sp)
16567+#ifdef CONFIG_PAX_RANDKSTACK
16568+void pax_randomize_kstack(struct pt_regs *regs)
16569 {
16570- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16571- sp -= get_random_int() % 8192;
16572- return sp & ~0xf;
16573-}
16574+ struct thread_struct *thread = &current->thread;
16575+ unsigned long time;
16576
16577-unsigned long arch_randomize_brk(struct mm_struct *mm)
16578-{
16579- unsigned long range_end = mm->brk + 0x02000000;
16580- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16581-}
16582+ if (!randomize_va_space)
16583+ return;
16584+
16585+ if (v8086_mode(regs))
16586+ return;
16587
16588+ rdtscl(time);
16589+
16590+ /* P4 seems to return a 0 LSB, ignore it */
16591+#ifdef CONFIG_MPENTIUM4
16592+ time &= 0x3EUL;
16593+ time <<= 2;
16594+#elif defined(CONFIG_X86_64)
16595+ time &= 0xFUL;
16596+ time <<= 4;
16597+#else
16598+ time &= 0x1FUL;
16599+ time <<= 3;
16600+#endif
16601+
16602+ thread->sp0 ^= time;
16603+ load_sp0(init_tss + smp_processor_id(), thread);
16604+
16605+#ifdef CONFIG_X86_64
16606+ percpu_write(kernel_stack, thread->sp0);
16607+#endif
16608+}
16609+#endif
16610diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
16611index 8598296..bfadef0 100644
16612--- a/arch/x86/kernel/process_32.c
16613+++ b/arch/x86/kernel/process_32.c
16614@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
16615 unsigned long thread_saved_pc(struct task_struct *tsk)
16616 {
16617 return ((unsigned long *)tsk->thread.sp)[3];
16618+//XXX return tsk->thread.eip;
16619 }
16620
16621 #ifndef CONFIG_SMP
16622@@ -130,15 +131,14 @@ void __show_regs(struct pt_regs *regs, int all)
16623 unsigned long sp;
16624 unsigned short ss, gs;
16625
16626- if (user_mode_vm(regs)) {
16627+ if (user_mode(regs)) {
16628 sp = regs->sp;
16629 ss = regs->ss & 0xffff;
16630- gs = get_user_gs(regs);
16631 } else {
16632 sp = kernel_stack_pointer(regs);
16633 savesegment(ss, ss);
16634- savesegment(gs, gs);
16635 }
16636+ gs = get_user_gs(regs);
16637
16638 show_regs_common();
16639
16640@@ -200,13 +200,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16641 struct task_struct *tsk;
16642 int err;
16643
16644- childregs = task_pt_regs(p);
16645+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16646 *childregs = *regs;
16647 childregs->ax = 0;
16648 childregs->sp = sp;
16649
16650 p->thread.sp = (unsigned long) childregs;
16651 p->thread.sp0 = (unsigned long) (childregs+1);
16652+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16653
16654 p->thread.ip = (unsigned long) ret_from_fork;
16655
16656@@ -296,7 +297,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16657 struct thread_struct *prev = &prev_p->thread,
16658 *next = &next_p->thread;
16659 int cpu = smp_processor_id();
16660- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16661+ struct tss_struct *tss = init_tss + cpu;
16662 fpu_switch_t fpu;
16663
16664 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16665@@ -320,6 +321,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16666 */
16667 lazy_save_gs(prev->gs);
16668
16669+#ifdef CONFIG_PAX_MEMORY_UDEREF
16670+ __set_fs(task_thread_info(next_p)->addr_limit);
16671+#endif
16672+
16673 /*
16674 * Load the per-thread Thread-Local Storage descriptor.
16675 */
16676@@ -350,6 +355,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16677 */
16678 arch_end_context_switch(next_p);
16679
16680+ percpu_write(current_task, next_p);
16681+ percpu_write(current_tinfo, &next_p->tinfo);
16682+
16683 /*
16684 * Restore %gs if needed (which is common)
16685 */
16686@@ -358,8 +366,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16687
16688 switch_fpu_finish(next_p, fpu);
16689
16690- percpu_write(current_task, next_p);
16691-
16692 return prev_p;
16693 }
16694
16695@@ -389,4 +395,3 @@ unsigned long get_wchan(struct task_struct *p)
16696 } while (count++ < 16);
16697 return 0;
16698 }
16699-
16700diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
16701index 6a364a6..b147d11 100644
16702--- a/arch/x86/kernel/process_64.c
16703+++ b/arch/x86/kernel/process_64.c
16704@@ -89,7 +89,7 @@ static void __exit_idle(void)
16705 void exit_idle(void)
16706 {
16707 /* idle loop has pid 0 */
16708- if (current->pid)
16709+ if (task_pid_nr(current))
16710 return;
16711 __exit_idle();
16712 }
16713@@ -264,8 +264,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16714 struct pt_regs *childregs;
16715 struct task_struct *me = current;
16716
16717- childregs = ((struct pt_regs *)
16718- (THREAD_SIZE + task_stack_page(p))) - 1;
16719+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16720 *childregs = *regs;
16721
16722 childregs->ax = 0;
16723@@ -277,6 +276,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16724 p->thread.sp = (unsigned long) childregs;
16725 p->thread.sp0 = (unsigned long) (childregs+1);
16726 p->thread.usersp = me->thread.usersp;
16727+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16728
16729 set_tsk_thread_flag(p, TIF_FORK);
16730
16731@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16732 struct thread_struct *prev = &prev_p->thread;
16733 struct thread_struct *next = &next_p->thread;
16734 int cpu = smp_processor_id();
16735- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16736+ struct tss_struct *tss = init_tss + cpu;
16737 unsigned fsindex, gsindex;
16738 fpu_switch_t fpu;
16739
16740@@ -461,10 +461,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16741 prev->usersp = percpu_read(old_rsp);
16742 percpu_write(old_rsp, next->usersp);
16743 percpu_write(current_task, next_p);
16744+ percpu_write(current_tinfo, &next_p->tinfo);
16745
16746- percpu_write(kernel_stack,
16747- (unsigned long)task_stack_page(next_p) +
16748- THREAD_SIZE - KERNEL_STACK_OFFSET);
16749+ percpu_write(kernel_stack, next->sp0);
16750
16751 /*
16752 * Now maybe reload the debug registers and handle I/O bitmaps
16753@@ -519,12 +518,11 @@ unsigned long get_wchan(struct task_struct *p)
16754 if (!p || p == current || p->state == TASK_RUNNING)
16755 return 0;
16756 stack = (unsigned long)task_stack_page(p);
16757- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16758+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16759 return 0;
16760 fp = *(u64 *)(p->thread.sp);
16761 do {
16762- if (fp < (unsigned long)stack ||
16763- fp >= (unsigned long)stack+THREAD_SIZE)
16764+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16765 return 0;
16766 ip = *(u64 *)(fp+8);
16767 if (!in_sched_functions(ip))
16768diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
16769index 8252879..d3219e0 100644
16770--- a/arch/x86/kernel/ptrace.c
16771+++ b/arch/x86/kernel/ptrace.c
16772@@ -822,7 +822,7 @@ long arch_ptrace(struct task_struct *child, long request,
16773 unsigned long addr, unsigned long data)
16774 {
16775 int ret;
16776- unsigned long __user *datap = (unsigned long __user *)data;
16777+ unsigned long __user *datap = (__force unsigned long __user *)data;
16778
16779 switch (request) {
16780 /* read the word at location addr in the USER area. */
16781@@ -907,14 +907,14 @@ long arch_ptrace(struct task_struct *child, long request,
16782 if ((int) addr < 0)
16783 return -EIO;
16784 ret = do_get_thread_area(child, addr,
16785- (struct user_desc __user *)data);
16786+ (__force struct user_desc __user *) data);
16787 break;
16788
16789 case PTRACE_SET_THREAD_AREA:
16790 if ((int) addr < 0)
16791 return -EIO;
16792 ret = do_set_thread_area(child, addr,
16793- (struct user_desc __user *)data, 0);
16794+ (__force struct user_desc __user *) data, 0);
16795 break;
16796 #endif
16797
16798@@ -1331,7 +1331,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
16799 memset(info, 0, sizeof(*info));
16800 info->si_signo = SIGTRAP;
16801 info->si_code = si_code;
16802- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
16803+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
16804 }
16805
16806 void user_single_step_siginfo(struct task_struct *tsk,
16807diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
16808index 42eb330..139955c 100644
16809--- a/arch/x86/kernel/pvclock.c
16810+++ b/arch/x86/kernel/pvclock.c
16811@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
16812 return pv_tsc_khz;
16813 }
16814
16815-static atomic64_t last_value = ATOMIC64_INIT(0);
16816+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
16817
16818 void pvclock_resume(void)
16819 {
16820- atomic64_set(&last_value, 0);
16821+ atomic64_set_unchecked(&last_value, 0);
16822 }
16823
16824 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16825@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16826 * updating at the same time, and one of them could be slightly behind,
16827 * making the assumption that last_value always go forward fail to hold.
16828 */
16829- last = atomic64_read(&last_value);
16830+ last = atomic64_read_unchecked(&last_value);
16831 do {
16832 if (ret < last)
16833 return last;
16834- last = atomic64_cmpxchg(&last_value, last, ret);
16835+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
16836 } while (unlikely(last != ret));
16837
16838 return ret;
16839diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
16840index 37a458b..e63d183 100644
16841--- a/arch/x86/kernel/reboot.c
16842+++ b/arch/x86/kernel/reboot.c
16843@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
16844 EXPORT_SYMBOL(pm_power_off);
16845
16846 static const struct desc_ptr no_idt = {};
16847-static int reboot_mode;
16848+static unsigned short reboot_mode;
16849 enum reboot_type reboot_type = BOOT_ACPI;
16850 int reboot_force;
16851
16852@@ -324,13 +324,17 @@ core_initcall(reboot_init);
16853 extern const unsigned char machine_real_restart_asm[];
16854 extern const u64 machine_real_restart_gdt[3];
16855
16856-void machine_real_restart(unsigned int type)
16857+__noreturn void machine_real_restart(unsigned int type)
16858 {
16859 void *restart_va;
16860 unsigned long restart_pa;
16861- void (*restart_lowmem)(unsigned int);
16862+ void (* __noreturn restart_lowmem)(unsigned int);
16863 u64 *lowmem_gdt;
16864
16865+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16866+ struct desc_struct *gdt;
16867+#endif
16868+
16869 local_irq_disable();
16870
16871 /* Write zero to CMOS register number 0x0f, which the BIOS POST
16872@@ -356,14 +360,14 @@ void machine_real_restart(unsigned int type)
16873 boot)". This seems like a fairly standard thing that gets set by
16874 REBOOT.COM programs, and the previous reset routine did this
16875 too. */
16876- *((unsigned short *)0x472) = reboot_mode;
16877+ *(unsigned short *)(__va(0x472)) = reboot_mode;
16878
16879 /* Patch the GDT in the low memory trampoline */
16880 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
16881
16882 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
16883 restart_pa = virt_to_phys(restart_va);
16884- restart_lowmem = (void (*)(unsigned int))restart_pa;
16885+ restart_lowmem = (void *)restart_pa;
16886
16887 /* GDT[0]: GDT self-pointer */
16888 lowmem_gdt[0] =
16889@@ -374,7 +378,33 @@ void machine_real_restart(unsigned int type)
16890 GDT_ENTRY(0x009b, restart_pa, 0xffff);
16891
16892 /* Jump to the identity-mapped low memory code */
16893+
16894+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16895+ gdt = get_cpu_gdt_table(smp_processor_id());
16896+ pax_open_kernel();
16897+#ifdef CONFIG_PAX_MEMORY_UDEREF
16898+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
16899+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
16900+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
16901+#endif
16902+#ifdef CONFIG_PAX_KERNEXEC
16903+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
16904+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
16905+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
16906+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
16907+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
16908+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
16909+#endif
16910+ pax_close_kernel();
16911+#endif
16912+
16913+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16914+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
16915+ unreachable();
16916+#else
16917 restart_lowmem(type);
16918+#endif
16919+
16920 }
16921 #ifdef CONFIG_APM_MODULE
16922 EXPORT_SYMBOL(machine_real_restart);
16923@@ -540,7 +570,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
16924 * try to force a triple fault and then cycle between hitting the keyboard
16925 * controller and doing that
16926 */
16927-static void native_machine_emergency_restart(void)
16928+__noreturn static void native_machine_emergency_restart(void)
16929 {
16930 int i;
16931 int attempt = 0;
16932@@ -664,13 +694,13 @@ void native_machine_shutdown(void)
16933 #endif
16934 }
16935
16936-static void __machine_emergency_restart(int emergency)
16937+static __noreturn void __machine_emergency_restart(int emergency)
16938 {
16939 reboot_emergency = emergency;
16940 machine_ops.emergency_restart();
16941 }
16942
16943-static void native_machine_restart(char *__unused)
16944+static __noreturn void native_machine_restart(char *__unused)
16945 {
16946 printk("machine restart\n");
16947
16948@@ -679,7 +709,7 @@ static void native_machine_restart(char *__unused)
16949 __machine_emergency_restart(0);
16950 }
16951
16952-static void native_machine_halt(void)
16953+static __noreturn void native_machine_halt(void)
16954 {
16955 /* stop other cpus and apics */
16956 machine_shutdown();
16957@@ -690,7 +720,7 @@ static void native_machine_halt(void)
16958 stop_this_cpu(NULL);
16959 }
16960
16961-static void native_machine_power_off(void)
16962+__noreturn static void native_machine_power_off(void)
16963 {
16964 if (pm_power_off) {
16965 if (!reboot_force)
16966@@ -699,6 +729,7 @@ static void native_machine_power_off(void)
16967 }
16968 /* a fallback in case there is no PM info available */
16969 tboot_shutdown(TB_SHUTDOWN_HALT);
16970+ unreachable();
16971 }
16972
16973 struct machine_ops machine_ops = {
16974diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
16975index 7a6f3b3..bed145d7 100644
16976--- a/arch/x86/kernel/relocate_kernel_64.S
16977+++ b/arch/x86/kernel/relocate_kernel_64.S
16978@@ -11,6 +11,7 @@
16979 #include <asm/kexec.h>
16980 #include <asm/processor-flags.h>
16981 #include <asm/pgtable_types.h>
16982+#include <asm/alternative-asm.h>
16983
16984 /*
16985 * Must be relocatable PIC code callable as a C function
16986@@ -160,13 +161,14 @@ identity_mapped:
16987 xorq %rbp, %rbp
16988 xorq %r8, %r8
16989 xorq %r9, %r9
16990- xorq %r10, %r9
16991+ xorq %r10, %r10
16992 xorq %r11, %r11
16993 xorq %r12, %r12
16994 xorq %r13, %r13
16995 xorq %r14, %r14
16996 xorq %r15, %r15
16997
16998+ pax_force_retaddr 0, 1
16999 ret
17000
17001 1:
17002diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
17003index cf0ef98..e3f780b 100644
17004--- a/arch/x86/kernel/setup.c
17005+++ b/arch/x86/kernel/setup.c
17006@@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
17007
17008 switch (data->type) {
17009 case SETUP_E820_EXT:
17010- parse_e820_ext(data);
17011+ parse_e820_ext((struct setup_data __force_kernel *)data);
17012 break;
17013 case SETUP_DTB:
17014 add_dtb(pa_data);
17015@@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
17016 * area (640->1Mb) as ram even though it is not.
17017 * take them out.
17018 */
17019- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
17020+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
17021 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
17022 }
17023
17024@@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
17025
17026 if (!boot_params.hdr.root_flags)
17027 root_mountflags &= ~MS_RDONLY;
17028- init_mm.start_code = (unsigned long) _text;
17029- init_mm.end_code = (unsigned long) _etext;
17030+ init_mm.start_code = ktla_ktva((unsigned long) _text);
17031+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
17032 init_mm.end_data = (unsigned long) _edata;
17033 init_mm.brk = _brk_end;
17034
17035- code_resource.start = virt_to_phys(_text);
17036- code_resource.end = virt_to_phys(_etext)-1;
17037- data_resource.start = virt_to_phys(_etext);
17038+ code_resource.start = virt_to_phys(ktla_ktva(_text));
17039+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
17040+ data_resource.start = virt_to_phys(_sdata);
17041 data_resource.end = virt_to_phys(_edata)-1;
17042 bss_resource.start = virt_to_phys(&__bss_start);
17043 bss_resource.end = virt_to_phys(&__bss_stop)-1;
17044diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
17045index 71f4727..16dc9f7 100644
17046--- a/arch/x86/kernel/setup_percpu.c
17047+++ b/arch/x86/kernel/setup_percpu.c
17048@@ -21,19 +21,17 @@
17049 #include <asm/cpu.h>
17050 #include <asm/stackprotector.h>
17051
17052-DEFINE_PER_CPU(int, cpu_number);
17053+#ifdef CONFIG_SMP
17054+DEFINE_PER_CPU(unsigned int, cpu_number);
17055 EXPORT_PER_CPU_SYMBOL(cpu_number);
17056+#endif
17057
17058-#ifdef CONFIG_X86_64
17059 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
17060-#else
17061-#define BOOT_PERCPU_OFFSET 0
17062-#endif
17063
17064 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
17065 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
17066
17067-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
17068+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
17069 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
17070 };
17071 EXPORT_SYMBOL(__per_cpu_offset);
17072@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
17073 {
17074 #ifdef CONFIG_X86_32
17075 struct desc_struct gdt;
17076+ unsigned long base = per_cpu_offset(cpu);
17077
17078- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
17079- 0x2 | DESCTYPE_S, 0x8);
17080- gdt.s = 1;
17081+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
17082+ 0x83 | DESCTYPE_S, 0xC);
17083 write_gdt_entry(get_cpu_gdt_table(cpu),
17084 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
17085 #endif
17086@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
17087 /* alrighty, percpu areas up and running */
17088 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
17089 for_each_possible_cpu(cpu) {
17090+#ifdef CONFIG_CC_STACKPROTECTOR
17091+#ifdef CONFIG_X86_32
17092+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
17093+#endif
17094+#endif
17095 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
17096 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
17097 per_cpu(cpu_number, cpu) = cpu;
17098@@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
17099 */
17100 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
17101 #endif
17102+#ifdef CONFIG_CC_STACKPROTECTOR
17103+#ifdef CONFIG_X86_32
17104+ if (!cpu)
17105+ per_cpu(stack_canary.canary, cpu) = canary;
17106+#endif
17107+#endif
17108 /*
17109 * Up to this point, the boot CPU has been using .init.data
17110 * area. Reload any changed state for the boot CPU.
17111diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
17112index 54ddaeb2..22c3bdc 100644
17113--- a/arch/x86/kernel/signal.c
17114+++ b/arch/x86/kernel/signal.c
17115@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
17116 * Align the stack pointer according to the i386 ABI,
17117 * i.e. so that on function entry ((sp + 4) & 15) == 0.
17118 */
17119- sp = ((sp + 4) & -16ul) - 4;
17120+ sp = ((sp - 12) & -16ul) - 4;
17121 #else /* !CONFIG_X86_32 */
17122 sp = round_down(sp, 16) - 8;
17123 #endif
17124@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
17125 * Return an always-bogus address instead so we will die with SIGSEGV.
17126 */
17127 if (onsigstack && !likely(on_sig_stack(sp)))
17128- return (void __user *)-1L;
17129+ return (__force void __user *)-1L;
17130
17131 /* save i387 state */
17132 if (used_math() && save_i387_xstate(*fpstate) < 0)
17133- return (void __user *)-1L;
17134+ return (__force void __user *)-1L;
17135
17136 return (void __user *)sp;
17137 }
17138@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
17139 }
17140
17141 if (current->mm->context.vdso)
17142- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
17143+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
17144 else
17145- restorer = &frame->retcode;
17146+ restorer = (void __user *)&frame->retcode;
17147 if (ka->sa.sa_flags & SA_RESTORER)
17148 restorer = ka->sa.sa_restorer;
17149
17150@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
17151 * reasons and because gdb uses it as a signature to notice
17152 * signal handler stack frames.
17153 */
17154- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
17155+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
17156
17157 if (err)
17158 return -EFAULT;
17159@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
17160 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
17161
17162 /* Set up to return from userspace. */
17163- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
17164+ if (current->mm->context.vdso)
17165+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
17166+ else
17167+ restorer = (void __user *)&frame->retcode;
17168 if (ka->sa.sa_flags & SA_RESTORER)
17169 restorer = ka->sa.sa_restorer;
17170 put_user_ex(restorer, &frame->pretcode);
17171@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
17172 * reasons and because gdb uses it as a signature to notice
17173 * signal handler stack frames.
17174 */
17175- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
17176+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
17177 } put_user_catch(err);
17178
17179 if (err)
17180@@ -769,7 +772,7 @@ static void do_signal(struct pt_regs *regs)
17181 * X86_32: vm86 regs switched out by assembly code before reaching
17182 * here, so testing against kernel CS suffices.
17183 */
17184- if (!user_mode(regs))
17185+ if (!user_mode_novm(regs))
17186 return;
17187
17188 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
17189diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
17190index 9f548cb..caf76f7 100644
17191--- a/arch/x86/kernel/smpboot.c
17192+++ b/arch/x86/kernel/smpboot.c
17193@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
17194 set_idle_for_cpu(cpu, c_idle.idle);
17195 do_rest:
17196 per_cpu(current_task, cpu) = c_idle.idle;
17197+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
17198 #ifdef CONFIG_X86_32
17199 /* Stack for startup_32 can be just as for start_secondary onwards */
17200 irq_ctx_init(cpu);
17201 #else
17202 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
17203 initial_gs = per_cpu_offset(cpu);
17204- per_cpu(kernel_stack, cpu) =
17205- (unsigned long)task_stack_page(c_idle.idle) -
17206- KERNEL_STACK_OFFSET + THREAD_SIZE;
17207+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
17208 #endif
17209+
17210+ pax_open_kernel();
17211 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
17212+ pax_close_kernel();
17213+
17214 initial_code = (unsigned long)start_secondary;
17215 stack_start = c_idle.idle->thread.sp;
17216
17217@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
17218
17219 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
17220
17221+#ifdef CONFIG_PAX_PER_CPU_PGD
17222+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
17223+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
17224+ KERNEL_PGD_PTRS);
17225+#endif
17226+
17227 err = do_boot_cpu(apicid, cpu);
17228 if (err) {
17229 pr_debug("do_boot_cpu failed %d\n", err);
17230diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
17231index c346d11..d43b163 100644
17232--- a/arch/x86/kernel/step.c
17233+++ b/arch/x86/kernel/step.c
17234@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
17235 struct desc_struct *desc;
17236 unsigned long base;
17237
17238- seg &= ~7UL;
17239+ seg >>= 3;
17240
17241 mutex_lock(&child->mm->context.lock);
17242- if (unlikely((seg >> 3) >= child->mm->context.size))
17243+ if (unlikely(seg >= child->mm->context.size))
17244 addr = -1L; /* bogus selector, access would fault */
17245 else {
17246 desc = child->mm->context.ldt + seg;
17247@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
17248 addr += base;
17249 }
17250 mutex_unlock(&child->mm->context.lock);
17251- }
17252+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
17253+ addr = ktla_ktva(addr);
17254
17255 return addr;
17256 }
17257@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
17258 unsigned char opcode[15];
17259 unsigned long addr = convert_ip_to_linear(child, regs);
17260
17261+ if (addr == -EINVAL)
17262+ return 0;
17263+
17264 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
17265 for (i = 0; i < copied; i++) {
17266 switch (opcode[i]) {
17267diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
17268index 0b0cb5f..db6b9ed 100644
17269--- a/arch/x86/kernel/sys_i386_32.c
17270+++ b/arch/x86/kernel/sys_i386_32.c
17271@@ -24,17 +24,224 @@
17272
17273 #include <asm/syscalls.h>
17274
17275-/*
17276- * Do a system call from kernel instead of calling sys_execve so we
17277- * end up with proper pt_regs.
17278- */
17279-int kernel_execve(const char *filename,
17280- const char *const argv[],
17281- const char *const envp[])
17282+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
17283 {
17284- long __res;
17285- asm volatile ("int $0x80"
17286- : "=a" (__res)
17287- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
17288- return __res;
17289+ unsigned long pax_task_size = TASK_SIZE;
17290+
17291+#ifdef CONFIG_PAX_SEGMEXEC
17292+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
17293+ pax_task_size = SEGMEXEC_TASK_SIZE;
17294+#endif
17295+
17296+ if (len > pax_task_size || addr > pax_task_size - len)
17297+ return -EINVAL;
17298+
17299+ return 0;
17300+}
17301+
17302+unsigned long
17303+arch_get_unmapped_area(struct file *filp, unsigned long addr,
17304+ unsigned long len, unsigned long pgoff, unsigned long flags)
17305+{
17306+ struct mm_struct *mm = current->mm;
17307+ struct vm_area_struct *vma;
17308+ unsigned long start_addr, pax_task_size = TASK_SIZE;
17309+
17310+#ifdef CONFIG_PAX_SEGMEXEC
17311+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17312+ pax_task_size = SEGMEXEC_TASK_SIZE;
17313+#endif
17314+
17315+ pax_task_size -= PAGE_SIZE;
17316+
17317+ if (len > pax_task_size)
17318+ return -ENOMEM;
17319+
17320+ if (flags & MAP_FIXED)
17321+ return addr;
17322+
17323+#ifdef CONFIG_PAX_RANDMMAP
17324+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17325+#endif
17326+
17327+ if (addr) {
17328+ addr = PAGE_ALIGN(addr);
17329+ if (pax_task_size - len >= addr) {
17330+ vma = find_vma(mm, addr);
17331+ if (check_heap_stack_gap(vma, addr, len))
17332+ return addr;
17333+ }
17334+ }
17335+ if (len > mm->cached_hole_size) {
17336+ start_addr = addr = mm->free_area_cache;
17337+ } else {
17338+ start_addr = addr = mm->mmap_base;
17339+ mm->cached_hole_size = 0;
17340+ }
17341+
17342+#ifdef CONFIG_PAX_PAGEEXEC
17343+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
17344+ start_addr = 0x00110000UL;
17345+
17346+#ifdef CONFIG_PAX_RANDMMAP
17347+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17348+ start_addr += mm->delta_mmap & 0x03FFF000UL;
17349+#endif
17350+
17351+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
17352+ start_addr = addr = mm->mmap_base;
17353+ else
17354+ addr = start_addr;
17355+ }
17356+#endif
17357+
17358+full_search:
17359+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
17360+ /* At this point: (!vma || addr < vma->vm_end). */
17361+ if (pax_task_size - len < addr) {
17362+ /*
17363+ * Start a new search - just in case we missed
17364+ * some holes.
17365+ */
17366+ if (start_addr != mm->mmap_base) {
17367+ start_addr = addr = mm->mmap_base;
17368+ mm->cached_hole_size = 0;
17369+ goto full_search;
17370+ }
17371+ return -ENOMEM;
17372+ }
17373+ if (check_heap_stack_gap(vma, addr, len))
17374+ break;
17375+ if (addr + mm->cached_hole_size < vma->vm_start)
17376+ mm->cached_hole_size = vma->vm_start - addr;
17377+ addr = vma->vm_end;
17378+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
17379+ start_addr = addr = mm->mmap_base;
17380+ mm->cached_hole_size = 0;
17381+ goto full_search;
17382+ }
17383+ }
17384+
17385+ /*
17386+ * Remember the place where we stopped the search:
17387+ */
17388+ mm->free_area_cache = addr + len;
17389+ return addr;
17390+}
17391+
17392+unsigned long
17393+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17394+ const unsigned long len, const unsigned long pgoff,
17395+ const unsigned long flags)
17396+{
17397+ struct vm_area_struct *vma;
17398+ struct mm_struct *mm = current->mm;
17399+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
17400+
17401+#ifdef CONFIG_PAX_SEGMEXEC
17402+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17403+ pax_task_size = SEGMEXEC_TASK_SIZE;
17404+#endif
17405+
17406+ pax_task_size -= PAGE_SIZE;
17407+
17408+ /* requested length too big for entire address space */
17409+ if (len > pax_task_size)
17410+ return -ENOMEM;
17411+
17412+ if (flags & MAP_FIXED)
17413+ return addr;
17414+
17415+#ifdef CONFIG_PAX_PAGEEXEC
17416+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
17417+ goto bottomup;
17418+#endif
17419+
17420+#ifdef CONFIG_PAX_RANDMMAP
17421+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17422+#endif
17423+
17424+ /* requesting a specific address */
17425+ if (addr) {
17426+ addr = PAGE_ALIGN(addr);
17427+ if (pax_task_size - len >= addr) {
17428+ vma = find_vma(mm, addr);
17429+ if (check_heap_stack_gap(vma, addr, len))
17430+ return addr;
17431+ }
17432+ }
17433+
17434+ /* check if free_area_cache is useful for us */
17435+ if (len <= mm->cached_hole_size) {
17436+ mm->cached_hole_size = 0;
17437+ mm->free_area_cache = mm->mmap_base;
17438+ }
17439+
17440+ /* either no address requested or can't fit in requested address hole */
17441+ addr = mm->free_area_cache;
17442+
17443+ /* make sure it can fit in the remaining address space */
17444+ if (addr > len) {
17445+ vma = find_vma(mm, addr-len);
17446+ if (check_heap_stack_gap(vma, addr - len, len))
17447+ /* remember the address as a hint for next time */
17448+ return (mm->free_area_cache = addr-len);
17449+ }
17450+
17451+ if (mm->mmap_base < len)
17452+ goto bottomup;
17453+
17454+ addr = mm->mmap_base-len;
17455+
17456+ do {
17457+ /*
17458+ * Lookup failure means no vma is above this address,
17459+ * else if new region fits below vma->vm_start,
17460+ * return with success:
17461+ */
17462+ vma = find_vma(mm, addr);
17463+ if (check_heap_stack_gap(vma, addr, len))
17464+ /* remember the address as a hint for next time */
17465+ return (mm->free_area_cache = addr);
17466+
17467+ /* remember the largest hole we saw so far */
17468+ if (addr + mm->cached_hole_size < vma->vm_start)
17469+ mm->cached_hole_size = vma->vm_start - addr;
17470+
17471+ /* try just below the current vma->vm_start */
17472+ addr = skip_heap_stack_gap(vma, len);
17473+ } while (!IS_ERR_VALUE(addr));
17474+
17475+bottomup:
17476+ /*
17477+ * A failed mmap() very likely causes application failure,
17478+ * so fall back to the bottom-up function here. This scenario
17479+ * can happen with large stack limits and large mmap()
17480+ * allocations.
17481+ */
17482+
17483+#ifdef CONFIG_PAX_SEGMEXEC
17484+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17485+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17486+ else
17487+#endif
17488+
17489+ mm->mmap_base = TASK_UNMAPPED_BASE;
17490+
17491+#ifdef CONFIG_PAX_RANDMMAP
17492+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17493+ mm->mmap_base += mm->delta_mmap;
17494+#endif
17495+
17496+ mm->free_area_cache = mm->mmap_base;
17497+ mm->cached_hole_size = ~0UL;
17498+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17499+ /*
17500+ * Restore the topdown base:
17501+ */
17502+ mm->mmap_base = base;
17503+ mm->free_area_cache = base;
17504+ mm->cached_hole_size = ~0UL;
17505+
17506+ return addr;
17507 }
17508diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
17509index 0514890..3dbebce 100644
17510--- a/arch/x86/kernel/sys_x86_64.c
17511+++ b/arch/x86/kernel/sys_x86_64.c
17512@@ -95,8 +95,8 @@ out:
17513 return error;
17514 }
17515
17516-static void find_start_end(unsigned long flags, unsigned long *begin,
17517- unsigned long *end)
17518+static void find_start_end(struct mm_struct *mm, unsigned long flags,
17519+ unsigned long *begin, unsigned long *end)
17520 {
17521 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17522 unsigned long new_begin;
17523@@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
17524 *begin = new_begin;
17525 }
17526 } else {
17527- *begin = TASK_UNMAPPED_BASE;
17528+ *begin = mm->mmap_base;
17529 *end = TASK_SIZE;
17530 }
17531 }
17532@@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
17533 if (flags & MAP_FIXED)
17534 return addr;
17535
17536- find_start_end(flags, &begin, &end);
17537+ find_start_end(mm, flags, &begin, &end);
17538
17539 if (len > end)
17540 return -ENOMEM;
17541
17542+#ifdef CONFIG_PAX_RANDMMAP
17543+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17544+#endif
17545+
17546 if (addr) {
17547 addr = PAGE_ALIGN(addr);
17548 vma = find_vma(mm, addr);
17549- if (end - len >= addr &&
17550- (!vma || addr + len <= vma->vm_start))
17551+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17552 return addr;
17553 }
17554 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17555@@ -172,7 +175,7 @@ full_search:
17556 }
17557 return -ENOMEM;
17558 }
17559- if (!vma || addr + len <= vma->vm_start) {
17560+ if (check_heap_stack_gap(vma, addr, len)) {
17561 /*
17562 * Remember the place where we stopped the search:
17563 */
17564@@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17565 {
17566 struct vm_area_struct *vma;
17567 struct mm_struct *mm = current->mm;
17568- unsigned long addr = addr0;
17569+ unsigned long base = mm->mmap_base, addr = addr0;
17570
17571 /* requested length too big for entire address space */
17572 if (len > TASK_SIZE)
17573@@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17574 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17575 goto bottomup;
17576
17577+#ifdef CONFIG_PAX_RANDMMAP
17578+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17579+#endif
17580+
17581 /* requesting a specific address */
17582 if (addr) {
17583 addr = PAGE_ALIGN(addr);
17584- vma = find_vma(mm, addr);
17585- if (TASK_SIZE - len >= addr &&
17586- (!vma || addr + len <= vma->vm_start))
17587- return addr;
17588+ if (TASK_SIZE - len >= addr) {
17589+ vma = find_vma(mm, addr);
17590+ if (check_heap_stack_gap(vma, addr, len))
17591+ return addr;
17592+ }
17593 }
17594
17595 /* check if free_area_cache is useful for us */
17596@@ -232,7 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17597 ALIGN_TOPDOWN);
17598
17599 vma = find_vma(mm, tmp_addr);
17600- if (!vma || tmp_addr + len <= vma->vm_start)
17601+ if (check_heap_stack_gap(vma, tmp_addr, len))
17602 /* remember the address as a hint for next time */
17603 return mm->free_area_cache = tmp_addr;
17604 }
17605@@ -251,7 +259,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17606 * return with success:
17607 */
17608 vma = find_vma(mm, addr);
17609- if (!vma || addr+len <= vma->vm_start)
17610+ if (check_heap_stack_gap(vma, addr, len))
17611 /* remember the address as a hint for next time */
17612 return mm->free_area_cache = addr;
17613
17614@@ -260,8 +268,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17615 mm->cached_hole_size = vma->vm_start - addr;
17616
17617 /* try just below the current vma->vm_start */
17618- addr = vma->vm_start-len;
17619- } while (len < vma->vm_start);
17620+ addr = skip_heap_stack_gap(vma, len);
17621+ } while (!IS_ERR_VALUE(addr));
17622
17623 bottomup:
17624 /*
17625@@ -270,13 +278,21 @@ bottomup:
17626 * can happen with large stack limits and large mmap()
17627 * allocations.
17628 */
17629+ mm->mmap_base = TASK_UNMAPPED_BASE;
17630+
17631+#ifdef CONFIG_PAX_RANDMMAP
17632+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17633+ mm->mmap_base += mm->delta_mmap;
17634+#endif
17635+
17636+ mm->free_area_cache = mm->mmap_base;
17637 mm->cached_hole_size = ~0UL;
17638- mm->free_area_cache = TASK_UNMAPPED_BASE;
17639 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17640 /*
17641 * Restore the topdown base:
17642 */
17643- mm->free_area_cache = mm->mmap_base;
17644+ mm->mmap_base = base;
17645+ mm->free_area_cache = base;
17646 mm->cached_hole_size = ~0UL;
17647
17648 return addr;
17649diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
17650index 9a0e312..e6f66f2 100644
17651--- a/arch/x86/kernel/syscall_table_32.S
17652+++ b/arch/x86/kernel/syscall_table_32.S
17653@@ -1,3 +1,4 @@
17654+.section .rodata,"a",@progbits
17655 ENTRY(sys_call_table)
17656 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
17657 .long sys_exit
17658diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
17659index e2410e2..4fe3fbc 100644
17660--- a/arch/x86/kernel/tboot.c
17661+++ b/arch/x86/kernel/tboot.c
17662@@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
17663
17664 void tboot_shutdown(u32 shutdown_type)
17665 {
17666- void (*shutdown)(void);
17667+ void (* __noreturn shutdown)(void);
17668
17669 if (!tboot_enabled())
17670 return;
17671@@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
17672
17673 switch_to_tboot_pt();
17674
17675- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17676+ shutdown = (void *)tboot->shutdown_entry;
17677 shutdown();
17678
17679 /* should not reach here */
17680@@ -298,7 +298,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
17681 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17682 }
17683
17684-static atomic_t ap_wfs_count;
17685+static atomic_unchecked_t ap_wfs_count;
17686
17687 static int tboot_wait_for_aps(int num_aps)
17688 {
17689@@ -322,9 +322,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
17690 {
17691 switch (action) {
17692 case CPU_DYING:
17693- atomic_inc(&ap_wfs_count);
17694+ atomic_inc_unchecked(&ap_wfs_count);
17695 if (num_online_cpus() == 1)
17696- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17697+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17698 return NOTIFY_BAD;
17699 break;
17700 }
17701@@ -343,7 +343,7 @@ static __init int tboot_late_init(void)
17702
17703 tboot_create_trampoline();
17704
17705- atomic_set(&ap_wfs_count, 0);
17706+ atomic_set_unchecked(&ap_wfs_count, 0);
17707 register_hotcpu_notifier(&tboot_cpu_notifier);
17708 return 0;
17709 }
17710diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
17711index dd5fbf4..b7f2232 100644
17712--- a/arch/x86/kernel/time.c
17713+++ b/arch/x86/kernel/time.c
17714@@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
17715 {
17716 unsigned long pc = instruction_pointer(regs);
17717
17718- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17719+ if (!user_mode(regs) && in_lock_functions(pc)) {
17720 #ifdef CONFIG_FRAME_POINTER
17721- return *(unsigned long *)(regs->bp + sizeof(long));
17722+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17723 #else
17724 unsigned long *sp =
17725 (unsigned long *)kernel_stack_pointer(regs);
17726@@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
17727 * or above a saved flags. Eflags has bits 22-31 zero,
17728 * kernel addresses don't.
17729 */
17730+
17731+#ifdef CONFIG_PAX_KERNEXEC
17732+ return ktla_ktva(sp[0]);
17733+#else
17734 if (sp[0] >> 22)
17735 return sp[0];
17736 if (sp[1] >> 22)
17737 return sp[1];
17738 #endif
17739+
17740+#endif
17741 }
17742 return pc;
17743 }
17744diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
17745index 6bb7b85..dd853e1 100644
17746--- a/arch/x86/kernel/tls.c
17747+++ b/arch/x86/kernel/tls.c
17748@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
17749 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17750 return -EINVAL;
17751
17752+#ifdef CONFIG_PAX_SEGMEXEC
17753+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17754+ return -EINVAL;
17755+#endif
17756+
17757 set_tls_desc(p, idx, &info, 1);
17758
17759 return 0;
17760diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
17761index 451c0a7..e57f551 100644
17762--- a/arch/x86/kernel/trampoline_32.S
17763+++ b/arch/x86/kernel/trampoline_32.S
17764@@ -32,6 +32,12 @@
17765 #include <asm/segment.h>
17766 #include <asm/page_types.h>
17767
17768+#ifdef CONFIG_PAX_KERNEXEC
17769+#define ta(X) (X)
17770+#else
17771+#define ta(X) ((X) - __PAGE_OFFSET)
17772+#endif
17773+
17774 #ifdef CONFIG_SMP
17775
17776 .section ".x86_trampoline","a"
17777@@ -62,7 +68,7 @@ r_base = .
17778 inc %ax # protected mode (PE) bit
17779 lmsw %ax # into protected mode
17780 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17781- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17782+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
17783
17784 # These need to be in the same 64K segment as the above;
17785 # hence we don't use the boot_gdt_descr defined in head.S
17786diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
17787index 09ff517..df19fbff 100644
17788--- a/arch/x86/kernel/trampoline_64.S
17789+++ b/arch/x86/kernel/trampoline_64.S
17790@@ -90,7 +90,7 @@ startup_32:
17791 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17792 movl %eax, %ds
17793
17794- movl $X86_CR4_PAE, %eax
17795+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17796 movl %eax, %cr4 # Enable PAE mode
17797
17798 # Setup trampoline 4 level pagetables
17799@@ -138,7 +138,7 @@ tidt:
17800 # so the kernel can live anywhere
17801 .balign 4
17802 tgdt:
17803- .short tgdt_end - tgdt # gdt limit
17804+ .short tgdt_end - tgdt - 1 # gdt limit
17805 .long tgdt - r_base
17806 .short 0
17807 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17808diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
17809index 31d9d0f..e244dd9 100644
17810--- a/arch/x86/kernel/traps.c
17811+++ b/arch/x86/kernel/traps.c
17812@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
17813
17814 /* Do we ignore FPU interrupts ? */
17815 char ignore_fpu_irq;
17816-
17817-/*
17818- * The IDT has to be page-aligned to simplify the Pentium
17819- * F0 0F bug workaround.
17820- */
17821-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17822 #endif
17823
17824 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17825@@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
17826 }
17827
17828 static void __kprobes
17829-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17830+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17831 long error_code, siginfo_t *info)
17832 {
17833 struct task_struct *tsk = current;
17834
17835 #ifdef CONFIG_X86_32
17836- if (regs->flags & X86_VM_MASK) {
17837+ if (v8086_mode(regs)) {
17838 /*
17839 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17840 * On nmi (interrupt 2), do_trap should not be called.
17841@@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17842 }
17843 #endif
17844
17845- if (!user_mode(regs))
17846+ if (!user_mode_novm(regs))
17847 goto kernel_trap;
17848
17849 #ifdef CONFIG_X86_32
17850@@ -148,7 +142,7 @@ trap_signal:
17851 printk_ratelimit()) {
17852 printk(KERN_INFO
17853 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17854- tsk->comm, tsk->pid, str,
17855+ tsk->comm, task_pid_nr(tsk), str,
17856 regs->ip, regs->sp, error_code);
17857 print_vma_addr(" in ", regs->ip);
17858 printk("\n");
17859@@ -165,8 +159,20 @@ kernel_trap:
17860 if (!fixup_exception(regs)) {
17861 tsk->thread.error_code = error_code;
17862 tsk->thread.trap_no = trapnr;
17863+
17864+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17865+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17866+ str = "PAX: suspicious stack segment fault";
17867+#endif
17868+
17869 die(str, regs, error_code);
17870 }
17871+
17872+#ifdef CONFIG_PAX_REFCOUNT
17873+ if (trapnr == 4)
17874+ pax_report_refcount_overflow(regs);
17875+#endif
17876+
17877 return;
17878
17879 #ifdef CONFIG_X86_32
17880@@ -255,14 +261,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
17881 conditional_sti(regs);
17882
17883 #ifdef CONFIG_X86_32
17884- if (regs->flags & X86_VM_MASK)
17885+ if (v8086_mode(regs))
17886 goto gp_in_vm86;
17887 #endif
17888
17889 tsk = current;
17890- if (!user_mode(regs))
17891+ if (!user_mode_novm(regs))
17892 goto gp_in_kernel;
17893
17894+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17895+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17896+ struct mm_struct *mm = tsk->mm;
17897+ unsigned long limit;
17898+
17899+ down_write(&mm->mmap_sem);
17900+ limit = mm->context.user_cs_limit;
17901+ if (limit < TASK_SIZE) {
17902+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17903+ up_write(&mm->mmap_sem);
17904+ return;
17905+ }
17906+ up_write(&mm->mmap_sem);
17907+ }
17908+#endif
17909+
17910 tsk->thread.error_code = error_code;
17911 tsk->thread.trap_no = 13;
17912
17913@@ -295,6 +317,13 @@ gp_in_kernel:
17914 if (notify_die(DIE_GPF, "general protection fault", regs,
17915 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17916 return;
17917+
17918+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17919+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17920+ die("PAX: suspicious general protection fault", regs, error_code);
17921+ else
17922+#endif
17923+
17924 die("general protection fault", regs, error_code);
17925 }
17926
17927@@ -414,7 +443,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17928 /* It's safe to allow irq's after DR6 has been saved */
17929 preempt_conditional_sti(regs);
17930
17931- if (regs->flags & X86_VM_MASK) {
17932+ if (v8086_mode(regs)) {
17933 handle_vm86_trap((struct kernel_vm86_regs *) regs,
17934 error_code, 1);
17935 preempt_conditional_cli(regs);
17936@@ -428,7 +457,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17937 * We already checked v86 mode above, so we can check for kernel mode
17938 * by just checking the CPL of CS.
17939 */
17940- if ((dr6 & DR_STEP) && !user_mode(regs)) {
17941+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
17942 tsk->thread.debugreg6 &= ~DR_STEP;
17943 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
17944 regs->flags &= ~X86_EFLAGS_TF;
17945@@ -457,7 +486,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
17946 return;
17947 conditional_sti(regs);
17948
17949- if (!user_mode_vm(regs))
17950+ if (!user_mode(regs))
17951 {
17952 if (!fixup_exception(regs)) {
17953 task->thread.error_code = error_code;
17954@@ -569,8 +598,8 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
17955 void __math_state_restore(struct task_struct *tsk)
17956 {
17957 /* We need a safe address that is cheap to find and that is already
17958- in L1. We've just brought in "tsk->thread.has_fpu", so use that */
17959-#define safe_address (tsk->thread.has_fpu)
17960+ in L1. */
17961+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
17962
17963 /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
17964 is pending. Clear the x87 state here by setting it to fixed
17965diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
17966index b9242ba..50c5edd 100644
17967--- a/arch/x86/kernel/verify_cpu.S
17968+++ b/arch/x86/kernel/verify_cpu.S
17969@@ -20,6 +20,7 @@
17970 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17971 * arch/x86/kernel/trampoline_64.S: secondary processor verification
17972 * arch/x86/kernel/head_32.S: processor startup
17973+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17974 *
17975 * verify_cpu, returns the status of longmode and SSE in register %eax.
17976 * 0: Success 1: Failure
17977diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
17978index 863f875..4307295 100644
17979--- a/arch/x86/kernel/vm86_32.c
17980+++ b/arch/x86/kernel/vm86_32.c
17981@@ -41,6 +41,7 @@
17982 #include <linux/ptrace.h>
17983 #include <linux/audit.h>
17984 #include <linux/stddef.h>
17985+#include <linux/grsecurity.h>
17986
17987 #include <asm/uaccess.h>
17988 #include <asm/io.h>
17989@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
17990 do_exit(SIGSEGV);
17991 }
17992
17993- tss = &per_cpu(init_tss, get_cpu());
17994+ tss = init_tss + get_cpu();
17995 current->thread.sp0 = current->thread.saved_sp0;
17996 current->thread.sysenter_cs = __KERNEL_CS;
17997 load_sp0(tss, &current->thread);
17998@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
17999 struct task_struct *tsk;
18000 int tmp, ret = -EPERM;
18001
18002+#ifdef CONFIG_GRKERNSEC_VM86
18003+ if (!capable(CAP_SYS_RAWIO)) {
18004+ gr_handle_vm86();
18005+ goto out;
18006+ }
18007+#endif
18008+
18009 tsk = current;
18010 if (tsk->thread.saved_sp0)
18011 goto out;
18012@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
18013 int tmp, ret;
18014 struct vm86plus_struct __user *v86;
18015
18016+#ifdef CONFIG_GRKERNSEC_VM86
18017+ if (!capable(CAP_SYS_RAWIO)) {
18018+ gr_handle_vm86();
18019+ ret = -EPERM;
18020+ goto out;
18021+ }
18022+#endif
18023+
18024 tsk = current;
18025 switch (cmd) {
18026 case VM86_REQUEST_IRQ:
18027@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
18028 tsk->thread.saved_fs = info->regs32->fs;
18029 tsk->thread.saved_gs = get_user_gs(info->regs32);
18030
18031- tss = &per_cpu(init_tss, get_cpu());
18032+ tss = init_tss + get_cpu();
18033 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
18034 if (cpu_has_sep)
18035 tsk->thread.sysenter_cs = 0;
18036@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
18037 goto cannot_handle;
18038 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
18039 goto cannot_handle;
18040- intr_ptr = (unsigned long __user *) (i << 2);
18041+ intr_ptr = (__force unsigned long __user *) (i << 2);
18042 if (get_user(segoffs, intr_ptr))
18043 goto cannot_handle;
18044 if ((segoffs >> 16) == BIOSSEG)
18045diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
18046index 0f703f1..9e15f64 100644
18047--- a/arch/x86/kernel/vmlinux.lds.S
18048+++ b/arch/x86/kernel/vmlinux.lds.S
18049@@ -26,6 +26,13 @@
18050 #include <asm/page_types.h>
18051 #include <asm/cache.h>
18052 #include <asm/boot.h>
18053+#include <asm/segment.h>
18054+
18055+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18056+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
18057+#else
18058+#define __KERNEL_TEXT_OFFSET 0
18059+#endif
18060
18061 #undef i386 /* in case the preprocessor is a 32bit one */
18062
18063@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
18064
18065 PHDRS {
18066 text PT_LOAD FLAGS(5); /* R_E */
18067+#ifdef CONFIG_X86_32
18068+ module PT_LOAD FLAGS(5); /* R_E */
18069+#endif
18070+#ifdef CONFIG_XEN
18071+ rodata PT_LOAD FLAGS(5); /* R_E */
18072+#else
18073+ rodata PT_LOAD FLAGS(4); /* R__ */
18074+#endif
18075 data PT_LOAD FLAGS(6); /* RW_ */
18076-#ifdef CONFIG_X86_64
18077+ init.begin PT_LOAD FLAGS(6); /* RW_ */
18078 #ifdef CONFIG_SMP
18079 percpu PT_LOAD FLAGS(6); /* RW_ */
18080 #endif
18081+ text.init PT_LOAD FLAGS(5); /* R_E */
18082+ text.exit PT_LOAD FLAGS(5); /* R_E */
18083 init PT_LOAD FLAGS(7); /* RWE */
18084-#endif
18085 note PT_NOTE FLAGS(0); /* ___ */
18086 }
18087
18088 SECTIONS
18089 {
18090 #ifdef CONFIG_X86_32
18091- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
18092- phys_startup_32 = startup_32 - LOAD_OFFSET;
18093+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
18094 #else
18095- . = __START_KERNEL;
18096- phys_startup_64 = startup_64 - LOAD_OFFSET;
18097+ . = __START_KERNEL;
18098 #endif
18099
18100 /* Text and read-only data */
18101- .text : AT(ADDR(.text) - LOAD_OFFSET) {
18102- _text = .;
18103+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18104 /* bootstrapping code */
18105+#ifdef CONFIG_X86_32
18106+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18107+#else
18108+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18109+#endif
18110+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18111+ _text = .;
18112 HEAD_TEXT
18113 #ifdef CONFIG_X86_32
18114 . = ALIGN(PAGE_SIZE);
18115@@ -108,13 +128,47 @@ SECTIONS
18116 IRQENTRY_TEXT
18117 *(.fixup)
18118 *(.gnu.warning)
18119- /* End of text section */
18120- _etext = .;
18121 } :text = 0x9090
18122
18123- NOTES :text :note
18124+ . += __KERNEL_TEXT_OFFSET;
18125
18126- EXCEPTION_TABLE(16) :text = 0x9090
18127+#ifdef CONFIG_X86_32
18128+ . = ALIGN(PAGE_SIZE);
18129+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
18130+
18131+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
18132+ MODULES_EXEC_VADDR = .;
18133+ BYTE(0)
18134+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
18135+ . = ALIGN(HPAGE_SIZE);
18136+ MODULES_EXEC_END = . - 1;
18137+#endif
18138+
18139+ } :module
18140+#endif
18141+
18142+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
18143+ /* End of text section */
18144+ _etext = . - __KERNEL_TEXT_OFFSET;
18145+ }
18146+
18147+#ifdef CONFIG_X86_32
18148+ . = ALIGN(PAGE_SIZE);
18149+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
18150+ *(.idt)
18151+ . = ALIGN(PAGE_SIZE);
18152+ *(.empty_zero_page)
18153+ *(.initial_pg_fixmap)
18154+ *(.initial_pg_pmd)
18155+ *(.initial_page_table)
18156+ *(.swapper_pg_dir)
18157+ } :rodata
18158+#endif
18159+
18160+ . = ALIGN(PAGE_SIZE);
18161+ NOTES :rodata :note
18162+
18163+ EXCEPTION_TABLE(16) :rodata
18164
18165 #if defined(CONFIG_DEBUG_RODATA)
18166 /* .text should occupy whole number of pages */
18167@@ -126,16 +180,20 @@ SECTIONS
18168
18169 /* Data */
18170 .data : AT(ADDR(.data) - LOAD_OFFSET) {
18171+
18172+#ifdef CONFIG_PAX_KERNEXEC
18173+ . = ALIGN(HPAGE_SIZE);
18174+#else
18175+ . = ALIGN(PAGE_SIZE);
18176+#endif
18177+
18178 /* Start of data section */
18179 _sdata = .;
18180
18181 /* init_task */
18182 INIT_TASK_DATA(THREAD_SIZE)
18183
18184-#ifdef CONFIG_X86_32
18185- /* 32 bit has nosave before _edata */
18186 NOSAVE_DATA
18187-#endif
18188
18189 PAGE_ALIGNED_DATA(PAGE_SIZE)
18190
18191@@ -176,12 +234,19 @@ SECTIONS
18192 #endif /* CONFIG_X86_64 */
18193
18194 /* Init code and data - will be freed after init */
18195- . = ALIGN(PAGE_SIZE);
18196 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
18197+ BYTE(0)
18198+
18199+#ifdef CONFIG_PAX_KERNEXEC
18200+ . = ALIGN(HPAGE_SIZE);
18201+#else
18202+ . = ALIGN(PAGE_SIZE);
18203+#endif
18204+
18205 __init_begin = .; /* paired with __init_end */
18206- }
18207+ } :init.begin
18208
18209-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
18210+#ifdef CONFIG_SMP
18211 /*
18212 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
18213 * output PHDR, so the next output section - .init.text - should
18214@@ -190,12 +255,27 @@ SECTIONS
18215 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
18216 #endif
18217
18218- INIT_TEXT_SECTION(PAGE_SIZE)
18219-#ifdef CONFIG_X86_64
18220- :init
18221-#endif
18222+ . = ALIGN(PAGE_SIZE);
18223+ init_begin = .;
18224+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
18225+ VMLINUX_SYMBOL(_sinittext) = .;
18226+ INIT_TEXT
18227+ VMLINUX_SYMBOL(_einittext) = .;
18228+ . = ALIGN(PAGE_SIZE);
18229+ } :text.init
18230
18231- INIT_DATA_SECTION(16)
18232+ /*
18233+ * .exit.text is discard at runtime, not link time, to deal with
18234+ * references from .altinstructions and .eh_frame
18235+ */
18236+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18237+ EXIT_TEXT
18238+ . = ALIGN(16);
18239+ } :text.exit
18240+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
18241+
18242+ . = ALIGN(PAGE_SIZE);
18243+ INIT_DATA_SECTION(16) :init
18244
18245 /*
18246 * Code and data for a variety of lowlevel trampolines, to be
18247@@ -269,19 +349,12 @@ SECTIONS
18248 }
18249
18250 . = ALIGN(8);
18251- /*
18252- * .exit.text is discard at runtime, not link time, to deal with
18253- * references from .altinstructions and .eh_frame
18254- */
18255- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
18256- EXIT_TEXT
18257- }
18258
18259 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
18260 EXIT_DATA
18261 }
18262
18263-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
18264+#ifndef CONFIG_SMP
18265 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
18266 #endif
18267
18268@@ -300,16 +373,10 @@ SECTIONS
18269 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
18270 __smp_locks = .;
18271 *(.smp_locks)
18272- . = ALIGN(PAGE_SIZE);
18273 __smp_locks_end = .;
18274+ . = ALIGN(PAGE_SIZE);
18275 }
18276
18277-#ifdef CONFIG_X86_64
18278- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18279- NOSAVE_DATA
18280- }
18281-#endif
18282-
18283 /* BSS */
18284 . = ALIGN(PAGE_SIZE);
18285 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18286@@ -325,6 +392,7 @@ SECTIONS
18287 __brk_base = .;
18288 . += 64 * 1024; /* 64k alignment slop space */
18289 *(.brk_reservation) /* areas brk users have reserved */
18290+ . = ALIGN(HPAGE_SIZE);
18291 __brk_limit = .;
18292 }
18293
18294@@ -351,13 +419,12 @@ SECTIONS
18295 * for the boot processor.
18296 */
18297 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
18298-INIT_PER_CPU(gdt_page);
18299 INIT_PER_CPU(irq_stack_union);
18300
18301 /*
18302 * Build-time check on the image size:
18303 */
18304-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18305+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18306 "kernel image bigger than KERNEL_IMAGE_SIZE");
18307
18308 #ifdef CONFIG_SMP
18309diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
18310index e4d4a22..47ee71f 100644
18311--- a/arch/x86/kernel/vsyscall_64.c
18312+++ b/arch/x86/kernel/vsyscall_64.c
18313@@ -57,15 +57,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
18314 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
18315 };
18316
18317-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
18318+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
18319
18320 static int __init vsyscall_setup(char *str)
18321 {
18322 if (str) {
18323 if (!strcmp("emulate", str))
18324 vsyscall_mode = EMULATE;
18325- else if (!strcmp("native", str))
18326- vsyscall_mode = NATIVE;
18327 else if (!strcmp("none", str))
18328 vsyscall_mode = NONE;
18329 else
18330@@ -178,7 +176,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18331
18332 tsk = current;
18333 if (seccomp_mode(&tsk->seccomp))
18334- do_exit(SIGKILL);
18335+ do_group_exit(SIGKILL);
18336
18337 switch (vsyscall_nr) {
18338 case 0:
18339@@ -220,8 +218,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18340 return true;
18341
18342 sigsegv:
18343- force_sig(SIGSEGV, current);
18344- return true;
18345+ do_group_exit(SIGKILL);
18346 }
18347
18348 /*
18349@@ -274,10 +271,7 @@ void __init map_vsyscall(void)
18350 extern char __vvar_page;
18351 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
18352
18353- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
18354- vsyscall_mode == NATIVE
18355- ? PAGE_KERNEL_VSYSCALL
18356- : PAGE_KERNEL_VVAR);
18357+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
18358 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
18359 (unsigned long)VSYSCALL_START);
18360
18361diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
18362index 9796c2f..f686fbf 100644
18363--- a/arch/x86/kernel/x8664_ksyms_64.c
18364+++ b/arch/x86/kernel/x8664_ksyms_64.c
18365@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
18366 EXPORT_SYMBOL(copy_user_generic_string);
18367 EXPORT_SYMBOL(copy_user_generic_unrolled);
18368 EXPORT_SYMBOL(__copy_user_nocache);
18369-EXPORT_SYMBOL(_copy_from_user);
18370-EXPORT_SYMBOL(_copy_to_user);
18371
18372 EXPORT_SYMBOL(copy_page);
18373 EXPORT_SYMBOL(clear_page);
18374diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
18375index 7110911..e8cdee5 100644
18376--- a/arch/x86/kernel/xsave.c
18377+++ b/arch/x86/kernel/xsave.c
18378@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
18379 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18380 return -EINVAL;
18381
18382- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18383+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18384 fx_sw_user->extended_size -
18385 FP_XSTATE_MAGIC2_SIZE));
18386 if (err)
18387@@ -266,7 +266,7 @@ fx_only:
18388 * the other extended state.
18389 */
18390 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18391- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18392+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
18393 }
18394
18395 /*
18396@@ -295,7 +295,7 @@ int restore_i387_xstate(void __user *buf)
18397 if (use_xsave())
18398 err = restore_user_xstate(buf);
18399 else
18400- err = fxrstor_checking((__force struct i387_fxsave_struct *)
18401+ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
18402 buf);
18403 if (unlikely(err)) {
18404 /*
18405diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
18406index f1e3be1..588efc8 100644
18407--- a/arch/x86/kvm/emulate.c
18408+++ b/arch/x86/kvm/emulate.c
18409@@ -249,6 +249,7 @@ struct gprefix {
18410
18411 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
18412 do { \
18413+ unsigned long _tmp; \
18414 __asm__ __volatile__ ( \
18415 _PRE_EFLAGS("0", "4", "2") \
18416 _op _suffix " %"_x"3,%1; " \
18417@@ -263,8 +264,6 @@ struct gprefix {
18418 /* Raw emulation: instruction has two explicit operands. */
18419 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
18420 do { \
18421- unsigned long _tmp; \
18422- \
18423 switch ((ctxt)->dst.bytes) { \
18424 case 2: \
18425 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
18426@@ -280,7 +279,6 @@ struct gprefix {
18427
18428 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18429 do { \
18430- unsigned long _tmp; \
18431 switch ((ctxt)->dst.bytes) { \
18432 case 1: \
18433 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
18434diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
18435index 54abb40..a192606 100644
18436--- a/arch/x86/kvm/lapic.c
18437+++ b/arch/x86/kvm/lapic.c
18438@@ -53,7 +53,7 @@
18439 #define APIC_BUS_CYCLE_NS 1
18440
18441 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18442-#define apic_debug(fmt, arg...)
18443+#define apic_debug(fmt, arg...) do {} while (0)
18444
18445 #define APIC_LVT_NUM 6
18446 /* 14 is the version for Xeon and Pentium 8.4.8*/
18447diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
18448index f1b36cf..af8a124 100644
18449--- a/arch/x86/kvm/mmu.c
18450+++ b/arch/x86/kvm/mmu.c
18451@@ -3555,7 +3555,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18452
18453 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
18454
18455- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
18456+ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
18457
18458 /*
18459 * Assume that the pte write on a page table of the same type
18460@@ -3587,7 +3587,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18461 }
18462
18463 spin_lock(&vcpu->kvm->mmu_lock);
18464- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18465+ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18466 gentry = 0;
18467 kvm_mmu_free_some_pages(vcpu);
18468 ++vcpu->kvm->stat.mmu_pte_write;
18469diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
18470index 9299410..ade2f9b 100644
18471--- a/arch/x86/kvm/paging_tmpl.h
18472+++ b/arch/x86/kvm/paging_tmpl.h
18473@@ -197,7 +197,7 @@ retry_walk:
18474 if (unlikely(kvm_is_error_hva(host_addr)))
18475 goto error;
18476
18477- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
18478+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
18479 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
18480 goto error;
18481
18482@@ -705,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
18483 if (need_flush)
18484 kvm_flush_remote_tlbs(vcpu->kvm);
18485
18486- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
18487+ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
18488
18489 spin_unlock(&vcpu->kvm->mmu_lock);
18490
18491diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
18492index e32243e..a6e6172 100644
18493--- a/arch/x86/kvm/svm.c
18494+++ b/arch/x86/kvm/svm.c
18495@@ -3400,7 +3400,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
18496 int cpu = raw_smp_processor_id();
18497
18498 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
18499+
18500+ pax_open_kernel();
18501 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
18502+ pax_close_kernel();
18503+
18504 load_TR_desc();
18505 }
18506
18507@@ -3778,6 +3782,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
18508 #endif
18509 #endif
18510
18511+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18512+ __set_fs(current_thread_info()->addr_limit);
18513+#endif
18514+
18515 reload_tss(vcpu);
18516
18517 local_irq_disable();
18518diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
18519index 4ea7678..b3a7084 100644
18520--- a/arch/x86/kvm/vmx.c
18521+++ b/arch/x86/kvm/vmx.c
18522@@ -1305,7 +1305,11 @@ static void reload_tss(void)
18523 struct desc_struct *descs;
18524
18525 descs = (void *)gdt->address;
18526+
18527+ pax_open_kernel();
18528 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18529+ pax_close_kernel();
18530+
18531 load_TR_desc();
18532 }
18533
18534@@ -2633,8 +2637,11 @@ static __init int hardware_setup(void)
18535 if (!cpu_has_vmx_flexpriority())
18536 flexpriority_enabled = 0;
18537
18538- if (!cpu_has_vmx_tpr_shadow())
18539- kvm_x86_ops->update_cr8_intercept = NULL;
18540+ if (!cpu_has_vmx_tpr_shadow()) {
18541+ pax_open_kernel();
18542+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18543+ pax_close_kernel();
18544+ }
18545
18546 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18547 kvm_disable_largepages();
18548@@ -3648,7 +3655,7 @@ static void vmx_set_constant_host_state(void)
18549 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
18550
18551 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
18552- vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
18553+ vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
18554
18555 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
18556 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
18557@@ -6169,6 +6176,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18558 "jmp .Lkvm_vmx_return \n\t"
18559 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18560 ".Lkvm_vmx_return: "
18561+
18562+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18563+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18564+ ".Lkvm_vmx_return2: "
18565+#endif
18566+
18567 /* Save guest registers, load host registers, keep flags */
18568 "mov %0, %c[wordsize](%%"R"sp) \n\t"
18569 "pop %0 \n\t"
18570@@ -6217,6 +6230,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18571 #endif
18572 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
18573 [wordsize]"i"(sizeof(ulong))
18574+
18575+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18576+ ,[cs]"i"(__KERNEL_CS)
18577+#endif
18578+
18579 : "cc", "memory"
18580 , R"ax", R"bx", R"di", R"si"
18581 #ifdef CONFIG_X86_64
18582@@ -6245,7 +6263,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18583 }
18584 }
18585
18586- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18587+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18588+
18589+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18590+ loadsegment(fs, __KERNEL_PERCPU);
18591+#endif
18592+
18593+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18594+ __set_fs(current_thread_info()->addr_limit);
18595+#endif
18596+
18597 vmx->loaded_vmcs->launched = 1;
18598
18599 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
18600diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
18601index 4c938da..4ddef65 100644
18602--- a/arch/x86/kvm/x86.c
18603+++ b/arch/x86/kvm/x86.c
18604@@ -1345,8 +1345,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
18605 {
18606 struct kvm *kvm = vcpu->kvm;
18607 int lm = is_long_mode(vcpu);
18608- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18609- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18610+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18611+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18612 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
18613 : kvm->arch.xen_hvm_config.blob_size_32;
18614 u32 page_num = data & ~PAGE_MASK;
18615@@ -2165,6 +2165,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
18616 if (n < msr_list.nmsrs)
18617 goto out;
18618 r = -EFAULT;
18619+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
18620+ goto out;
18621 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
18622 num_msrs_to_save * sizeof(u32)))
18623 goto out;
18624@@ -2340,15 +2342,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
18625 struct kvm_cpuid2 *cpuid,
18626 struct kvm_cpuid_entry2 __user *entries)
18627 {
18628- int r;
18629+ int r, i;
18630
18631 r = -E2BIG;
18632 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18633 goto out;
18634 r = -EFAULT;
18635- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18636- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18637+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18638 goto out;
18639+ for (i = 0; i < cpuid->nent; ++i) {
18640+ struct kvm_cpuid_entry2 cpuid_entry;
18641+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18642+ goto out;
18643+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
18644+ }
18645 vcpu->arch.cpuid_nent = cpuid->nent;
18646 kvm_apic_set_version(vcpu);
18647 kvm_x86_ops->cpuid_update(vcpu);
18648@@ -2363,15 +2370,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
18649 struct kvm_cpuid2 *cpuid,
18650 struct kvm_cpuid_entry2 __user *entries)
18651 {
18652- int r;
18653+ int r, i;
18654
18655 r = -E2BIG;
18656 if (cpuid->nent < vcpu->arch.cpuid_nent)
18657 goto out;
18658 r = -EFAULT;
18659- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18660- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18661+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18662 goto out;
18663+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18664+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18665+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18666+ goto out;
18667+ }
18668 return 0;
18669
18670 out:
18671@@ -2746,7 +2757,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
18672 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18673 struct kvm_interrupt *irq)
18674 {
18675- if (irq->irq < 0 || irq->irq >= 256)
18676+ if (irq->irq >= 256)
18677 return -EINVAL;
18678 if (irqchip_in_kernel(vcpu->kvm))
18679 return -ENXIO;
18680@@ -5162,7 +5173,7 @@ static void kvm_set_mmio_spte_mask(void)
18681 kvm_mmu_set_mmio_spte_mask(mask);
18682 }
18683
18684-int kvm_arch_init(void *opaque)
18685+int kvm_arch_init(const void *opaque)
18686 {
18687 int r;
18688 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18689diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
18690index cf4603b..7cdde38 100644
18691--- a/arch/x86/lguest/boot.c
18692+++ b/arch/x86/lguest/boot.c
18693@@ -1195,9 +1195,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
18694 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18695 * Launcher to reboot us.
18696 */
18697-static void lguest_restart(char *reason)
18698+static __noreturn void lguest_restart(char *reason)
18699 {
18700 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
18701+ BUG();
18702 }
18703
18704 /*G:050
18705diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
18706index 042f682..c92afb6 100644
18707--- a/arch/x86/lib/atomic64_32.c
18708+++ b/arch/x86/lib/atomic64_32.c
18709@@ -8,18 +8,30 @@
18710
18711 long long atomic64_read_cx8(long long, const atomic64_t *v);
18712 EXPORT_SYMBOL(atomic64_read_cx8);
18713+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18714+EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
18715 long long atomic64_set_cx8(long long, const atomic64_t *v);
18716 EXPORT_SYMBOL(atomic64_set_cx8);
18717+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18718+EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
18719 long long atomic64_xchg_cx8(long long, unsigned high);
18720 EXPORT_SYMBOL(atomic64_xchg_cx8);
18721 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
18722 EXPORT_SYMBOL(atomic64_add_return_cx8);
18723+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18724+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
18725 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
18726 EXPORT_SYMBOL(atomic64_sub_return_cx8);
18727+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18728+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
18729 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
18730 EXPORT_SYMBOL(atomic64_inc_return_cx8);
18731+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18732+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
18733 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
18734 EXPORT_SYMBOL(atomic64_dec_return_cx8);
18735+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18736+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
18737 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
18738 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
18739 int atomic64_inc_not_zero_cx8(atomic64_t *v);
18740@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
18741 #ifndef CONFIG_X86_CMPXCHG64
18742 long long atomic64_read_386(long long, const atomic64_t *v);
18743 EXPORT_SYMBOL(atomic64_read_386);
18744+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
18745+EXPORT_SYMBOL(atomic64_read_unchecked_386);
18746 long long atomic64_set_386(long long, const atomic64_t *v);
18747 EXPORT_SYMBOL(atomic64_set_386);
18748+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
18749+EXPORT_SYMBOL(atomic64_set_unchecked_386);
18750 long long atomic64_xchg_386(long long, unsigned high);
18751 EXPORT_SYMBOL(atomic64_xchg_386);
18752 long long atomic64_add_return_386(long long a, atomic64_t *v);
18753 EXPORT_SYMBOL(atomic64_add_return_386);
18754+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18755+EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
18756 long long atomic64_sub_return_386(long long a, atomic64_t *v);
18757 EXPORT_SYMBOL(atomic64_sub_return_386);
18758+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18759+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
18760 long long atomic64_inc_return_386(long long a, atomic64_t *v);
18761 EXPORT_SYMBOL(atomic64_inc_return_386);
18762+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18763+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
18764 long long atomic64_dec_return_386(long long a, atomic64_t *v);
18765 EXPORT_SYMBOL(atomic64_dec_return_386);
18766+long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18767+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
18768 long long atomic64_add_386(long long a, atomic64_t *v);
18769 EXPORT_SYMBOL(atomic64_add_386);
18770+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
18771+EXPORT_SYMBOL(atomic64_add_unchecked_386);
18772 long long atomic64_sub_386(long long a, atomic64_t *v);
18773 EXPORT_SYMBOL(atomic64_sub_386);
18774+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
18775+EXPORT_SYMBOL(atomic64_sub_unchecked_386);
18776 long long atomic64_inc_386(long long a, atomic64_t *v);
18777 EXPORT_SYMBOL(atomic64_inc_386);
18778+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
18779+EXPORT_SYMBOL(atomic64_inc_unchecked_386);
18780 long long atomic64_dec_386(long long a, atomic64_t *v);
18781 EXPORT_SYMBOL(atomic64_dec_386);
18782+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
18783+EXPORT_SYMBOL(atomic64_dec_unchecked_386);
18784 long long atomic64_dec_if_positive_386(atomic64_t *v);
18785 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
18786 int atomic64_inc_not_zero_386(atomic64_t *v);
18787diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
18788index e8e7e0d..56fd1b0 100644
18789--- a/arch/x86/lib/atomic64_386_32.S
18790+++ b/arch/x86/lib/atomic64_386_32.S
18791@@ -48,6 +48,10 @@ BEGIN(read)
18792 movl (v), %eax
18793 movl 4(v), %edx
18794 RET_ENDP
18795+BEGIN(read_unchecked)
18796+ movl (v), %eax
18797+ movl 4(v), %edx
18798+RET_ENDP
18799 #undef v
18800
18801 #define v %esi
18802@@ -55,6 +59,10 @@ BEGIN(set)
18803 movl %ebx, (v)
18804 movl %ecx, 4(v)
18805 RET_ENDP
18806+BEGIN(set_unchecked)
18807+ movl %ebx, (v)
18808+ movl %ecx, 4(v)
18809+RET_ENDP
18810 #undef v
18811
18812 #define v %esi
18813@@ -70,6 +78,20 @@ RET_ENDP
18814 BEGIN(add)
18815 addl %eax, (v)
18816 adcl %edx, 4(v)
18817+
18818+#ifdef CONFIG_PAX_REFCOUNT
18819+ jno 0f
18820+ subl %eax, (v)
18821+ sbbl %edx, 4(v)
18822+ int $4
18823+0:
18824+ _ASM_EXTABLE(0b, 0b)
18825+#endif
18826+
18827+RET_ENDP
18828+BEGIN(add_unchecked)
18829+ addl %eax, (v)
18830+ adcl %edx, 4(v)
18831 RET_ENDP
18832 #undef v
18833
18834@@ -77,6 +99,24 @@ RET_ENDP
18835 BEGIN(add_return)
18836 addl (v), %eax
18837 adcl 4(v), %edx
18838+
18839+#ifdef CONFIG_PAX_REFCOUNT
18840+ into
18841+1234:
18842+ _ASM_EXTABLE(1234b, 2f)
18843+#endif
18844+
18845+ movl %eax, (v)
18846+ movl %edx, 4(v)
18847+
18848+#ifdef CONFIG_PAX_REFCOUNT
18849+2:
18850+#endif
18851+
18852+RET_ENDP
18853+BEGIN(add_return_unchecked)
18854+ addl (v), %eax
18855+ adcl 4(v), %edx
18856 movl %eax, (v)
18857 movl %edx, 4(v)
18858 RET_ENDP
18859@@ -86,6 +126,20 @@ RET_ENDP
18860 BEGIN(sub)
18861 subl %eax, (v)
18862 sbbl %edx, 4(v)
18863+
18864+#ifdef CONFIG_PAX_REFCOUNT
18865+ jno 0f
18866+ addl %eax, (v)
18867+ adcl %edx, 4(v)
18868+ int $4
18869+0:
18870+ _ASM_EXTABLE(0b, 0b)
18871+#endif
18872+
18873+RET_ENDP
18874+BEGIN(sub_unchecked)
18875+ subl %eax, (v)
18876+ sbbl %edx, 4(v)
18877 RET_ENDP
18878 #undef v
18879
18880@@ -96,6 +150,27 @@ BEGIN(sub_return)
18881 sbbl $0, %edx
18882 addl (v), %eax
18883 adcl 4(v), %edx
18884+
18885+#ifdef CONFIG_PAX_REFCOUNT
18886+ into
18887+1234:
18888+ _ASM_EXTABLE(1234b, 2f)
18889+#endif
18890+
18891+ movl %eax, (v)
18892+ movl %edx, 4(v)
18893+
18894+#ifdef CONFIG_PAX_REFCOUNT
18895+2:
18896+#endif
18897+
18898+RET_ENDP
18899+BEGIN(sub_return_unchecked)
18900+ negl %edx
18901+ negl %eax
18902+ sbbl $0, %edx
18903+ addl (v), %eax
18904+ adcl 4(v), %edx
18905 movl %eax, (v)
18906 movl %edx, 4(v)
18907 RET_ENDP
18908@@ -105,6 +180,20 @@ RET_ENDP
18909 BEGIN(inc)
18910 addl $1, (v)
18911 adcl $0, 4(v)
18912+
18913+#ifdef CONFIG_PAX_REFCOUNT
18914+ jno 0f
18915+ subl $1, (v)
18916+ sbbl $0, 4(v)
18917+ int $4
18918+0:
18919+ _ASM_EXTABLE(0b, 0b)
18920+#endif
18921+
18922+RET_ENDP
18923+BEGIN(inc_unchecked)
18924+ addl $1, (v)
18925+ adcl $0, 4(v)
18926 RET_ENDP
18927 #undef v
18928
18929@@ -114,6 +203,26 @@ BEGIN(inc_return)
18930 movl 4(v), %edx
18931 addl $1, %eax
18932 adcl $0, %edx
18933+
18934+#ifdef CONFIG_PAX_REFCOUNT
18935+ into
18936+1234:
18937+ _ASM_EXTABLE(1234b, 2f)
18938+#endif
18939+
18940+ movl %eax, (v)
18941+ movl %edx, 4(v)
18942+
18943+#ifdef CONFIG_PAX_REFCOUNT
18944+2:
18945+#endif
18946+
18947+RET_ENDP
18948+BEGIN(inc_return_unchecked)
18949+ movl (v), %eax
18950+ movl 4(v), %edx
18951+ addl $1, %eax
18952+ adcl $0, %edx
18953 movl %eax, (v)
18954 movl %edx, 4(v)
18955 RET_ENDP
18956@@ -123,6 +232,20 @@ RET_ENDP
18957 BEGIN(dec)
18958 subl $1, (v)
18959 sbbl $0, 4(v)
18960+
18961+#ifdef CONFIG_PAX_REFCOUNT
18962+ jno 0f
18963+ addl $1, (v)
18964+ adcl $0, 4(v)
18965+ int $4
18966+0:
18967+ _ASM_EXTABLE(0b, 0b)
18968+#endif
18969+
18970+RET_ENDP
18971+BEGIN(dec_unchecked)
18972+ subl $1, (v)
18973+ sbbl $0, 4(v)
18974 RET_ENDP
18975 #undef v
18976
18977@@ -132,6 +255,26 @@ BEGIN(dec_return)
18978 movl 4(v), %edx
18979 subl $1, %eax
18980 sbbl $0, %edx
18981+
18982+#ifdef CONFIG_PAX_REFCOUNT
18983+ into
18984+1234:
18985+ _ASM_EXTABLE(1234b, 2f)
18986+#endif
18987+
18988+ movl %eax, (v)
18989+ movl %edx, 4(v)
18990+
18991+#ifdef CONFIG_PAX_REFCOUNT
18992+2:
18993+#endif
18994+
18995+RET_ENDP
18996+BEGIN(dec_return_unchecked)
18997+ movl (v), %eax
18998+ movl 4(v), %edx
18999+ subl $1, %eax
19000+ sbbl $0, %edx
19001 movl %eax, (v)
19002 movl %edx, 4(v)
19003 RET_ENDP
19004@@ -143,6 +286,13 @@ BEGIN(add_unless)
19005 adcl %edx, %edi
19006 addl (v), %eax
19007 adcl 4(v), %edx
19008+
19009+#ifdef CONFIG_PAX_REFCOUNT
19010+ into
19011+1234:
19012+ _ASM_EXTABLE(1234b, 2f)
19013+#endif
19014+
19015 cmpl %eax, %esi
19016 je 3f
19017 1:
19018@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
19019 1:
19020 addl $1, %eax
19021 adcl $0, %edx
19022+
19023+#ifdef CONFIG_PAX_REFCOUNT
19024+ into
19025+1234:
19026+ _ASM_EXTABLE(1234b, 2f)
19027+#endif
19028+
19029 movl %eax, (v)
19030 movl %edx, 4(v)
19031 movl $1, %eax
19032@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
19033 movl 4(v), %edx
19034 subl $1, %eax
19035 sbbl $0, %edx
19036+
19037+#ifdef CONFIG_PAX_REFCOUNT
19038+ into
19039+1234:
19040+ _ASM_EXTABLE(1234b, 1f)
19041+#endif
19042+
19043 js 1f
19044 movl %eax, (v)
19045 movl %edx, 4(v)
19046diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
19047index 391a083..d658e9f 100644
19048--- a/arch/x86/lib/atomic64_cx8_32.S
19049+++ b/arch/x86/lib/atomic64_cx8_32.S
19050@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
19051 CFI_STARTPROC
19052
19053 read64 %ecx
19054+ pax_force_retaddr
19055 ret
19056 CFI_ENDPROC
19057 ENDPROC(atomic64_read_cx8)
19058
19059+ENTRY(atomic64_read_unchecked_cx8)
19060+ CFI_STARTPROC
19061+
19062+ read64 %ecx
19063+ pax_force_retaddr
19064+ ret
19065+ CFI_ENDPROC
19066+ENDPROC(atomic64_read_unchecked_cx8)
19067+
19068 ENTRY(atomic64_set_cx8)
19069 CFI_STARTPROC
19070
19071@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
19072 cmpxchg8b (%esi)
19073 jne 1b
19074
19075+ pax_force_retaddr
19076 ret
19077 CFI_ENDPROC
19078 ENDPROC(atomic64_set_cx8)
19079
19080+ENTRY(atomic64_set_unchecked_cx8)
19081+ CFI_STARTPROC
19082+
19083+1:
19084+/* we don't need LOCK_PREFIX since aligned 64-bit writes
19085+ * are atomic on 586 and newer */
19086+ cmpxchg8b (%esi)
19087+ jne 1b
19088+
19089+ pax_force_retaddr
19090+ ret
19091+ CFI_ENDPROC
19092+ENDPROC(atomic64_set_unchecked_cx8)
19093+
19094 ENTRY(atomic64_xchg_cx8)
19095 CFI_STARTPROC
19096
19097@@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
19098 cmpxchg8b (%esi)
19099 jne 1b
19100
19101+ pax_force_retaddr
19102 ret
19103 CFI_ENDPROC
19104 ENDPROC(atomic64_xchg_cx8)
19105
19106-.macro addsub_return func ins insc
19107-ENTRY(atomic64_\func\()_return_cx8)
19108+.macro addsub_return func ins insc unchecked=""
19109+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
19110 CFI_STARTPROC
19111 SAVE ebp
19112 SAVE ebx
19113@@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
19114 movl %edx, %ecx
19115 \ins\()l %esi, %ebx
19116 \insc\()l %edi, %ecx
19117+
19118+.ifb \unchecked
19119+#ifdef CONFIG_PAX_REFCOUNT
19120+ into
19121+2:
19122+ _ASM_EXTABLE(2b, 3f)
19123+#endif
19124+.endif
19125+
19126 LOCK_PREFIX
19127 cmpxchg8b (%ebp)
19128 jne 1b
19129-
19130-10:
19131 movl %ebx, %eax
19132 movl %ecx, %edx
19133+
19134+.ifb \unchecked
19135+#ifdef CONFIG_PAX_REFCOUNT
19136+3:
19137+#endif
19138+.endif
19139+
19140 RESTORE edi
19141 RESTORE esi
19142 RESTORE ebx
19143 RESTORE ebp
19144+ pax_force_retaddr
19145 ret
19146 CFI_ENDPROC
19147-ENDPROC(atomic64_\func\()_return_cx8)
19148+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
19149 .endm
19150
19151 addsub_return add add adc
19152 addsub_return sub sub sbb
19153+addsub_return add add adc _unchecked
19154+addsub_return sub sub sbb _unchecked
19155
19156-.macro incdec_return func ins insc
19157-ENTRY(atomic64_\func\()_return_cx8)
19158+.macro incdec_return func ins insc unchecked
19159+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
19160 CFI_STARTPROC
19161 SAVE ebx
19162
19163@@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
19164 movl %edx, %ecx
19165 \ins\()l $1, %ebx
19166 \insc\()l $0, %ecx
19167+
19168+.ifb \unchecked
19169+#ifdef CONFIG_PAX_REFCOUNT
19170+ into
19171+2:
19172+ _ASM_EXTABLE(2b, 3f)
19173+#endif
19174+.endif
19175+
19176 LOCK_PREFIX
19177 cmpxchg8b (%esi)
19178 jne 1b
19179
19180-10:
19181 movl %ebx, %eax
19182 movl %ecx, %edx
19183+
19184+.ifb \unchecked
19185+#ifdef CONFIG_PAX_REFCOUNT
19186+3:
19187+#endif
19188+.endif
19189+
19190 RESTORE ebx
19191+ pax_force_retaddr
19192 ret
19193 CFI_ENDPROC
19194-ENDPROC(atomic64_\func\()_return_cx8)
19195+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
19196 .endm
19197
19198 incdec_return inc add adc
19199 incdec_return dec sub sbb
19200+incdec_return inc add adc _unchecked
19201+incdec_return dec sub sbb _unchecked
19202
19203 ENTRY(atomic64_dec_if_positive_cx8)
19204 CFI_STARTPROC
19205@@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
19206 movl %edx, %ecx
19207 subl $1, %ebx
19208 sbb $0, %ecx
19209+
19210+#ifdef CONFIG_PAX_REFCOUNT
19211+ into
19212+1234:
19213+ _ASM_EXTABLE(1234b, 2f)
19214+#endif
19215+
19216 js 2f
19217 LOCK_PREFIX
19218 cmpxchg8b (%esi)
19219@@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
19220 movl %ebx, %eax
19221 movl %ecx, %edx
19222 RESTORE ebx
19223+ pax_force_retaddr
19224 ret
19225 CFI_ENDPROC
19226 ENDPROC(atomic64_dec_if_positive_cx8)
19227@@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
19228 movl %edx, %ecx
19229 addl %esi, %ebx
19230 adcl %edi, %ecx
19231+
19232+#ifdef CONFIG_PAX_REFCOUNT
19233+ into
19234+1234:
19235+ _ASM_EXTABLE(1234b, 3f)
19236+#endif
19237+
19238 LOCK_PREFIX
19239 cmpxchg8b (%ebp)
19240 jne 1b
19241@@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
19242 CFI_ADJUST_CFA_OFFSET -8
19243 RESTORE ebx
19244 RESTORE ebp
19245+ pax_force_retaddr
19246 ret
19247 4:
19248 cmpl %edx, 4(%esp)
19249@@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
19250 movl %edx, %ecx
19251 addl $1, %ebx
19252 adcl $0, %ecx
19253+
19254+#ifdef CONFIG_PAX_REFCOUNT
19255+ into
19256+1234:
19257+ _ASM_EXTABLE(1234b, 3f)
19258+#endif
19259+
19260 LOCK_PREFIX
19261 cmpxchg8b (%esi)
19262 jne 1b
19263@@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
19264 movl $1, %eax
19265 3:
19266 RESTORE ebx
19267+ pax_force_retaddr
19268 ret
19269 4:
19270 testl %edx, %edx
19271diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
19272index 78d16a5..fbcf666 100644
19273--- a/arch/x86/lib/checksum_32.S
19274+++ b/arch/x86/lib/checksum_32.S
19275@@ -28,7 +28,8 @@
19276 #include <linux/linkage.h>
19277 #include <asm/dwarf2.h>
19278 #include <asm/errno.h>
19279-
19280+#include <asm/segment.h>
19281+
19282 /*
19283 * computes a partial checksum, e.g. for TCP/UDP fragments
19284 */
19285@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
19286
19287 #define ARGBASE 16
19288 #define FP 12
19289-
19290-ENTRY(csum_partial_copy_generic)
19291+
19292+ENTRY(csum_partial_copy_generic_to_user)
19293 CFI_STARTPROC
19294+
19295+#ifdef CONFIG_PAX_MEMORY_UDEREF
19296+ pushl_cfi %gs
19297+ popl_cfi %es
19298+ jmp csum_partial_copy_generic
19299+#endif
19300+
19301+ENTRY(csum_partial_copy_generic_from_user)
19302+
19303+#ifdef CONFIG_PAX_MEMORY_UDEREF
19304+ pushl_cfi %gs
19305+ popl_cfi %ds
19306+#endif
19307+
19308+ENTRY(csum_partial_copy_generic)
19309 subl $4,%esp
19310 CFI_ADJUST_CFA_OFFSET 4
19311 pushl_cfi %edi
19312@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
19313 jmp 4f
19314 SRC(1: movw (%esi), %bx )
19315 addl $2, %esi
19316-DST( movw %bx, (%edi) )
19317+DST( movw %bx, %es:(%edi) )
19318 addl $2, %edi
19319 addw %bx, %ax
19320 adcl $0, %eax
19321@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
19322 SRC(1: movl (%esi), %ebx )
19323 SRC( movl 4(%esi), %edx )
19324 adcl %ebx, %eax
19325-DST( movl %ebx, (%edi) )
19326+DST( movl %ebx, %es:(%edi) )
19327 adcl %edx, %eax
19328-DST( movl %edx, 4(%edi) )
19329+DST( movl %edx, %es:4(%edi) )
19330
19331 SRC( movl 8(%esi), %ebx )
19332 SRC( movl 12(%esi), %edx )
19333 adcl %ebx, %eax
19334-DST( movl %ebx, 8(%edi) )
19335+DST( movl %ebx, %es:8(%edi) )
19336 adcl %edx, %eax
19337-DST( movl %edx, 12(%edi) )
19338+DST( movl %edx, %es:12(%edi) )
19339
19340 SRC( movl 16(%esi), %ebx )
19341 SRC( movl 20(%esi), %edx )
19342 adcl %ebx, %eax
19343-DST( movl %ebx, 16(%edi) )
19344+DST( movl %ebx, %es:16(%edi) )
19345 adcl %edx, %eax
19346-DST( movl %edx, 20(%edi) )
19347+DST( movl %edx, %es:20(%edi) )
19348
19349 SRC( movl 24(%esi), %ebx )
19350 SRC( movl 28(%esi), %edx )
19351 adcl %ebx, %eax
19352-DST( movl %ebx, 24(%edi) )
19353+DST( movl %ebx, %es:24(%edi) )
19354 adcl %edx, %eax
19355-DST( movl %edx, 28(%edi) )
19356+DST( movl %edx, %es:28(%edi) )
19357
19358 lea 32(%esi), %esi
19359 lea 32(%edi), %edi
19360@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
19361 shrl $2, %edx # This clears CF
19362 SRC(3: movl (%esi), %ebx )
19363 adcl %ebx, %eax
19364-DST( movl %ebx, (%edi) )
19365+DST( movl %ebx, %es:(%edi) )
19366 lea 4(%esi), %esi
19367 lea 4(%edi), %edi
19368 dec %edx
19369@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
19370 jb 5f
19371 SRC( movw (%esi), %cx )
19372 leal 2(%esi), %esi
19373-DST( movw %cx, (%edi) )
19374+DST( movw %cx, %es:(%edi) )
19375 leal 2(%edi), %edi
19376 je 6f
19377 shll $16,%ecx
19378 SRC(5: movb (%esi), %cl )
19379-DST( movb %cl, (%edi) )
19380+DST( movb %cl, %es:(%edi) )
19381 6: addl %ecx, %eax
19382 adcl $0, %eax
19383 7:
19384@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
19385
19386 6001:
19387 movl ARGBASE+20(%esp), %ebx # src_err_ptr
19388- movl $-EFAULT, (%ebx)
19389+ movl $-EFAULT, %ss:(%ebx)
19390
19391 # zero the complete destination - computing the rest
19392 # is too much work
19393@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
19394
19395 6002:
19396 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19397- movl $-EFAULT,(%ebx)
19398+ movl $-EFAULT,%ss:(%ebx)
19399 jmp 5000b
19400
19401 .previous
19402
19403+ pushl_cfi %ss
19404+ popl_cfi %ds
19405+ pushl_cfi %ss
19406+ popl_cfi %es
19407 popl_cfi %ebx
19408 CFI_RESTORE ebx
19409 popl_cfi %esi
19410@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
19411 popl_cfi %ecx # equivalent to addl $4,%esp
19412 ret
19413 CFI_ENDPROC
19414-ENDPROC(csum_partial_copy_generic)
19415+ENDPROC(csum_partial_copy_generic_to_user)
19416
19417 #else
19418
19419 /* Version for PentiumII/PPro */
19420
19421 #define ROUND1(x) \
19422+ nop; nop; nop; \
19423 SRC(movl x(%esi), %ebx ) ; \
19424 addl %ebx, %eax ; \
19425- DST(movl %ebx, x(%edi) ) ;
19426+ DST(movl %ebx, %es:x(%edi)) ;
19427
19428 #define ROUND(x) \
19429+ nop; nop; nop; \
19430 SRC(movl x(%esi), %ebx ) ; \
19431 adcl %ebx, %eax ; \
19432- DST(movl %ebx, x(%edi) ) ;
19433+ DST(movl %ebx, %es:x(%edi)) ;
19434
19435 #define ARGBASE 12
19436-
19437-ENTRY(csum_partial_copy_generic)
19438+
19439+ENTRY(csum_partial_copy_generic_to_user)
19440 CFI_STARTPROC
19441+
19442+#ifdef CONFIG_PAX_MEMORY_UDEREF
19443+ pushl_cfi %gs
19444+ popl_cfi %es
19445+ jmp csum_partial_copy_generic
19446+#endif
19447+
19448+ENTRY(csum_partial_copy_generic_from_user)
19449+
19450+#ifdef CONFIG_PAX_MEMORY_UDEREF
19451+ pushl_cfi %gs
19452+ popl_cfi %ds
19453+#endif
19454+
19455+ENTRY(csum_partial_copy_generic)
19456 pushl_cfi %ebx
19457 CFI_REL_OFFSET ebx, 0
19458 pushl_cfi %edi
19459@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
19460 subl %ebx, %edi
19461 lea -1(%esi),%edx
19462 andl $-32,%edx
19463- lea 3f(%ebx,%ebx), %ebx
19464+ lea 3f(%ebx,%ebx,2), %ebx
19465 testl %esi, %esi
19466 jmp *%ebx
19467 1: addl $64,%esi
19468@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
19469 jb 5f
19470 SRC( movw (%esi), %dx )
19471 leal 2(%esi), %esi
19472-DST( movw %dx, (%edi) )
19473+DST( movw %dx, %es:(%edi) )
19474 leal 2(%edi), %edi
19475 je 6f
19476 shll $16,%edx
19477 5:
19478 SRC( movb (%esi), %dl )
19479-DST( movb %dl, (%edi) )
19480+DST( movb %dl, %es:(%edi) )
19481 6: addl %edx, %eax
19482 adcl $0, %eax
19483 7:
19484 .section .fixup, "ax"
19485 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19486- movl $-EFAULT, (%ebx)
19487+ movl $-EFAULT, %ss:(%ebx)
19488 # zero the complete destination (computing the rest is too much work)
19489 movl ARGBASE+8(%esp),%edi # dst
19490 movl ARGBASE+12(%esp),%ecx # len
19491@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
19492 rep; stosb
19493 jmp 7b
19494 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19495- movl $-EFAULT, (%ebx)
19496+ movl $-EFAULT, %ss:(%ebx)
19497 jmp 7b
19498 .previous
19499
19500+#ifdef CONFIG_PAX_MEMORY_UDEREF
19501+ pushl_cfi %ss
19502+ popl_cfi %ds
19503+ pushl_cfi %ss
19504+ popl_cfi %es
19505+#endif
19506+
19507 popl_cfi %esi
19508 CFI_RESTORE esi
19509 popl_cfi %edi
19510@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
19511 CFI_RESTORE ebx
19512 ret
19513 CFI_ENDPROC
19514-ENDPROC(csum_partial_copy_generic)
19515+ENDPROC(csum_partial_copy_generic_to_user)
19516
19517 #undef ROUND
19518 #undef ROUND1
19519diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
19520index f2145cf..cea889d 100644
19521--- a/arch/x86/lib/clear_page_64.S
19522+++ b/arch/x86/lib/clear_page_64.S
19523@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
19524 movl $4096/8,%ecx
19525 xorl %eax,%eax
19526 rep stosq
19527+ pax_force_retaddr
19528 ret
19529 CFI_ENDPROC
19530 ENDPROC(clear_page_c)
19531@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
19532 movl $4096,%ecx
19533 xorl %eax,%eax
19534 rep stosb
19535+ pax_force_retaddr
19536 ret
19537 CFI_ENDPROC
19538 ENDPROC(clear_page_c_e)
19539@@ -43,6 +45,7 @@ ENTRY(clear_page)
19540 leaq 64(%rdi),%rdi
19541 jnz .Lloop
19542 nop
19543+ pax_force_retaddr
19544 ret
19545 CFI_ENDPROC
19546 .Lclear_page_end:
19547@@ -58,7 +61,7 @@ ENDPROC(clear_page)
19548
19549 #include <asm/cpufeature.h>
19550
19551- .section .altinstr_replacement,"ax"
19552+ .section .altinstr_replacement,"a"
19553 1: .byte 0xeb /* jmp <disp8> */
19554 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19555 2: .byte 0xeb /* jmp <disp8> */
19556diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
19557index 1e572c5..2a162cd 100644
19558--- a/arch/x86/lib/cmpxchg16b_emu.S
19559+++ b/arch/x86/lib/cmpxchg16b_emu.S
19560@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
19561
19562 popf
19563 mov $1, %al
19564+ pax_force_retaddr
19565 ret
19566
19567 not_same:
19568 popf
19569 xor %al,%al
19570+ pax_force_retaddr
19571 ret
19572
19573 CFI_ENDPROC
19574diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
19575index 01c805b..dccb07f 100644
19576--- a/arch/x86/lib/copy_page_64.S
19577+++ b/arch/x86/lib/copy_page_64.S
19578@@ -9,6 +9,7 @@ copy_page_c:
19579 CFI_STARTPROC
19580 movl $4096/8,%ecx
19581 rep movsq
19582+ pax_force_retaddr
19583 ret
19584 CFI_ENDPROC
19585 ENDPROC(copy_page_c)
19586@@ -39,7 +40,7 @@ ENTRY(copy_page)
19587 movq 16 (%rsi), %rdx
19588 movq 24 (%rsi), %r8
19589 movq 32 (%rsi), %r9
19590- movq 40 (%rsi), %r10
19591+ movq 40 (%rsi), %r13
19592 movq 48 (%rsi), %r11
19593 movq 56 (%rsi), %r12
19594
19595@@ -50,7 +51,7 @@ ENTRY(copy_page)
19596 movq %rdx, 16 (%rdi)
19597 movq %r8, 24 (%rdi)
19598 movq %r9, 32 (%rdi)
19599- movq %r10, 40 (%rdi)
19600+ movq %r13, 40 (%rdi)
19601 movq %r11, 48 (%rdi)
19602 movq %r12, 56 (%rdi)
19603
19604@@ -69,7 +70,7 @@ ENTRY(copy_page)
19605 movq 16 (%rsi), %rdx
19606 movq 24 (%rsi), %r8
19607 movq 32 (%rsi), %r9
19608- movq 40 (%rsi), %r10
19609+ movq 40 (%rsi), %r13
19610 movq 48 (%rsi), %r11
19611 movq 56 (%rsi), %r12
19612
19613@@ -78,7 +79,7 @@ ENTRY(copy_page)
19614 movq %rdx, 16 (%rdi)
19615 movq %r8, 24 (%rdi)
19616 movq %r9, 32 (%rdi)
19617- movq %r10, 40 (%rdi)
19618+ movq %r13, 40 (%rdi)
19619 movq %r11, 48 (%rdi)
19620 movq %r12, 56 (%rdi)
19621
19622@@ -95,6 +96,7 @@ ENTRY(copy_page)
19623 CFI_RESTORE r13
19624 addq $3*8,%rsp
19625 CFI_ADJUST_CFA_OFFSET -3*8
19626+ pax_force_retaddr
19627 ret
19628 .Lcopy_page_end:
19629 CFI_ENDPROC
19630@@ -105,7 +107,7 @@ ENDPROC(copy_page)
19631
19632 #include <asm/cpufeature.h>
19633
19634- .section .altinstr_replacement,"ax"
19635+ .section .altinstr_replacement,"a"
19636 1: .byte 0xeb /* jmp <disp8> */
19637 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19638 2:
19639diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
19640index 0248402..821c786 100644
19641--- a/arch/x86/lib/copy_user_64.S
19642+++ b/arch/x86/lib/copy_user_64.S
19643@@ -16,6 +16,7 @@
19644 #include <asm/thread_info.h>
19645 #include <asm/cpufeature.h>
19646 #include <asm/alternative-asm.h>
19647+#include <asm/pgtable.h>
19648
19649 /*
19650 * By placing feature2 after feature1 in altinstructions section, we logically
19651@@ -29,7 +30,7 @@
19652 .byte 0xe9 /* 32bit jump */
19653 .long \orig-1f /* by default jump to orig */
19654 1:
19655- .section .altinstr_replacement,"ax"
19656+ .section .altinstr_replacement,"a"
19657 2: .byte 0xe9 /* near jump with 32bit immediate */
19658 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
19659 3: .byte 0xe9 /* near jump with 32bit immediate */
19660@@ -71,47 +72,20 @@
19661 #endif
19662 .endm
19663
19664-/* Standard copy_to_user with segment limit checking */
19665-ENTRY(_copy_to_user)
19666- CFI_STARTPROC
19667- GET_THREAD_INFO(%rax)
19668- movq %rdi,%rcx
19669- addq %rdx,%rcx
19670- jc bad_to_user
19671- cmpq TI_addr_limit(%rax),%rcx
19672- ja bad_to_user
19673- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19674- copy_user_generic_unrolled,copy_user_generic_string, \
19675- copy_user_enhanced_fast_string
19676- CFI_ENDPROC
19677-ENDPROC(_copy_to_user)
19678-
19679-/* Standard copy_from_user with segment limit checking */
19680-ENTRY(_copy_from_user)
19681- CFI_STARTPROC
19682- GET_THREAD_INFO(%rax)
19683- movq %rsi,%rcx
19684- addq %rdx,%rcx
19685- jc bad_from_user
19686- cmpq TI_addr_limit(%rax),%rcx
19687- ja bad_from_user
19688- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19689- copy_user_generic_unrolled,copy_user_generic_string, \
19690- copy_user_enhanced_fast_string
19691- CFI_ENDPROC
19692-ENDPROC(_copy_from_user)
19693-
19694 .section .fixup,"ax"
19695 /* must zero dest */
19696 ENTRY(bad_from_user)
19697 bad_from_user:
19698 CFI_STARTPROC
19699+ testl %edx,%edx
19700+ js bad_to_user
19701 movl %edx,%ecx
19702 xorl %eax,%eax
19703 rep
19704 stosb
19705 bad_to_user:
19706 movl %edx,%eax
19707+ pax_force_retaddr
19708 ret
19709 CFI_ENDPROC
19710 ENDPROC(bad_from_user)
19711@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
19712 jz 17f
19713 1: movq (%rsi),%r8
19714 2: movq 1*8(%rsi),%r9
19715-3: movq 2*8(%rsi),%r10
19716+3: movq 2*8(%rsi),%rax
19717 4: movq 3*8(%rsi),%r11
19718 5: movq %r8,(%rdi)
19719 6: movq %r9,1*8(%rdi)
19720-7: movq %r10,2*8(%rdi)
19721+7: movq %rax,2*8(%rdi)
19722 8: movq %r11,3*8(%rdi)
19723 9: movq 4*8(%rsi),%r8
19724 10: movq 5*8(%rsi),%r9
19725-11: movq 6*8(%rsi),%r10
19726+11: movq 6*8(%rsi),%rax
19727 12: movq 7*8(%rsi),%r11
19728 13: movq %r8,4*8(%rdi)
19729 14: movq %r9,5*8(%rdi)
19730-15: movq %r10,6*8(%rdi)
19731+15: movq %rax,6*8(%rdi)
19732 16: movq %r11,7*8(%rdi)
19733 leaq 64(%rsi),%rsi
19734 leaq 64(%rdi),%rdi
19735@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
19736 decl %ecx
19737 jnz 21b
19738 23: xor %eax,%eax
19739+ pax_force_retaddr
19740 ret
19741
19742 .section .fixup,"ax"
19743@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
19744 3: rep
19745 movsb
19746 4: xorl %eax,%eax
19747+ pax_force_retaddr
19748 ret
19749
19750 .section .fixup,"ax"
19751@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
19752 1: rep
19753 movsb
19754 2: xorl %eax,%eax
19755+ pax_force_retaddr
19756 ret
19757
19758 .section .fixup,"ax"
19759diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
19760index cb0c112..e3a6895 100644
19761--- a/arch/x86/lib/copy_user_nocache_64.S
19762+++ b/arch/x86/lib/copy_user_nocache_64.S
19763@@ -8,12 +8,14 @@
19764
19765 #include <linux/linkage.h>
19766 #include <asm/dwarf2.h>
19767+#include <asm/alternative-asm.h>
19768
19769 #define FIX_ALIGNMENT 1
19770
19771 #include <asm/current.h>
19772 #include <asm/asm-offsets.h>
19773 #include <asm/thread_info.h>
19774+#include <asm/pgtable.h>
19775
19776 .macro ALIGN_DESTINATION
19777 #ifdef FIX_ALIGNMENT
19778@@ -50,6 +52,15 @@
19779 */
19780 ENTRY(__copy_user_nocache)
19781 CFI_STARTPROC
19782+
19783+#ifdef CONFIG_PAX_MEMORY_UDEREF
19784+ mov $PAX_USER_SHADOW_BASE,%rcx
19785+ cmp %rcx,%rsi
19786+ jae 1f
19787+ add %rcx,%rsi
19788+1:
19789+#endif
19790+
19791 cmpl $8,%edx
19792 jb 20f /* less then 8 bytes, go to byte copy loop */
19793 ALIGN_DESTINATION
19794@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
19795 jz 17f
19796 1: movq (%rsi),%r8
19797 2: movq 1*8(%rsi),%r9
19798-3: movq 2*8(%rsi),%r10
19799+3: movq 2*8(%rsi),%rax
19800 4: movq 3*8(%rsi),%r11
19801 5: movnti %r8,(%rdi)
19802 6: movnti %r9,1*8(%rdi)
19803-7: movnti %r10,2*8(%rdi)
19804+7: movnti %rax,2*8(%rdi)
19805 8: movnti %r11,3*8(%rdi)
19806 9: movq 4*8(%rsi),%r8
19807 10: movq 5*8(%rsi),%r9
19808-11: movq 6*8(%rsi),%r10
19809+11: movq 6*8(%rsi),%rax
19810 12: movq 7*8(%rsi),%r11
19811 13: movnti %r8,4*8(%rdi)
19812 14: movnti %r9,5*8(%rdi)
19813-15: movnti %r10,6*8(%rdi)
19814+15: movnti %rax,6*8(%rdi)
19815 16: movnti %r11,7*8(%rdi)
19816 leaq 64(%rsi),%rsi
19817 leaq 64(%rdi),%rdi
19818@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
19819 jnz 21b
19820 23: xorl %eax,%eax
19821 sfence
19822+ pax_force_retaddr
19823 ret
19824
19825 .section .fixup,"ax"
19826diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
19827index fb903b7..c92b7f7 100644
19828--- a/arch/x86/lib/csum-copy_64.S
19829+++ b/arch/x86/lib/csum-copy_64.S
19830@@ -8,6 +8,7 @@
19831 #include <linux/linkage.h>
19832 #include <asm/dwarf2.h>
19833 #include <asm/errno.h>
19834+#include <asm/alternative-asm.h>
19835
19836 /*
19837 * Checksum copy with exception handling.
19838@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
19839 CFI_RESTORE rbp
19840 addq $7*8, %rsp
19841 CFI_ADJUST_CFA_OFFSET -7*8
19842+ pax_force_retaddr 0, 1
19843 ret
19844 CFI_RESTORE_STATE
19845
19846diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
19847index 459b58a..9570bc7 100644
19848--- a/arch/x86/lib/csum-wrappers_64.c
19849+++ b/arch/x86/lib/csum-wrappers_64.c
19850@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
19851 len -= 2;
19852 }
19853 }
19854- isum = csum_partial_copy_generic((__force const void *)src,
19855+
19856+#ifdef CONFIG_PAX_MEMORY_UDEREF
19857+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19858+ src += PAX_USER_SHADOW_BASE;
19859+#endif
19860+
19861+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
19862 dst, len, isum, errp, NULL);
19863 if (unlikely(*errp))
19864 goto out_err;
19865@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
19866 }
19867
19868 *errp = 0;
19869- return csum_partial_copy_generic(src, (void __force *)dst,
19870+
19871+#ifdef CONFIG_PAX_MEMORY_UDEREF
19872+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19873+ dst += PAX_USER_SHADOW_BASE;
19874+#endif
19875+
19876+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
19877 len, isum, NULL, errp);
19878 }
19879 EXPORT_SYMBOL(csum_partial_copy_to_user);
19880diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
19881index 51f1504..ddac4c1 100644
19882--- a/arch/x86/lib/getuser.S
19883+++ b/arch/x86/lib/getuser.S
19884@@ -33,15 +33,38 @@
19885 #include <asm/asm-offsets.h>
19886 #include <asm/thread_info.h>
19887 #include <asm/asm.h>
19888+#include <asm/segment.h>
19889+#include <asm/pgtable.h>
19890+#include <asm/alternative-asm.h>
19891+
19892+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19893+#define __copyuser_seg gs;
19894+#else
19895+#define __copyuser_seg
19896+#endif
19897
19898 .text
19899 ENTRY(__get_user_1)
19900 CFI_STARTPROC
19901+
19902+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19903 GET_THREAD_INFO(%_ASM_DX)
19904 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19905 jae bad_get_user
19906-1: movzb (%_ASM_AX),%edx
19907+
19908+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19909+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19910+ cmp %_ASM_DX,%_ASM_AX
19911+ jae 1234f
19912+ add %_ASM_DX,%_ASM_AX
19913+1234:
19914+#endif
19915+
19916+#endif
19917+
19918+1: __copyuser_seg movzb (%_ASM_AX),%edx
19919 xor %eax,%eax
19920+ pax_force_retaddr
19921 ret
19922 CFI_ENDPROC
19923 ENDPROC(__get_user_1)
19924@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
19925 ENTRY(__get_user_2)
19926 CFI_STARTPROC
19927 add $1,%_ASM_AX
19928+
19929+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19930 jc bad_get_user
19931 GET_THREAD_INFO(%_ASM_DX)
19932 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19933 jae bad_get_user
19934-2: movzwl -1(%_ASM_AX),%edx
19935+
19936+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19937+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19938+ cmp %_ASM_DX,%_ASM_AX
19939+ jae 1234f
19940+ add %_ASM_DX,%_ASM_AX
19941+1234:
19942+#endif
19943+
19944+#endif
19945+
19946+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19947 xor %eax,%eax
19948+ pax_force_retaddr
19949 ret
19950 CFI_ENDPROC
19951 ENDPROC(__get_user_2)
19952@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
19953 ENTRY(__get_user_4)
19954 CFI_STARTPROC
19955 add $3,%_ASM_AX
19956+
19957+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19958 jc bad_get_user
19959 GET_THREAD_INFO(%_ASM_DX)
19960 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19961 jae bad_get_user
19962-3: mov -3(%_ASM_AX),%edx
19963+
19964+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19965+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19966+ cmp %_ASM_DX,%_ASM_AX
19967+ jae 1234f
19968+ add %_ASM_DX,%_ASM_AX
19969+1234:
19970+#endif
19971+
19972+#endif
19973+
19974+3: __copyuser_seg mov -3(%_ASM_AX),%edx
19975 xor %eax,%eax
19976+ pax_force_retaddr
19977 ret
19978 CFI_ENDPROC
19979 ENDPROC(__get_user_4)
19980@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
19981 GET_THREAD_INFO(%_ASM_DX)
19982 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19983 jae bad_get_user
19984+
19985+#ifdef CONFIG_PAX_MEMORY_UDEREF
19986+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19987+ cmp %_ASM_DX,%_ASM_AX
19988+ jae 1234f
19989+ add %_ASM_DX,%_ASM_AX
19990+1234:
19991+#endif
19992+
19993 4: movq -7(%_ASM_AX),%_ASM_DX
19994 xor %eax,%eax
19995+ pax_force_retaddr
19996 ret
19997 CFI_ENDPROC
19998 ENDPROC(__get_user_8)
19999@@ -91,6 +152,7 @@ bad_get_user:
20000 CFI_STARTPROC
20001 xor %edx,%edx
20002 mov $(-EFAULT),%_ASM_AX
20003+ pax_force_retaddr
20004 ret
20005 CFI_ENDPROC
20006 END(bad_get_user)
20007diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
20008index 374562e..a75830b 100644
20009--- a/arch/x86/lib/insn.c
20010+++ b/arch/x86/lib/insn.c
20011@@ -21,6 +21,11 @@
20012 #include <linux/string.h>
20013 #include <asm/inat.h>
20014 #include <asm/insn.h>
20015+#ifdef __KERNEL__
20016+#include <asm/pgtable_types.h>
20017+#else
20018+#define ktla_ktva(addr) addr
20019+#endif
20020
20021 /* Verify next sizeof(t) bytes can be on the same instruction */
20022 #define validate_next(t, insn, n) \
20023@@ -49,8 +54,8 @@
20024 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
20025 {
20026 memset(insn, 0, sizeof(*insn));
20027- insn->kaddr = kaddr;
20028- insn->next_byte = kaddr;
20029+ insn->kaddr = ktla_ktva(kaddr);
20030+ insn->next_byte = ktla_ktva(kaddr);
20031 insn->x86_64 = x86_64 ? 1 : 0;
20032 insn->opnd_bytes = 4;
20033 if (x86_64)
20034diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
20035index 05a95e7..326f2fa 100644
20036--- a/arch/x86/lib/iomap_copy_64.S
20037+++ b/arch/x86/lib/iomap_copy_64.S
20038@@ -17,6 +17,7 @@
20039
20040 #include <linux/linkage.h>
20041 #include <asm/dwarf2.h>
20042+#include <asm/alternative-asm.h>
20043
20044 /*
20045 * override generic version in lib/iomap_copy.c
20046@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
20047 CFI_STARTPROC
20048 movl %edx,%ecx
20049 rep movsd
20050+ pax_force_retaddr
20051 ret
20052 CFI_ENDPROC
20053 ENDPROC(__iowrite32_copy)
20054diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
20055index efbf2a0..8893637 100644
20056--- a/arch/x86/lib/memcpy_64.S
20057+++ b/arch/x86/lib/memcpy_64.S
20058@@ -34,6 +34,7 @@
20059 rep movsq
20060 movl %edx, %ecx
20061 rep movsb
20062+ pax_force_retaddr
20063 ret
20064 .Lmemcpy_e:
20065 .previous
20066@@ -51,6 +52,7 @@
20067
20068 movl %edx, %ecx
20069 rep movsb
20070+ pax_force_retaddr
20071 ret
20072 .Lmemcpy_e_e:
20073 .previous
20074@@ -81,13 +83,13 @@ ENTRY(memcpy)
20075 */
20076 movq 0*8(%rsi), %r8
20077 movq 1*8(%rsi), %r9
20078- movq 2*8(%rsi), %r10
20079+ movq 2*8(%rsi), %rcx
20080 movq 3*8(%rsi), %r11
20081 leaq 4*8(%rsi), %rsi
20082
20083 movq %r8, 0*8(%rdi)
20084 movq %r9, 1*8(%rdi)
20085- movq %r10, 2*8(%rdi)
20086+ movq %rcx, 2*8(%rdi)
20087 movq %r11, 3*8(%rdi)
20088 leaq 4*8(%rdi), %rdi
20089 jae .Lcopy_forward_loop
20090@@ -110,12 +112,12 @@ ENTRY(memcpy)
20091 subq $0x20, %rdx
20092 movq -1*8(%rsi), %r8
20093 movq -2*8(%rsi), %r9
20094- movq -3*8(%rsi), %r10
20095+ movq -3*8(%rsi), %rcx
20096 movq -4*8(%rsi), %r11
20097 leaq -4*8(%rsi), %rsi
20098 movq %r8, -1*8(%rdi)
20099 movq %r9, -2*8(%rdi)
20100- movq %r10, -3*8(%rdi)
20101+ movq %rcx, -3*8(%rdi)
20102 movq %r11, -4*8(%rdi)
20103 leaq -4*8(%rdi), %rdi
20104 jae .Lcopy_backward_loop
20105@@ -135,12 +137,13 @@ ENTRY(memcpy)
20106 */
20107 movq 0*8(%rsi), %r8
20108 movq 1*8(%rsi), %r9
20109- movq -2*8(%rsi, %rdx), %r10
20110+ movq -2*8(%rsi, %rdx), %rcx
20111 movq -1*8(%rsi, %rdx), %r11
20112 movq %r8, 0*8(%rdi)
20113 movq %r9, 1*8(%rdi)
20114- movq %r10, -2*8(%rdi, %rdx)
20115+ movq %rcx, -2*8(%rdi, %rdx)
20116 movq %r11, -1*8(%rdi, %rdx)
20117+ pax_force_retaddr
20118 retq
20119 .p2align 4
20120 .Lless_16bytes:
20121@@ -153,6 +156,7 @@ ENTRY(memcpy)
20122 movq -1*8(%rsi, %rdx), %r9
20123 movq %r8, 0*8(%rdi)
20124 movq %r9, -1*8(%rdi, %rdx)
20125+ pax_force_retaddr
20126 retq
20127 .p2align 4
20128 .Lless_8bytes:
20129@@ -166,6 +170,7 @@ ENTRY(memcpy)
20130 movl -4(%rsi, %rdx), %r8d
20131 movl %ecx, (%rdi)
20132 movl %r8d, -4(%rdi, %rdx)
20133+ pax_force_retaddr
20134 retq
20135 .p2align 4
20136 .Lless_3bytes:
20137@@ -183,6 +188,7 @@ ENTRY(memcpy)
20138 jnz .Lloop_1
20139
20140 .Lend:
20141+ pax_force_retaddr
20142 retq
20143 CFI_ENDPROC
20144 ENDPROC(memcpy)
20145diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
20146index ee16461..c39c199 100644
20147--- a/arch/x86/lib/memmove_64.S
20148+++ b/arch/x86/lib/memmove_64.S
20149@@ -61,13 +61,13 @@ ENTRY(memmove)
20150 5:
20151 sub $0x20, %rdx
20152 movq 0*8(%rsi), %r11
20153- movq 1*8(%rsi), %r10
20154+ movq 1*8(%rsi), %rcx
20155 movq 2*8(%rsi), %r9
20156 movq 3*8(%rsi), %r8
20157 leaq 4*8(%rsi), %rsi
20158
20159 movq %r11, 0*8(%rdi)
20160- movq %r10, 1*8(%rdi)
20161+ movq %rcx, 1*8(%rdi)
20162 movq %r9, 2*8(%rdi)
20163 movq %r8, 3*8(%rdi)
20164 leaq 4*8(%rdi), %rdi
20165@@ -81,10 +81,10 @@ ENTRY(memmove)
20166 4:
20167 movq %rdx, %rcx
20168 movq -8(%rsi, %rdx), %r11
20169- lea -8(%rdi, %rdx), %r10
20170+ lea -8(%rdi, %rdx), %r9
20171 shrq $3, %rcx
20172 rep movsq
20173- movq %r11, (%r10)
20174+ movq %r11, (%r9)
20175 jmp 13f
20176 .Lmemmove_end_forward:
20177
20178@@ -95,14 +95,14 @@ ENTRY(memmove)
20179 7:
20180 movq %rdx, %rcx
20181 movq (%rsi), %r11
20182- movq %rdi, %r10
20183+ movq %rdi, %r9
20184 leaq -8(%rsi, %rdx), %rsi
20185 leaq -8(%rdi, %rdx), %rdi
20186 shrq $3, %rcx
20187 std
20188 rep movsq
20189 cld
20190- movq %r11, (%r10)
20191+ movq %r11, (%r9)
20192 jmp 13f
20193
20194 /*
20195@@ -127,13 +127,13 @@ ENTRY(memmove)
20196 8:
20197 subq $0x20, %rdx
20198 movq -1*8(%rsi), %r11
20199- movq -2*8(%rsi), %r10
20200+ movq -2*8(%rsi), %rcx
20201 movq -3*8(%rsi), %r9
20202 movq -4*8(%rsi), %r8
20203 leaq -4*8(%rsi), %rsi
20204
20205 movq %r11, -1*8(%rdi)
20206- movq %r10, -2*8(%rdi)
20207+ movq %rcx, -2*8(%rdi)
20208 movq %r9, -3*8(%rdi)
20209 movq %r8, -4*8(%rdi)
20210 leaq -4*8(%rdi), %rdi
20211@@ -151,11 +151,11 @@ ENTRY(memmove)
20212 * Move data from 16 bytes to 31 bytes.
20213 */
20214 movq 0*8(%rsi), %r11
20215- movq 1*8(%rsi), %r10
20216+ movq 1*8(%rsi), %rcx
20217 movq -2*8(%rsi, %rdx), %r9
20218 movq -1*8(%rsi, %rdx), %r8
20219 movq %r11, 0*8(%rdi)
20220- movq %r10, 1*8(%rdi)
20221+ movq %rcx, 1*8(%rdi)
20222 movq %r9, -2*8(%rdi, %rdx)
20223 movq %r8, -1*8(%rdi, %rdx)
20224 jmp 13f
20225@@ -167,9 +167,9 @@ ENTRY(memmove)
20226 * Move data from 8 bytes to 15 bytes.
20227 */
20228 movq 0*8(%rsi), %r11
20229- movq -1*8(%rsi, %rdx), %r10
20230+ movq -1*8(%rsi, %rdx), %r9
20231 movq %r11, 0*8(%rdi)
20232- movq %r10, -1*8(%rdi, %rdx)
20233+ movq %r9, -1*8(%rdi, %rdx)
20234 jmp 13f
20235 10:
20236 cmpq $4, %rdx
20237@@ -178,9 +178,9 @@ ENTRY(memmove)
20238 * Move data from 4 bytes to 7 bytes.
20239 */
20240 movl (%rsi), %r11d
20241- movl -4(%rsi, %rdx), %r10d
20242+ movl -4(%rsi, %rdx), %r9d
20243 movl %r11d, (%rdi)
20244- movl %r10d, -4(%rdi, %rdx)
20245+ movl %r9d, -4(%rdi, %rdx)
20246 jmp 13f
20247 11:
20248 cmp $2, %rdx
20249@@ -189,9 +189,9 @@ ENTRY(memmove)
20250 * Move data from 2 bytes to 3 bytes.
20251 */
20252 movw (%rsi), %r11w
20253- movw -2(%rsi, %rdx), %r10w
20254+ movw -2(%rsi, %rdx), %r9w
20255 movw %r11w, (%rdi)
20256- movw %r10w, -2(%rdi, %rdx)
20257+ movw %r9w, -2(%rdi, %rdx)
20258 jmp 13f
20259 12:
20260 cmp $1, %rdx
20261@@ -202,6 +202,7 @@ ENTRY(memmove)
20262 movb (%rsi), %r11b
20263 movb %r11b, (%rdi)
20264 13:
20265+ pax_force_retaddr
20266 retq
20267 CFI_ENDPROC
20268
20269@@ -210,6 +211,7 @@ ENTRY(memmove)
20270 /* Forward moving data. */
20271 movq %rdx, %rcx
20272 rep movsb
20273+ pax_force_retaddr
20274 retq
20275 .Lmemmove_end_forward_efs:
20276 .previous
20277diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
20278index 79bd454..dff325a 100644
20279--- a/arch/x86/lib/memset_64.S
20280+++ b/arch/x86/lib/memset_64.S
20281@@ -31,6 +31,7 @@
20282 movl %r8d,%ecx
20283 rep stosb
20284 movq %r9,%rax
20285+ pax_force_retaddr
20286 ret
20287 .Lmemset_e:
20288 .previous
20289@@ -53,6 +54,7 @@
20290 movl %edx,%ecx
20291 rep stosb
20292 movq %r9,%rax
20293+ pax_force_retaddr
20294 ret
20295 .Lmemset_e_e:
20296 .previous
20297@@ -60,13 +62,13 @@
20298 ENTRY(memset)
20299 ENTRY(__memset)
20300 CFI_STARTPROC
20301- movq %rdi,%r10
20302 movq %rdx,%r11
20303
20304 /* expand byte value */
20305 movzbl %sil,%ecx
20306 movabs $0x0101010101010101,%rax
20307 mul %rcx /* with rax, clobbers rdx */
20308+ movq %rdi,%rdx
20309
20310 /* align dst */
20311 movl %edi,%r9d
20312@@ -120,7 +122,8 @@ ENTRY(__memset)
20313 jnz .Lloop_1
20314
20315 .Lende:
20316- movq %r10,%rax
20317+ movq %rdx,%rax
20318+ pax_force_retaddr
20319 ret
20320
20321 CFI_RESTORE_STATE
20322diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
20323index c9f2d9b..e7fd2c0 100644
20324--- a/arch/x86/lib/mmx_32.c
20325+++ b/arch/x86/lib/mmx_32.c
20326@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20327 {
20328 void *p;
20329 int i;
20330+ unsigned long cr0;
20331
20332 if (unlikely(in_interrupt()))
20333 return __memcpy(to, from, len);
20334@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20335 kernel_fpu_begin();
20336
20337 __asm__ __volatile__ (
20338- "1: prefetch (%0)\n" /* This set is 28 bytes */
20339- " prefetch 64(%0)\n"
20340- " prefetch 128(%0)\n"
20341- " prefetch 192(%0)\n"
20342- " prefetch 256(%0)\n"
20343+ "1: prefetch (%1)\n" /* This set is 28 bytes */
20344+ " prefetch 64(%1)\n"
20345+ " prefetch 128(%1)\n"
20346+ " prefetch 192(%1)\n"
20347+ " prefetch 256(%1)\n"
20348 "2: \n"
20349 ".section .fixup, \"ax\"\n"
20350- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20351+ "3: \n"
20352+
20353+#ifdef CONFIG_PAX_KERNEXEC
20354+ " movl %%cr0, %0\n"
20355+ " movl %0, %%eax\n"
20356+ " andl $0xFFFEFFFF, %%eax\n"
20357+ " movl %%eax, %%cr0\n"
20358+#endif
20359+
20360+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20361+
20362+#ifdef CONFIG_PAX_KERNEXEC
20363+ " movl %0, %%cr0\n"
20364+#endif
20365+
20366 " jmp 2b\n"
20367 ".previous\n"
20368 _ASM_EXTABLE(1b, 3b)
20369- : : "r" (from));
20370+ : "=&r" (cr0) : "r" (from) : "ax");
20371
20372 for ( ; i > 5; i--) {
20373 __asm__ __volatile__ (
20374- "1: prefetch 320(%0)\n"
20375- "2: movq (%0), %%mm0\n"
20376- " movq 8(%0), %%mm1\n"
20377- " movq 16(%0), %%mm2\n"
20378- " movq 24(%0), %%mm3\n"
20379- " movq %%mm0, (%1)\n"
20380- " movq %%mm1, 8(%1)\n"
20381- " movq %%mm2, 16(%1)\n"
20382- " movq %%mm3, 24(%1)\n"
20383- " movq 32(%0), %%mm0\n"
20384- " movq 40(%0), %%mm1\n"
20385- " movq 48(%0), %%mm2\n"
20386- " movq 56(%0), %%mm3\n"
20387- " movq %%mm0, 32(%1)\n"
20388- " movq %%mm1, 40(%1)\n"
20389- " movq %%mm2, 48(%1)\n"
20390- " movq %%mm3, 56(%1)\n"
20391+ "1: prefetch 320(%1)\n"
20392+ "2: movq (%1), %%mm0\n"
20393+ " movq 8(%1), %%mm1\n"
20394+ " movq 16(%1), %%mm2\n"
20395+ " movq 24(%1), %%mm3\n"
20396+ " movq %%mm0, (%2)\n"
20397+ " movq %%mm1, 8(%2)\n"
20398+ " movq %%mm2, 16(%2)\n"
20399+ " movq %%mm3, 24(%2)\n"
20400+ " movq 32(%1), %%mm0\n"
20401+ " movq 40(%1), %%mm1\n"
20402+ " movq 48(%1), %%mm2\n"
20403+ " movq 56(%1), %%mm3\n"
20404+ " movq %%mm0, 32(%2)\n"
20405+ " movq %%mm1, 40(%2)\n"
20406+ " movq %%mm2, 48(%2)\n"
20407+ " movq %%mm3, 56(%2)\n"
20408 ".section .fixup, \"ax\"\n"
20409- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20410+ "3:\n"
20411+
20412+#ifdef CONFIG_PAX_KERNEXEC
20413+ " movl %%cr0, %0\n"
20414+ " movl %0, %%eax\n"
20415+ " andl $0xFFFEFFFF, %%eax\n"
20416+ " movl %%eax, %%cr0\n"
20417+#endif
20418+
20419+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20420+
20421+#ifdef CONFIG_PAX_KERNEXEC
20422+ " movl %0, %%cr0\n"
20423+#endif
20424+
20425 " jmp 2b\n"
20426 ".previous\n"
20427 _ASM_EXTABLE(1b, 3b)
20428- : : "r" (from), "r" (to) : "memory");
20429+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20430
20431 from += 64;
20432 to += 64;
20433@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
20434 static void fast_copy_page(void *to, void *from)
20435 {
20436 int i;
20437+ unsigned long cr0;
20438
20439 kernel_fpu_begin();
20440
20441@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
20442 * but that is for later. -AV
20443 */
20444 __asm__ __volatile__(
20445- "1: prefetch (%0)\n"
20446- " prefetch 64(%0)\n"
20447- " prefetch 128(%0)\n"
20448- " prefetch 192(%0)\n"
20449- " prefetch 256(%0)\n"
20450+ "1: prefetch (%1)\n"
20451+ " prefetch 64(%1)\n"
20452+ " prefetch 128(%1)\n"
20453+ " prefetch 192(%1)\n"
20454+ " prefetch 256(%1)\n"
20455 "2: \n"
20456 ".section .fixup, \"ax\"\n"
20457- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20458+ "3: \n"
20459+
20460+#ifdef CONFIG_PAX_KERNEXEC
20461+ " movl %%cr0, %0\n"
20462+ " movl %0, %%eax\n"
20463+ " andl $0xFFFEFFFF, %%eax\n"
20464+ " movl %%eax, %%cr0\n"
20465+#endif
20466+
20467+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20468+
20469+#ifdef CONFIG_PAX_KERNEXEC
20470+ " movl %0, %%cr0\n"
20471+#endif
20472+
20473 " jmp 2b\n"
20474 ".previous\n"
20475- _ASM_EXTABLE(1b, 3b) : : "r" (from));
20476+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20477
20478 for (i = 0; i < (4096-320)/64; i++) {
20479 __asm__ __volatile__ (
20480- "1: prefetch 320(%0)\n"
20481- "2: movq (%0), %%mm0\n"
20482- " movntq %%mm0, (%1)\n"
20483- " movq 8(%0), %%mm1\n"
20484- " movntq %%mm1, 8(%1)\n"
20485- " movq 16(%0), %%mm2\n"
20486- " movntq %%mm2, 16(%1)\n"
20487- " movq 24(%0), %%mm3\n"
20488- " movntq %%mm3, 24(%1)\n"
20489- " movq 32(%0), %%mm4\n"
20490- " movntq %%mm4, 32(%1)\n"
20491- " movq 40(%0), %%mm5\n"
20492- " movntq %%mm5, 40(%1)\n"
20493- " movq 48(%0), %%mm6\n"
20494- " movntq %%mm6, 48(%1)\n"
20495- " movq 56(%0), %%mm7\n"
20496- " movntq %%mm7, 56(%1)\n"
20497+ "1: prefetch 320(%1)\n"
20498+ "2: movq (%1), %%mm0\n"
20499+ " movntq %%mm0, (%2)\n"
20500+ " movq 8(%1), %%mm1\n"
20501+ " movntq %%mm1, 8(%2)\n"
20502+ " movq 16(%1), %%mm2\n"
20503+ " movntq %%mm2, 16(%2)\n"
20504+ " movq 24(%1), %%mm3\n"
20505+ " movntq %%mm3, 24(%2)\n"
20506+ " movq 32(%1), %%mm4\n"
20507+ " movntq %%mm4, 32(%2)\n"
20508+ " movq 40(%1), %%mm5\n"
20509+ " movntq %%mm5, 40(%2)\n"
20510+ " movq 48(%1), %%mm6\n"
20511+ " movntq %%mm6, 48(%2)\n"
20512+ " movq 56(%1), %%mm7\n"
20513+ " movntq %%mm7, 56(%2)\n"
20514 ".section .fixup, \"ax\"\n"
20515- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20516+ "3:\n"
20517+
20518+#ifdef CONFIG_PAX_KERNEXEC
20519+ " movl %%cr0, %0\n"
20520+ " movl %0, %%eax\n"
20521+ " andl $0xFFFEFFFF, %%eax\n"
20522+ " movl %%eax, %%cr0\n"
20523+#endif
20524+
20525+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20526+
20527+#ifdef CONFIG_PAX_KERNEXEC
20528+ " movl %0, %%cr0\n"
20529+#endif
20530+
20531 " jmp 2b\n"
20532 ".previous\n"
20533- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
20534+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20535
20536 from += 64;
20537 to += 64;
20538@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
20539 static void fast_copy_page(void *to, void *from)
20540 {
20541 int i;
20542+ unsigned long cr0;
20543
20544 kernel_fpu_begin();
20545
20546 __asm__ __volatile__ (
20547- "1: prefetch (%0)\n"
20548- " prefetch 64(%0)\n"
20549- " prefetch 128(%0)\n"
20550- " prefetch 192(%0)\n"
20551- " prefetch 256(%0)\n"
20552+ "1: prefetch (%1)\n"
20553+ " prefetch 64(%1)\n"
20554+ " prefetch 128(%1)\n"
20555+ " prefetch 192(%1)\n"
20556+ " prefetch 256(%1)\n"
20557 "2: \n"
20558 ".section .fixup, \"ax\"\n"
20559- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20560+ "3: \n"
20561+
20562+#ifdef CONFIG_PAX_KERNEXEC
20563+ " movl %%cr0, %0\n"
20564+ " movl %0, %%eax\n"
20565+ " andl $0xFFFEFFFF, %%eax\n"
20566+ " movl %%eax, %%cr0\n"
20567+#endif
20568+
20569+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20570+
20571+#ifdef CONFIG_PAX_KERNEXEC
20572+ " movl %0, %%cr0\n"
20573+#endif
20574+
20575 " jmp 2b\n"
20576 ".previous\n"
20577- _ASM_EXTABLE(1b, 3b) : : "r" (from));
20578+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20579
20580 for (i = 0; i < 4096/64; i++) {
20581 __asm__ __volatile__ (
20582- "1: prefetch 320(%0)\n"
20583- "2: movq (%0), %%mm0\n"
20584- " movq 8(%0), %%mm1\n"
20585- " movq 16(%0), %%mm2\n"
20586- " movq 24(%0), %%mm3\n"
20587- " movq %%mm0, (%1)\n"
20588- " movq %%mm1, 8(%1)\n"
20589- " movq %%mm2, 16(%1)\n"
20590- " movq %%mm3, 24(%1)\n"
20591- " movq 32(%0), %%mm0\n"
20592- " movq 40(%0), %%mm1\n"
20593- " movq 48(%0), %%mm2\n"
20594- " movq 56(%0), %%mm3\n"
20595- " movq %%mm0, 32(%1)\n"
20596- " movq %%mm1, 40(%1)\n"
20597- " movq %%mm2, 48(%1)\n"
20598- " movq %%mm3, 56(%1)\n"
20599+ "1: prefetch 320(%1)\n"
20600+ "2: movq (%1), %%mm0\n"
20601+ " movq 8(%1), %%mm1\n"
20602+ " movq 16(%1), %%mm2\n"
20603+ " movq 24(%1), %%mm3\n"
20604+ " movq %%mm0, (%2)\n"
20605+ " movq %%mm1, 8(%2)\n"
20606+ " movq %%mm2, 16(%2)\n"
20607+ " movq %%mm3, 24(%2)\n"
20608+ " movq 32(%1), %%mm0\n"
20609+ " movq 40(%1), %%mm1\n"
20610+ " movq 48(%1), %%mm2\n"
20611+ " movq 56(%1), %%mm3\n"
20612+ " movq %%mm0, 32(%2)\n"
20613+ " movq %%mm1, 40(%2)\n"
20614+ " movq %%mm2, 48(%2)\n"
20615+ " movq %%mm3, 56(%2)\n"
20616 ".section .fixup, \"ax\"\n"
20617- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20618+ "3:\n"
20619+
20620+#ifdef CONFIG_PAX_KERNEXEC
20621+ " movl %%cr0, %0\n"
20622+ " movl %0, %%eax\n"
20623+ " andl $0xFFFEFFFF, %%eax\n"
20624+ " movl %%eax, %%cr0\n"
20625+#endif
20626+
20627+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20628+
20629+#ifdef CONFIG_PAX_KERNEXEC
20630+ " movl %0, %%cr0\n"
20631+#endif
20632+
20633 " jmp 2b\n"
20634 ".previous\n"
20635 _ASM_EXTABLE(1b, 3b)
20636- : : "r" (from), "r" (to) : "memory");
20637+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20638
20639 from += 64;
20640 to += 64;
20641diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
20642index 69fa106..adda88b 100644
20643--- a/arch/x86/lib/msr-reg.S
20644+++ b/arch/x86/lib/msr-reg.S
20645@@ -3,6 +3,7 @@
20646 #include <asm/dwarf2.h>
20647 #include <asm/asm.h>
20648 #include <asm/msr.h>
20649+#include <asm/alternative-asm.h>
20650
20651 #ifdef CONFIG_X86_64
20652 /*
20653@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
20654 CFI_STARTPROC
20655 pushq_cfi %rbx
20656 pushq_cfi %rbp
20657- movq %rdi, %r10 /* Save pointer */
20658+ movq %rdi, %r9 /* Save pointer */
20659 xorl %r11d, %r11d /* Return value */
20660 movl (%rdi), %eax
20661 movl 4(%rdi), %ecx
20662@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
20663 movl 28(%rdi), %edi
20664 CFI_REMEMBER_STATE
20665 1: \op
20666-2: movl %eax, (%r10)
20667+2: movl %eax, (%r9)
20668 movl %r11d, %eax /* Return value */
20669- movl %ecx, 4(%r10)
20670- movl %edx, 8(%r10)
20671- movl %ebx, 12(%r10)
20672- movl %ebp, 20(%r10)
20673- movl %esi, 24(%r10)
20674- movl %edi, 28(%r10)
20675+ movl %ecx, 4(%r9)
20676+ movl %edx, 8(%r9)
20677+ movl %ebx, 12(%r9)
20678+ movl %ebp, 20(%r9)
20679+ movl %esi, 24(%r9)
20680+ movl %edi, 28(%r9)
20681 popq_cfi %rbp
20682 popq_cfi %rbx
20683+ pax_force_retaddr
20684 ret
20685 3:
20686 CFI_RESTORE_STATE
20687diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
20688index 36b0d15..d381858 100644
20689--- a/arch/x86/lib/putuser.S
20690+++ b/arch/x86/lib/putuser.S
20691@@ -15,7 +15,9 @@
20692 #include <asm/thread_info.h>
20693 #include <asm/errno.h>
20694 #include <asm/asm.h>
20695-
20696+#include <asm/segment.h>
20697+#include <asm/pgtable.h>
20698+#include <asm/alternative-asm.h>
20699
20700 /*
20701 * __put_user_X
20702@@ -29,52 +31,119 @@
20703 * as they get called from within inline assembly.
20704 */
20705
20706-#define ENTER CFI_STARTPROC ; \
20707- GET_THREAD_INFO(%_ASM_BX)
20708-#define EXIT ret ; \
20709+#define ENTER CFI_STARTPROC
20710+#define EXIT pax_force_retaddr; ret ; \
20711 CFI_ENDPROC
20712
20713+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20714+#define _DEST %_ASM_CX,%_ASM_BX
20715+#else
20716+#define _DEST %_ASM_CX
20717+#endif
20718+
20719+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20720+#define __copyuser_seg gs;
20721+#else
20722+#define __copyuser_seg
20723+#endif
20724+
20725 .text
20726 ENTRY(__put_user_1)
20727 ENTER
20728+
20729+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20730+ GET_THREAD_INFO(%_ASM_BX)
20731 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
20732 jae bad_put_user
20733-1: movb %al,(%_ASM_CX)
20734+
20735+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20736+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20737+ cmp %_ASM_BX,%_ASM_CX
20738+ jb 1234f
20739+ xor %ebx,%ebx
20740+1234:
20741+#endif
20742+
20743+#endif
20744+
20745+1: __copyuser_seg movb %al,(_DEST)
20746 xor %eax,%eax
20747 EXIT
20748 ENDPROC(__put_user_1)
20749
20750 ENTRY(__put_user_2)
20751 ENTER
20752+
20753+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20754+ GET_THREAD_INFO(%_ASM_BX)
20755 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20756 sub $1,%_ASM_BX
20757 cmp %_ASM_BX,%_ASM_CX
20758 jae bad_put_user
20759-2: movw %ax,(%_ASM_CX)
20760+
20761+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20762+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20763+ cmp %_ASM_BX,%_ASM_CX
20764+ jb 1234f
20765+ xor %ebx,%ebx
20766+1234:
20767+#endif
20768+
20769+#endif
20770+
20771+2: __copyuser_seg movw %ax,(_DEST)
20772 xor %eax,%eax
20773 EXIT
20774 ENDPROC(__put_user_2)
20775
20776 ENTRY(__put_user_4)
20777 ENTER
20778+
20779+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20780+ GET_THREAD_INFO(%_ASM_BX)
20781 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20782 sub $3,%_ASM_BX
20783 cmp %_ASM_BX,%_ASM_CX
20784 jae bad_put_user
20785-3: movl %eax,(%_ASM_CX)
20786+
20787+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20788+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20789+ cmp %_ASM_BX,%_ASM_CX
20790+ jb 1234f
20791+ xor %ebx,%ebx
20792+1234:
20793+#endif
20794+
20795+#endif
20796+
20797+3: __copyuser_seg movl %eax,(_DEST)
20798 xor %eax,%eax
20799 EXIT
20800 ENDPROC(__put_user_4)
20801
20802 ENTRY(__put_user_8)
20803 ENTER
20804+
20805+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20806+ GET_THREAD_INFO(%_ASM_BX)
20807 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20808 sub $7,%_ASM_BX
20809 cmp %_ASM_BX,%_ASM_CX
20810 jae bad_put_user
20811-4: mov %_ASM_AX,(%_ASM_CX)
20812+
20813+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20814+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20815+ cmp %_ASM_BX,%_ASM_CX
20816+ jb 1234f
20817+ xor %ebx,%ebx
20818+1234:
20819+#endif
20820+
20821+#endif
20822+
20823+4: __copyuser_seg mov %_ASM_AX,(_DEST)
20824 #ifdef CONFIG_X86_32
20825-5: movl %edx,4(%_ASM_CX)
20826+5: __copyuser_seg movl %edx,4(_DEST)
20827 #endif
20828 xor %eax,%eax
20829 EXIT
20830diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
20831index 1cad221..de671ee 100644
20832--- a/arch/x86/lib/rwlock.S
20833+++ b/arch/x86/lib/rwlock.S
20834@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
20835 FRAME
20836 0: LOCK_PREFIX
20837 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20838+
20839+#ifdef CONFIG_PAX_REFCOUNT
20840+ jno 1234f
20841+ LOCK_PREFIX
20842+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20843+ int $4
20844+1234:
20845+ _ASM_EXTABLE(1234b, 1234b)
20846+#endif
20847+
20848 1: rep; nop
20849 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
20850 jne 1b
20851 LOCK_PREFIX
20852 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20853+
20854+#ifdef CONFIG_PAX_REFCOUNT
20855+ jno 1234f
20856+ LOCK_PREFIX
20857+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20858+ int $4
20859+1234:
20860+ _ASM_EXTABLE(1234b, 1234b)
20861+#endif
20862+
20863 jnz 0b
20864 ENDFRAME
20865+ pax_force_retaddr
20866 ret
20867 CFI_ENDPROC
20868 END(__write_lock_failed)
20869@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
20870 FRAME
20871 0: LOCK_PREFIX
20872 READ_LOCK_SIZE(inc) (%__lock_ptr)
20873+
20874+#ifdef CONFIG_PAX_REFCOUNT
20875+ jno 1234f
20876+ LOCK_PREFIX
20877+ READ_LOCK_SIZE(dec) (%__lock_ptr)
20878+ int $4
20879+1234:
20880+ _ASM_EXTABLE(1234b, 1234b)
20881+#endif
20882+
20883 1: rep; nop
20884 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
20885 js 1b
20886 LOCK_PREFIX
20887 READ_LOCK_SIZE(dec) (%__lock_ptr)
20888+
20889+#ifdef CONFIG_PAX_REFCOUNT
20890+ jno 1234f
20891+ LOCK_PREFIX
20892+ READ_LOCK_SIZE(inc) (%__lock_ptr)
20893+ int $4
20894+1234:
20895+ _ASM_EXTABLE(1234b, 1234b)
20896+#endif
20897+
20898 js 0b
20899 ENDFRAME
20900+ pax_force_retaddr
20901 ret
20902 CFI_ENDPROC
20903 END(__read_lock_failed)
20904diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
20905index 5dff5f0..cadebf4 100644
20906--- a/arch/x86/lib/rwsem.S
20907+++ b/arch/x86/lib/rwsem.S
20908@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
20909 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20910 CFI_RESTORE __ASM_REG(dx)
20911 restore_common_regs
20912+ pax_force_retaddr
20913 ret
20914 CFI_ENDPROC
20915 ENDPROC(call_rwsem_down_read_failed)
20916@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
20917 movq %rax,%rdi
20918 call rwsem_down_write_failed
20919 restore_common_regs
20920+ pax_force_retaddr
20921 ret
20922 CFI_ENDPROC
20923 ENDPROC(call_rwsem_down_write_failed)
20924@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
20925 movq %rax,%rdi
20926 call rwsem_wake
20927 restore_common_regs
20928-1: ret
20929+1: pax_force_retaddr
20930+ ret
20931 CFI_ENDPROC
20932 ENDPROC(call_rwsem_wake)
20933
20934@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
20935 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20936 CFI_RESTORE __ASM_REG(dx)
20937 restore_common_regs
20938+ pax_force_retaddr
20939 ret
20940 CFI_ENDPROC
20941 ENDPROC(call_rwsem_downgrade_wake)
20942diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
20943index a63efd6..ccecad8 100644
20944--- a/arch/x86/lib/thunk_64.S
20945+++ b/arch/x86/lib/thunk_64.S
20946@@ -8,6 +8,7 @@
20947 #include <linux/linkage.h>
20948 #include <asm/dwarf2.h>
20949 #include <asm/calling.h>
20950+#include <asm/alternative-asm.h>
20951
20952 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
20953 .macro THUNK name, func, put_ret_addr_in_rdi=0
20954@@ -41,5 +42,6 @@
20955 SAVE_ARGS
20956 restore:
20957 RESTORE_ARGS
20958+ pax_force_retaddr
20959 ret
20960 CFI_ENDPROC
20961diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
20962index e218d5d..35679b4 100644
20963--- a/arch/x86/lib/usercopy_32.c
20964+++ b/arch/x86/lib/usercopy_32.c
20965@@ -43,7 +43,7 @@ do { \
20966 __asm__ __volatile__( \
20967 " testl %1,%1\n" \
20968 " jz 2f\n" \
20969- "0: lodsb\n" \
20970+ "0: "__copyuser_seg"lodsb\n" \
20971 " stosb\n" \
20972 " testb %%al,%%al\n" \
20973 " jz 1f\n" \
20974@@ -128,10 +128,12 @@ do { \
20975 int __d0; \
20976 might_fault(); \
20977 __asm__ __volatile__( \
20978+ __COPYUSER_SET_ES \
20979 "0: rep; stosl\n" \
20980 " movl %2,%0\n" \
20981 "1: rep; stosb\n" \
20982 "2:\n" \
20983+ __COPYUSER_RESTORE_ES \
20984 ".section .fixup,\"ax\"\n" \
20985 "3: lea 0(%2,%0,4),%0\n" \
20986 " jmp 2b\n" \
20987@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
20988 might_fault();
20989
20990 __asm__ __volatile__(
20991+ __COPYUSER_SET_ES
20992 " testl %0, %0\n"
20993 " jz 3f\n"
20994 " andl %0,%%ecx\n"
20995@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
20996 " subl %%ecx,%0\n"
20997 " addl %0,%%eax\n"
20998 "1:\n"
20999+ __COPYUSER_RESTORE_ES
21000 ".section .fixup,\"ax\"\n"
21001 "2: xorl %%eax,%%eax\n"
21002 " jmp 1b\n"
21003@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
21004
21005 #ifdef CONFIG_X86_INTEL_USERCOPY
21006 static unsigned long
21007-__copy_user_intel(void __user *to, const void *from, unsigned long size)
21008+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
21009 {
21010 int d0, d1;
21011 __asm__ __volatile__(
21012@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
21013 " .align 2,0x90\n"
21014 "3: movl 0(%4), %%eax\n"
21015 "4: movl 4(%4), %%edx\n"
21016- "5: movl %%eax, 0(%3)\n"
21017- "6: movl %%edx, 4(%3)\n"
21018+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
21019+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
21020 "7: movl 8(%4), %%eax\n"
21021 "8: movl 12(%4),%%edx\n"
21022- "9: movl %%eax, 8(%3)\n"
21023- "10: movl %%edx, 12(%3)\n"
21024+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
21025+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
21026 "11: movl 16(%4), %%eax\n"
21027 "12: movl 20(%4), %%edx\n"
21028- "13: movl %%eax, 16(%3)\n"
21029- "14: movl %%edx, 20(%3)\n"
21030+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
21031+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
21032 "15: movl 24(%4), %%eax\n"
21033 "16: movl 28(%4), %%edx\n"
21034- "17: movl %%eax, 24(%3)\n"
21035- "18: movl %%edx, 28(%3)\n"
21036+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
21037+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
21038 "19: movl 32(%4), %%eax\n"
21039 "20: movl 36(%4), %%edx\n"
21040- "21: movl %%eax, 32(%3)\n"
21041- "22: movl %%edx, 36(%3)\n"
21042+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
21043+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
21044 "23: movl 40(%4), %%eax\n"
21045 "24: movl 44(%4), %%edx\n"
21046- "25: movl %%eax, 40(%3)\n"
21047- "26: movl %%edx, 44(%3)\n"
21048+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
21049+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
21050 "27: movl 48(%4), %%eax\n"
21051 "28: movl 52(%4), %%edx\n"
21052- "29: movl %%eax, 48(%3)\n"
21053- "30: movl %%edx, 52(%3)\n"
21054+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
21055+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
21056 "31: movl 56(%4), %%eax\n"
21057 "32: movl 60(%4), %%edx\n"
21058- "33: movl %%eax, 56(%3)\n"
21059- "34: movl %%edx, 60(%3)\n"
21060+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
21061+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
21062 " addl $-64, %0\n"
21063 " addl $64, %4\n"
21064 " addl $64, %3\n"
21065@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
21066 " shrl $2, %0\n"
21067 " andl $3, %%eax\n"
21068 " cld\n"
21069+ __COPYUSER_SET_ES
21070 "99: rep; movsl\n"
21071 "36: movl %%eax, %0\n"
21072 "37: rep; movsb\n"
21073 "100:\n"
21074+ __COPYUSER_RESTORE_ES
21075+ ".section .fixup,\"ax\"\n"
21076+ "101: lea 0(%%eax,%0,4),%0\n"
21077+ " jmp 100b\n"
21078+ ".previous\n"
21079+ ".section __ex_table,\"a\"\n"
21080+ " .align 4\n"
21081+ " .long 1b,100b\n"
21082+ " .long 2b,100b\n"
21083+ " .long 3b,100b\n"
21084+ " .long 4b,100b\n"
21085+ " .long 5b,100b\n"
21086+ " .long 6b,100b\n"
21087+ " .long 7b,100b\n"
21088+ " .long 8b,100b\n"
21089+ " .long 9b,100b\n"
21090+ " .long 10b,100b\n"
21091+ " .long 11b,100b\n"
21092+ " .long 12b,100b\n"
21093+ " .long 13b,100b\n"
21094+ " .long 14b,100b\n"
21095+ " .long 15b,100b\n"
21096+ " .long 16b,100b\n"
21097+ " .long 17b,100b\n"
21098+ " .long 18b,100b\n"
21099+ " .long 19b,100b\n"
21100+ " .long 20b,100b\n"
21101+ " .long 21b,100b\n"
21102+ " .long 22b,100b\n"
21103+ " .long 23b,100b\n"
21104+ " .long 24b,100b\n"
21105+ " .long 25b,100b\n"
21106+ " .long 26b,100b\n"
21107+ " .long 27b,100b\n"
21108+ " .long 28b,100b\n"
21109+ " .long 29b,100b\n"
21110+ " .long 30b,100b\n"
21111+ " .long 31b,100b\n"
21112+ " .long 32b,100b\n"
21113+ " .long 33b,100b\n"
21114+ " .long 34b,100b\n"
21115+ " .long 35b,100b\n"
21116+ " .long 36b,100b\n"
21117+ " .long 37b,100b\n"
21118+ " .long 99b,101b\n"
21119+ ".previous"
21120+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
21121+ : "1"(to), "2"(from), "0"(size)
21122+ : "eax", "edx", "memory");
21123+ return size;
21124+}
21125+
21126+static unsigned long
21127+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
21128+{
21129+ int d0, d1;
21130+ __asm__ __volatile__(
21131+ " .align 2,0x90\n"
21132+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
21133+ " cmpl $67, %0\n"
21134+ " jbe 3f\n"
21135+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
21136+ " .align 2,0x90\n"
21137+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
21138+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
21139+ "5: movl %%eax, 0(%3)\n"
21140+ "6: movl %%edx, 4(%3)\n"
21141+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
21142+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
21143+ "9: movl %%eax, 8(%3)\n"
21144+ "10: movl %%edx, 12(%3)\n"
21145+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
21146+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
21147+ "13: movl %%eax, 16(%3)\n"
21148+ "14: movl %%edx, 20(%3)\n"
21149+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
21150+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
21151+ "17: movl %%eax, 24(%3)\n"
21152+ "18: movl %%edx, 28(%3)\n"
21153+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
21154+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
21155+ "21: movl %%eax, 32(%3)\n"
21156+ "22: movl %%edx, 36(%3)\n"
21157+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
21158+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
21159+ "25: movl %%eax, 40(%3)\n"
21160+ "26: movl %%edx, 44(%3)\n"
21161+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
21162+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
21163+ "29: movl %%eax, 48(%3)\n"
21164+ "30: movl %%edx, 52(%3)\n"
21165+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
21166+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
21167+ "33: movl %%eax, 56(%3)\n"
21168+ "34: movl %%edx, 60(%3)\n"
21169+ " addl $-64, %0\n"
21170+ " addl $64, %4\n"
21171+ " addl $64, %3\n"
21172+ " cmpl $63, %0\n"
21173+ " ja 1b\n"
21174+ "35: movl %0, %%eax\n"
21175+ " shrl $2, %0\n"
21176+ " andl $3, %%eax\n"
21177+ " cld\n"
21178+ "99: rep; "__copyuser_seg" movsl\n"
21179+ "36: movl %%eax, %0\n"
21180+ "37: rep; "__copyuser_seg" movsb\n"
21181+ "100:\n"
21182 ".section .fixup,\"ax\"\n"
21183 "101: lea 0(%%eax,%0,4),%0\n"
21184 " jmp 100b\n"
21185@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
21186 int d0, d1;
21187 __asm__ __volatile__(
21188 " .align 2,0x90\n"
21189- "0: movl 32(%4), %%eax\n"
21190+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21191 " cmpl $67, %0\n"
21192 " jbe 2f\n"
21193- "1: movl 64(%4), %%eax\n"
21194+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21195 " .align 2,0x90\n"
21196- "2: movl 0(%4), %%eax\n"
21197- "21: movl 4(%4), %%edx\n"
21198+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21199+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21200 " movl %%eax, 0(%3)\n"
21201 " movl %%edx, 4(%3)\n"
21202- "3: movl 8(%4), %%eax\n"
21203- "31: movl 12(%4),%%edx\n"
21204+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21205+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21206 " movl %%eax, 8(%3)\n"
21207 " movl %%edx, 12(%3)\n"
21208- "4: movl 16(%4), %%eax\n"
21209- "41: movl 20(%4), %%edx\n"
21210+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21211+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21212 " movl %%eax, 16(%3)\n"
21213 " movl %%edx, 20(%3)\n"
21214- "10: movl 24(%4), %%eax\n"
21215- "51: movl 28(%4), %%edx\n"
21216+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21217+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21218 " movl %%eax, 24(%3)\n"
21219 " movl %%edx, 28(%3)\n"
21220- "11: movl 32(%4), %%eax\n"
21221- "61: movl 36(%4), %%edx\n"
21222+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21223+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21224 " movl %%eax, 32(%3)\n"
21225 " movl %%edx, 36(%3)\n"
21226- "12: movl 40(%4), %%eax\n"
21227- "71: movl 44(%4), %%edx\n"
21228+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21229+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21230 " movl %%eax, 40(%3)\n"
21231 " movl %%edx, 44(%3)\n"
21232- "13: movl 48(%4), %%eax\n"
21233- "81: movl 52(%4), %%edx\n"
21234+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21235+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21236 " movl %%eax, 48(%3)\n"
21237 " movl %%edx, 52(%3)\n"
21238- "14: movl 56(%4), %%eax\n"
21239- "91: movl 60(%4), %%edx\n"
21240+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21241+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21242 " movl %%eax, 56(%3)\n"
21243 " movl %%edx, 60(%3)\n"
21244 " addl $-64, %0\n"
21245@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
21246 " shrl $2, %0\n"
21247 " andl $3, %%eax\n"
21248 " cld\n"
21249- "6: rep; movsl\n"
21250+ "6: rep; "__copyuser_seg" movsl\n"
21251 " movl %%eax,%0\n"
21252- "7: rep; movsb\n"
21253+ "7: rep; "__copyuser_seg" movsb\n"
21254 "8:\n"
21255 ".section .fixup,\"ax\"\n"
21256 "9: lea 0(%%eax,%0,4),%0\n"
21257@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21258
21259 __asm__ __volatile__(
21260 " .align 2,0x90\n"
21261- "0: movl 32(%4), %%eax\n"
21262+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21263 " cmpl $67, %0\n"
21264 " jbe 2f\n"
21265- "1: movl 64(%4), %%eax\n"
21266+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21267 " .align 2,0x90\n"
21268- "2: movl 0(%4), %%eax\n"
21269- "21: movl 4(%4), %%edx\n"
21270+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21271+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21272 " movnti %%eax, 0(%3)\n"
21273 " movnti %%edx, 4(%3)\n"
21274- "3: movl 8(%4), %%eax\n"
21275- "31: movl 12(%4),%%edx\n"
21276+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21277+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21278 " movnti %%eax, 8(%3)\n"
21279 " movnti %%edx, 12(%3)\n"
21280- "4: movl 16(%4), %%eax\n"
21281- "41: movl 20(%4), %%edx\n"
21282+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21283+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21284 " movnti %%eax, 16(%3)\n"
21285 " movnti %%edx, 20(%3)\n"
21286- "10: movl 24(%4), %%eax\n"
21287- "51: movl 28(%4), %%edx\n"
21288+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21289+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21290 " movnti %%eax, 24(%3)\n"
21291 " movnti %%edx, 28(%3)\n"
21292- "11: movl 32(%4), %%eax\n"
21293- "61: movl 36(%4), %%edx\n"
21294+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21295+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21296 " movnti %%eax, 32(%3)\n"
21297 " movnti %%edx, 36(%3)\n"
21298- "12: movl 40(%4), %%eax\n"
21299- "71: movl 44(%4), %%edx\n"
21300+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21301+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21302 " movnti %%eax, 40(%3)\n"
21303 " movnti %%edx, 44(%3)\n"
21304- "13: movl 48(%4), %%eax\n"
21305- "81: movl 52(%4), %%edx\n"
21306+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21307+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21308 " movnti %%eax, 48(%3)\n"
21309 " movnti %%edx, 52(%3)\n"
21310- "14: movl 56(%4), %%eax\n"
21311- "91: movl 60(%4), %%edx\n"
21312+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21313+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21314 " movnti %%eax, 56(%3)\n"
21315 " movnti %%edx, 60(%3)\n"
21316 " addl $-64, %0\n"
21317@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21318 " shrl $2, %0\n"
21319 " andl $3, %%eax\n"
21320 " cld\n"
21321- "6: rep; movsl\n"
21322+ "6: rep; "__copyuser_seg" movsl\n"
21323 " movl %%eax,%0\n"
21324- "7: rep; movsb\n"
21325+ "7: rep; "__copyuser_seg" movsb\n"
21326 "8:\n"
21327 ".section .fixup,\"ax\"\n"
21328 "9: lea 0(%%eax,%0,4),%0\n"
21329@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
21330
21331 __asm__ __volatile__(
21332 " .align 2,0x90\n"
21333- "0: movl 32(%4), %%eax\n"
21334+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21335 " cmpl $67, %0\n"
21336 " jbe 2f\n"
21337- "1: movl 64(%4), %%eax\n"
21338+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21339 " .align 2,0x90\n"
21340- "2: movl 0(%4), %%eax\n"
21341- "21: movl 4(%4), %%edx\n"
21342+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21343+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21344 " movnti %%eax, 0(%3)\n"
21345 " movnti %%edx, 4(%3)\n"
21346- "3: movl 8(%4), %%eax\n"
21347- "31: movl 12(%4),%%edx\n"
21348+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21349+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21350 " movnti %%eax, 8(%3)\n"
21351 " movnti %%edx, 12(%3)\n"
21352- "4: movl 16(%4), %%eax\n"
21353- "41: movl 20(%4), %%edx\n"
21354+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21355+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21356 " movnti %%eax, 16(%3)\n"
21357 " movnti %%edx, 20(%3)\n"
21358- "10: movl 24(%4), %%eax\n"
21359- "51: movl 28(%4), %%edx\n"
21360+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21361+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21362 " movnti %%eax, 24(%3)\n"
21363 " movnti %%edx, 28(%3)\n"
21364- "11: movl 32(%4), %%eax\n"
21365- "61: movl 36(%4), %%edx\n"
21366+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21367+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21368 " movnti %%eax, 32(%3)\n"
21369 " movnti %%edx, 36(%3)\n"
21370- "12: movl 40(%4), %%eax\n"
21371- "71: movl 44(%4), %%edx\n"
21372+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21373+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21374 " movnti %%eax, 40(%3)\n"
21375 " movnti %%edx, 44(%3)\n"
21376- "13: movl 48(%4), %%eax\n"
21377- "81: movl 52(%4), %%edx\n"
21378+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21379+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21380 " movnti %%eax, 48(%3)\n"
21381 " movnti %%edx, 52(%3)\n"
21382- "14: movl 56(%4), %%eax\n"
21383- "91: movl 60(%4), %%edx\n"
21384+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21385+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21386 " movnti %%eax, 56(%3)\n"
21387 " movnti %%edx, 60(%3)\n"
21388 " addl $-64, %0\n"
21389@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
21390 " shrl $2, %0\n"
21391 " andl $3, %%eax\n"
21392 " cld\n"
21393- "6: rep; movsl\n"
21394+ "6: rep; "__copyuser_seg" movsl\n"
21395 " movl %%eax,%0\n"
21396- "7: rep; movsb\n"
21397+ "7: rep; "__copyuser_seg" movsb\n"
21398 "8:\n"
21399 ".section .fixup,\"ax\"\n"
21400 "9: lea 0(%%eax,%0,4),%0\n"
21401@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
21402 */
21403 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
21404 unsigned long size);
21405-unsigned long __copy_user_intel(void __user *to, const void *from,
21406+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
21407+ unsigned long size);
21408+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
21409 unsigned long size);
21410 unsigned long __copy_user_zeroing_intel_nocache(void *to,
21411 const void __user *from, unsigned long size);
21412 #endif /* CONFIG_X86_INTEL_USERCOPY */
21413
21414 /* Generic arbitrary sized copy. */
21415-#define __copy_user(to, from, size) \
21416+#define __copy_user(to, from, size, prefix, set, restore) \
21417 do { \
21418 int __d0, __d1, __d2; \
21419 __asm__ __volatile__( \
21420+ set \
21421 " cmp $7,%0\n" \
21422 " jbe 1f\n" \
21423 " movl %1,%0\n" \
21424 " negl %0\n" \
21425 " andl $7,%0\n" \
21426 " subl %0,%3\n" \
21427- "4: rep; movsb\n" \
21428+ "4: rep; "prefix"movsb\n" \
21429 " movl %3,%0\n" \
21430 " shrl $2,%0\n" \
21431 " andl $3,%3\n" \
21432 " .align 2,0x90\n" \
21433- "0: rep; movsl\n" \
21434+ "0: rep; "prefix"movsl\n" \
21435 " movl %3,%0\n" \
21436- "1: rep; movsb\n" \
21437+ "1: rep; "prefix"movsb\n" \
21438 "2:\n" \
21439+ restore \
21440 ".section .fixup,\"ax\"\n" \
21441 "5: addl %3,%0\n" \
21442 " jmp 2b\n" \
21443@@ -682,14 +799,14 @@ do { \
21444 " negl %0\n" \
21445 " andl $7,%0\n" \
21446 " subl %0,%3\n" \
21447- "4: rep; movsb\n" \
21448+ "4: rep; "__copyuser_seg"movsb\n" \
21449 " movl %3,%0\n" \
21450 " shrl $2,%0\n" \
21451 " andl $3,%3\n" \
21452 " .align 2,0x90\n" \
21453- "0: rep; movsl\n" \
21454+ "0: rep; "__copyuser_seg"movsl\n" \
21455 " movl %3,%0\n" \
21456- "1: rep; movsb\n" \
21457+ "1: rep; "__copyuser_seg"movsb\n" \
21458 "2:\n" \
21459 ".section .fixup,\"ax\"\n" \
21460 "5: addl %3,%0\n" \
21461@@ -775,9 +892,9 @@ survive:
21462 }
21463 #endif
21464 if (movsl_is_ok(to, from, n))
21465- __copy_user(to, from, n);
21466+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
21467 else
21468- n = __copy_user_intel(to, from, n);
21469+ n = __generic_copy_to_user_intel(to, from, n);
21470 return n;
21471 }
21472 EXPORT_SYMBOL(__copy_to_user_ll);
21473@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
21474 unsigned long n)
21475 {
21476 if (movsl_is_ok(to, from, n))
21477- __copy_user(to, from, n);
21478+ __copy_user(to, from, n, __copyuser_seg, "", "");
21479 else
21480- n = __copy_user_intel((void __user *)to,
21481- (const void *)from, n);
21482+ n = __generic_copy_from_user_intel(to, from, n);
21483 return n;
21484 }
21485 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
21486@@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
21487 if (n > 64 && cpu_has_xmm2)
21488 n = __copy_user_intel_nocache(to, from, n);
21489 else
21490- __copy_user(to, from, n);
21491+ __copy_user(to, from, n, __copyuser_seg, "", "");
21492 #else
21493- __copy_user(to, from, n);
21494+ __copy_user(to, from, n, __copyuser_seg, "", "");
21495 #endif
21496 return n;
21497 }
21498 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
21499
21500-/**
21501- * copy_to_user: - Copy a block of data into user space.
21502- * @to: Destination address, in user space.
21503- * @from: Source address, in kernel space.
21504- * @n: Number of bytes to copy.
21505- *
21506- * Context: User context only. This function may sleep.
21507- *
21508- * Copy data from kernel space to user space.
21509- *
21510- * Returns number of bytes that could not be copied.
21511- * On success, this will be zero.
21512- */
21513-unsigned long
21514-copy_to_user(void __user *to, const void *from, unsigned long n)
21515-{
21516- if (access_ok(VERIFY_WRITE, to, n))
21517- n = __copy_to_user(to, from, n);
21518- return n;
21519-}
21520-EXPORT_SYMBOL(copy_to_user);
21521-
21522-/**
21523- * copy_from_user: - Copy a block of data from user space.
21524- * @to: Destination address, in kernel space.
21525- * @from: Source address, in user space.
21526- * @n: Number of bytes to copy.
21527- *
21528- * Context: User context only. This function may sleep.
21529- *
21530- * Copy data from user space to kernel space.
21531- *
21532- * Returns number of bytes that could not be copied.
21533- * On success, this will be zero.
21534- *
21535- * If some data could not be copied, this function will pad the copied
21536- * data to the requested size using zero bytes.
21537- */
21538-unsigned long
21539-_copy_from_user(void *to, const void __user *from, unsigned long n)
21540-{
21541- if (access_ok(VERIFY_READ, from, n))
21542- n = __copy_from_user(to, from, n);
21543- else
21544- memset(to, 0, n);
21545- return n;
21546-}
21547-EXPORT_SYMBOL(_copy_from_user);
21548-
21549 void copy_from_user_overflow(void)
21550 {
21551 WARN(1, "Buffer overflow detected!\n");
21552 }
21553 EXPORT_SYMBOL(copy_from_user_overflow);
21554+
21555+void copy_to_user_overflow(void)
21556+{
21557+ WARN(1, "Buffer overflow detected!\n");
21558+}
21559+EXPORT_SYMBOL(copy_to_user_overflow);
21560+
21561+#ifdef CONFIG_PAX_MEMORY_UDEREF
21562+void __set_fs(mm_segment_t x)
21563+{
21564+ switch (x.seg) {
21565+ case 0:
21566+ loadsegment(gs, 0);
21567+ break;
21568+ case TASK_SIZE_MAX:
21569+ loadsegment(gs, __USER_DS);
21570+ break;
21571+ case -1UL:
21572+ loadsegment(gs, __KERNEL_DS);
21573+ break;
21574+ default:
21575+ BUG();
21576+ }
21577+ return;
21578+}
21579+EXPORT_SYMBOL(__set_fs);
21580+
21581+void set_fs(mm_segment_t x)
21582+{
21583+ current_thread_info()->addr_limit = x;
21584+ __set_fs(x);
21585+}
21586+EXPORT_SYMBOL(set_fs);
21587+#endif
21588diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
21589index b7c2849..8633ad8 100644
21590--- a/arch/x86/lib/usercopy_64.c
21591+++ b/arch/x86/lib/usercopy_64.c
21592@@ -42,6 +42,12 @@ long
21593 __strncpy_from_user(char *dst, const char __user *src, long count)
21594 {
21595 long res;
21596+
21597+#ifdef CONFIG_PAX_MEMORY_UDEREF
21598+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21599+ src += PAX_USER_SHADOW_BASE;
21600+#endif
21601+
21602 __do_strncpy_from_user(dst, src, count, res);
21603 return res;
21604 }
21605@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
21606 {
21607 long __d0;
21608 might_fault();
21609+
21610+#ifdef CONFIG_PAX_MEMORY_UDEREF
21611+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
21612+ addr += PAX_USER_SHADOW_BASE;
21613+#endif
21614+
21615 /* no memory constraint because it doesn't change any memory gcc knows
21616 about */
21617 asm volatile(
21618@@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
21619 }
21620 EXPORT_SYMBOL(strlen_user);
21621
21622-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
21623+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
21624 {
21625- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21626- return copy_user_generic((__force void *)to, (__force void *)from, len);
21627- }
21628- return len;
21629+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21630+
21631+#ifdef CONFIG_PAX_MEMORY_UDEREF
21632+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
21633+ to += PAX_USER_SHADOW_BASE;
21634+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
21635+ from += PAX_USER_SHADOW_BASE;
21636+#endif
21637+
21638+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
21639+ }
21640+ return len;
21641 }
21642 EXPORT_SYMBOL(copy_in_user);
21643
21644@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
21645 * it is not necessary to optimize tail handling.
21646 */
21647 unsigned long
21648-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
21649+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
21650 {
21651 char c;
21652 unsigned zero_len;
21653diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
21654index d0474ad..36e9257 100644
21655--- a/arch/x86/mm/extable.c
21656+++ b/arch/x86/mm/extable.c
21657@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
21658 const struct exception_table_entry *fixup;
21659
21660 #ifdef CONFIG_PNPBIOS
21661- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
21662+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
21663 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
21664 extern u32 pnp_bios_is_utter_crap;
21665 pnp_bios_is_utter_crap = 1;
21666diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
21667index 5db0490..2ddce45 100644
21668--- a/arch/x86/mm/fault.c
21669+++ b/arch/x86/mm/fault.c
21670@@ -13,11 +13,18 @@
21671 #include <linux/perf_event.h> /* perf_sw_event */
21672 #include <linux/hugetlb.h> /* hstate_index_to_shift */
21673 #include <linux/prefetch.h> /* prefetchw */
21674+#include <linux/unistd.h>
21675+#include <linux/compiler.h>
21676
21677 #include <asm/traps.h> /* dotraplinkage, ... */
21678 #include <asm/pgalloc.h> /* pgd_*(), ... */
21679 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
21680 #include <asm/fixmap.h> /* VSYSCALL_START */
21681+#include <asm/tlbflush.h>
21682+
21683+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21684+#include <asm/stacktrace.h>
21685+#endif
21686
21687 /*
21688 * Page fault error code bits:
21689@@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
21690 int ret = 0;
21691
21692 /* kprobe_running() needs smp_processor_id() */
21693- if (kprobes_built_in() && !user_mode_vm(regs)) {
21694+ if (kprobes_built_in() && !user_mode(regs)) {
21695 preempt_disable();
21696 if (kprobe_running() && kprobe_fault_handler(regs, 14))
21697 ret = 1;
21698@@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
21699 return !instr_lo || (instr_lo>>1) == 1;
21700 case 0x00:
21701 /* Prefetch instruction is 0x0F0D or 0x0F18 */
21702- if (probe_kernel_address(instr, opcode))
21703+ if (user_mode(regs)) {
21704+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21705+ return 0;
21706+ } else if (probe_kernel_address(instr, opcode))
21707 return 0;
21708
21709 *prefetch = (instr_lo == 0xF) &&
21710@@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
21711 while (instr < max_instr) {
21712 unsigned char opcode;
21713
21714- if (probe_kernel_address(instr, opcode))
21715+ if (user_mode(regs)) {
21716+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21717+ break;
21718+ } else if (probe_kernel_address(instr, opcode))
21719 break;
21720
21721 instr++;
21722@@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
21723 force_sig_info(si_signo, &info, tsk);
21724 }
21725
21726+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21727+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
21728+#endif
21729+
21730+#ifdef CONFIG_PAX_EMUTRAMP
21731+static int pax_handle_fetch_fault(struct pt_regs *regs);
21732+#endif
21733+
21734+#ifdef CONFIG_PAX_PAGEEXEC
21735+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
21736+{
21737+ pgd_t *pgd;
21738+ pud_t *pud;
21739+ pmd_t *pmd;
21740+
21741+ pgd = pgd_offset(mm, address);
21742+ if (!pgd_present(*pgd))
21743+ return NULL;
21744+ pud = pud_offset(pgd, address);
21745+ if (!pud_present(*pud))
21746+ return NULL;
21747+ pmd = pmd_offset(pud, address);
21748+ if (!pmd_present(*pmd))
21749+ return NULL;
21750+ return pmd;
21751+}
21752+#endif
21753+
21754 DEFINE_SPINLOCK(pgd_lock);
21755 LIST_HEAD(pgd_list);
21756
21757@@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
21758 for (address = VMALLOC_START & PMD_MASK;
21759 address >= TASK_SIZE && address < FIXADDR_TOP;
21760 address += PMD_SIZE) {
21761+
21762+#ifdef CONFIG_PAX_PER_CPU_PGD
21763+ unsigned long cpu;
21764+#else
21765 struct page *page;
21766+#endif
21767
21768 spin_lock(&pgd_lock);
21769+
21770+#ifdef CONFIG_PAX_PER_CPU_PGD
21771+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
21772+ pgd_t *pgd = get_cpu_pgd(cpu);
21773+ pmd_t *ret;
21774+#else
21775 list_for_each_entry(page, &pgd_list, lru) {
21776+ pgd_t *pgd = page_address(page);
21777 spinlock_t *pgt_lock;
21778 pmd_t *ret;
21779
21780@@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
21781 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
21782
21783 spin_lock(pgt_lock);
21784- ret = vmalloc_sync_one(page_address(page), address);
21785+#endif
21786+
21787+ ret = vmalloc_sync_one(pgd, address);
21788+
21789+#ifndef CONFIG_PAX_PER_CPU_PGD
21790 spin_unlock(pgt_lock);
21791+#endif
21792
21793 if (!ret)
21794 break;
21795@@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21796 * an interrupt in the middle of a task switch..
21797 */
21798 pgd_paddr = read_cr3();
21799+
21800+#ifdef CONFIG_PAX_PER_CPU_PGD
21801+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
21802+#endif
21803+
21804 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
21805 if (!pmd_k)
21806 return -1;
21807@@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21808 * happen within a race in page table update. In the later
21809 * case just flush:
21810 */
21811+
21812+#ifdef CONFIG_PAX_PER_CPU_PGD
21813+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
21814+ pgd = pgd_offset_cpu(smp_processor_id(), address);
21815+#else
21816 pgd = pgd_offset(current->active_mm, address);
21817+#endif
21818+
21819 pgd_ref = pgd_offset_k(address);
21820 if (pgd_none(*pgd_ref))
21821 return -1;
21822@@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
21823 static int is_errata100(struct pt_regs *regs, unsigned long address)
21824 {
21825 #ifdef CONFIG_X86_64
21826- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
21827+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
21828 return 1;
21829 #endif
21830 return 0;
21831@@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
21832 }
21833
21834 static const char nx_warning[] = KERN_CRIT
21835-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
21836+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
21837
21838 static void
21839 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21840@@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21841 if (!oops_may_print())
21842 return;
21843
21844- if (error_code & PF_INSTR) {
21845+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
21846 unsigned int level;
21847
21848 pte_t *pte = lookup_address(address, &level);
21849
21850 if (pte && pte_present(*pte) && !pte_exec(*pte))
21851- printk(nx_warning, current_uid());
21852+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
21853 }
21854
21855+#ifdef CONFIG_PAX_KERNEXEC
21856+ if (init_mm.start_code <= address && address < init_mm.end_code) {
21857+ if (current->signal->curr_ip)
21858+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21859+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
21860+ else
21861+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21862+ current->comm, task_pid_nr(current), current_uid(), current_euid());
21863+ }
21864+#endif
21865+
21866 printk(KERN_ALERT "BUG: unable to handle kernel ");
21867 if (address < PAGE_SIZE)
21868 printk(KERN_CONT "NULL pointer dereference");
21869@@ -739,6 +820,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
21870 }
21871 #endif
21872
21873+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21874+ if (pax_is_fetch_fault(regs, error_code, address)) {
21875+
21876+#ifdef CONFIG_PAX_EMUTRAMP
21877+ switch (pax_handle_fetch_fault(regs)) {
21878+ case 2:
21879+ return;
21880+ }
21881+#endif
21882+
21883+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21884+ do_group_exit(SIGKILL);
21885+ }
21886+#endif
21887+
21888 if (unlikely(show_unhandled_signals))
21889 show_signal_msg(regs, error_code, address, tsk);
21890
21891@@ -835,7 +931,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
21892 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
21893 printk(KERN_ERR
21894 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
21895- tsk->comm, tsk->pid, address);
21896+ tsk->comm, task_pid_nr(tsk), address);
21897 code = BUS_MCEERR_AR;
21898 }
21899 #endif
21900@@ -890,6 +986,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
21901 return 1;
21902 }
21903
21904+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21905+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
21906+{
21907+ pte_t *pte;
21908+ pmd_t *pmd;
21909+ spinlock_t *ptl;
21910+ unsigned char pte_mask;
21911+
21912+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
21913+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
21914+ return 0;
21915+
21916+ /* PaX: it's our fault, let's handle it if we can */
21917+
21918+ /* PaX: take a look at read faults before acquiring any locks */
21919+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
21920+ /* instruction fetch attempt from a protected page in user mode */
21921+ up_read(&mm->mmap_sem);
21922+
21923+#ifdef CONFIG_PAX_EMUTRAMP
21924+ switch (pax_handle_fetch_fault(regs)) {
21925+ case 2:
21926+ return 1;
21927+ }
21928+#endif
21929+
21930+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21931+ do_group_exit(SIGKILL);
21932+ }
21933+
21934+ pmd = pax_get_pmd(mm, address);
21935+ if (unlikely(!pmd))
21936+ return 0;
21937+
21938+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
21939+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
21940+ pte_unmap_unlock(pte, ptl);
21941+ return 0;
21942+ }
21943+
21944+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
21945+ /* write attempt to a protected page in user mode */
21946+ pte_unmap_unlock(pte, ptl);
21947+ return 0;
21948+ }
21949+
21950+#ifdef CONFIG_SMP
21951+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
21952+#else
21953+ if (likely(address > get_limit(regs->cs)))
21954+#endif
21955+ {
21956+ set_pte(pte, pte_mkread(*pte));
21957+ __flush_tlb_one(address);
21958+ pte_unmap_unlock(pte, ptl);
21959+ up_read(&mm->mmap_sem);
21960+ return 1;
21961+ }
21962+
21963+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
21964+
21965+ /*
21966+ * PaX: fill DTLB with user rights and retry
21967+ */
21968+ __asm__ __volatile__ (
21969+ "orb %2,(%1)\n"
21970+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
21971+/*
21972+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
21973+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
21974+ * page fault when examined during a TLB load attempt. this is true not only
21975+ * for PTEs holding a non-present entry but also present entries that will
21976+ * raise a page fault (such as those set up by PaX, or the copy-on-write
21977+ * mechanism). in effect it means that we do *not* need to flush the TLBs
21978+ * for our target pages since their PTEs are simply not in the TLBs at all.
21979+
21980+ * the best thing in omitting it is that we gain around 15-20% speed in the
21981+ * fast path of the page fault handler and can get rid of tracing since we
21982+ * can no longer flush unintended entries.
21983+ */
21984+ "invlpg (%0)\n"
21985+#endif
21986+ __copyuser_seg"testb $0,(%0)\n"
21987+ "xorb %3,(%1)\n"
21988+ :
21989+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
21990+ : "memory", "cc");
21991+ pte_unmap_unlock(pte, ptl);
21992+ up_read(&mm->mmap_sem);
21993+ return 1;
21994+}
21995+#endif
21996+
21997 /*
21998 * Handle a spurious fault caused by a stale TLB entry.
21999 *
22000@@ -962,6 +1151,9 @@ int show_unhandled_signals = 1;
22001 static inline int
22002 access_error(unsigned long error_code, struct vm_area_struct *vma)
22003 {
22004+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
22005+ return 1;
22006+
22007 if (error_code & PF_WRITE) {
22008 /* write, present and write, not present: */
22009 if (unlikely(!(vma->vm_flags & VM_WRITE)))
22010@@ -995,18 +1187,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
22011 {
22012 struct vm_area_struct *vma;
22013 struct task_struct *tsk;
22014- unsigned long address;
22015 struct mm_struct *mm;
22016 int fault;
22017 int write = error_code & PF_WRITE;
22018 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
22019 (write ? FAULT_FLAG_WRITE : 0);
22020
22021- tsk = current;
22022- mm = tsk->mm;
22023-
22024 /* Get the faulting address: */
22025- address = read_cr2();
22026+ unsigned long address = read_cr2();
22027+
22028+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22029+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
22030+ if (!search_exception_tables(regs->ip)) {
22031+ bad_area_nosemaphore(regs, error_code, address);
22032+ return;
22033+ }
22034+ if (address < PAX_USER_SHADOW_BASE) {
22035+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
22036+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
22037+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
22038+ } else
22039+ address -= PAX_USER_SHADOW_BASE;
22040+ }
22041+#endif
22042+
22043+ tsk = current;
22044+ mm = tsk->mm;
22045
22046 /*
22047 * Detect and handle instructions that would cause a page fault for
22048@@ -1067,7 +1273,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
22049 * User-mode registers count as a user access even for any
22050 * potential system fault or CPU buglet:
22051 */
22052- if (user_mode_vm(regs)) {
22053+ if (user_mode(regs)) {
22054 local_irq_enable();
22055 error_code |= PF_USER;
22056 } else {
22057@@ -1122,6 +1328,11 @@ retry:
22058 might_sleep();
22059 }
22060
22061+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
22062+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
22063+ return;
22064+#endif
22065+
22066 vma = find_vma(mm, address);
22067 if (unlikely(!vma)) {
22068 bad_area(regs, error_code, address);
22069@@ -1133,18 +1344,24 @@ retry:
22070 bad_area(regs, error_code, address);
22071 return;
22072 }
22073- if (error_code & PF_USER) {
22074- /*
22075- * Accessing the stack below %sp is always a bug.
22076- * The large cushion allows instructions like enter
22077- * and pusha to work. ("enter $65535, $31" pushes
22078- * 32 pointers and then decrements %sp by 65535.)
22079- */
22080- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
22081- bad_area(regs, error_code, address);
22082- return;
22083- }
22084+ /*
22085+ * Accessing the stack below %sp is always a bug.
22086+ * The large cushion allows instructions like enter
22087+ * and pusha to work. ("enter $65535, $31" pushes
22088+ * 32 pointers and then decrements %sp by 65535.)
22089+ */
22090+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
22091+ bad_area(regs, error_code, address);
22092+ return;
22093 }
22094+
22095+#ifdef CONFIG_PAX_SEGMEXEC
22096+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
22097+ bad_area(regs, error_code, address);
22098+ return;
22099+ }
22100+#endif
22101+
22102 if (unlikely(expand_stack(vma, address))) {
22103 bad_area(regs, error_code, address);
22104 return;
22105@@ -1199,3 +1416,292 @@ good_area:
22106
22107 up_read(&mm->mmap_sem);
22108 }
22109+
22110+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22111+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
22112+{
22113+ struct mm_struct *mm = current->mm;
22114+ unsigned long ip = regs->ip;
22115+
22116+ if (v8086_mode(regs))
22117+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
22118+
22119+#ifdef CONFIG_PAX_PAGEEXEC
22120+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
22121+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
22122+ return true;
22123+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
22124+ return true;
22125+ return false;
22126+ }
22127+#endif
22128+
22129+#ifdef CONFIG_PAX_SEGMEXEC
22130+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
22131+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
22132+ return true;
22133+ return false;
22134+ }
22135+#endif
22136+
22137+ return false;
22138+}
22139+#endif
22140+
22141+#ifdef CONFIG_PAX_EMUTRAMP
22142+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
22143+{
22144+ int err;
22145+
22146+ do { /* PaX: libffi trampoline emulation */
22147+ unsigned char mov, jmp;
22148+ unsigned int addr1, addr2;
22149+
22150+#ifdef CONFIG_X86_64
22151+ if ((regs->ip + 9) >> 32)
22152+ break;
22153+#endif
22154+
22155+ err = get_user(mov, (unsigned char __user *)regs->ip);
22156+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
22157+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
22158+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
22159+
22160+ if (err)
22161+ break;
22162+
22163+ if (mov == 0xB8 && jmp == 0xE9) {
22164+ regs->ax = addr1;
22165+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
22166+ return 2;
22167+ }
22168+ } while (0);
22169+
22170+ do { /* PaX: gcc trampoline emulation #1 */
22171+ unsigned char mov1, mov2;
22172+ unsigned short jmp;
22173+ unsigned int addr1, addr2;
22174+
22175+#ifdef CONFIG_X86_64
22176+ if ((regs->ip + 11) >> 32)
22177+ break;
22178+#endif
22179+
22180+ err = get_user(mov1, (unsigned char __user *)regs->ip);
22181+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
22182+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
22183+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
22184+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
22185+
22186+ if (err)
22187+ break;
22188+
22189+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
22190+ regs->cx = addr1;
22191+ regs->ax = addr2;
22192+ regs->ip = addr2;
22193+ return 2;
22194+ }
22195+ } while (0);
22196+
22197+ do { /* PaX: gcc trampoline emulation #2 */
22198+ unsigned char mov, jmp;
22199+ unsigned int addr1, addr2;
22200+
22201+#ifdef CONFIG_X86_64
22202+ if ((regs->ip + 9) >> 32)
22203+ break;
22204+#endif
22205+
22206+ err = get_user(mov, (unsigned char __user *)regs->ip);
22207+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
22208+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
22209+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
22210+
22211+ if (err)
22212+ break;
22213+
22214+ if (mov == 0xB9 && jmp == 0xE9) {
22215+ regs->cx = addr1;
22216+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
22217+ return 2;
22218+ }
22219+ } while (0);
22220+
22221+ return 1; /* PaX in action */
22222+}
22223+
22224+#ifdef CONFIG_X86_64
22225+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
22226+{
22227+ int err;
22228+
22229+ do { /* PaX: libffi trampoline emulation */
22230+ unsigned short mov1, mov2, jmp1;
22231+ unsigned char stcclc, jmp2;
22232+ unsigned long addr1, addr2;
22233+
22234+ err = get_user(mov1, (unsigned short __user *)regs->ip);
22235+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
22236+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
22237+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
22238+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
22239+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
22240+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
22241+
22242+ if (err)
22243+ break;
22244+
22245+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22246+ regs->r11 = addr1;
22247+ regs->r10 = addr2;
22248+ if (stcclc == 0xF8)
22249+ regs->flags &= ~X86_EFLAGS_CF;
22250+ else
22251+ regs->flags |= X86_EFLAGS_CF;
22252+ regs->ip = addr1;
22253+ return 2;
22254+ }
22255+ } while (0);
22256+
22257+ do { /* PaX: gcc trampoline emulation #1 */
22258+ unsigned short mov1, mov2, jmp1;
22259+ unsigned char jmp2;
22260+ unsigned int addr1;
22261+ unsigned long addr2;
22262+
22263+ err = get_user(mov1, (unsigned short __user *)regs->ip);
22264+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
22265+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
22266+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
22267+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
22268+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
22269+
22270+ if (err)
22271+ break;
22272+
22273+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22274+ regs->r11 = addr1;
22275+ regs->r10 = addr2;
22276+ regs->ip = addr1;
22277+ return 2;
22278+ }
22279+ } while (0);
22280+
22281+ do { /* PaX: gcc trampoline emulation #2 */
22282+ unsigned short mov1, mov2, jmp1;
22283+ unsigned char jmp2;
22284+ unsigned long addr1, addr2;
22285+
22286+ err = get_user(mov1, (unsigned short __user *)regs->ip);
22287+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
22288+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
22289+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
22290+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
22291+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
22292+
22293+ if (err)
22294+ break;
22295+
22296+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22297+ regs->r11 = addr1;
22298+ regs->r10 = addr2;
22299+ regs->ip = addr1;
22300+ return 2;
22301+ }
22302+ } while (0);
22303+
22304+ return 1; /* PaX in action */
22305+}
22306+#endif
22307+
22308+/*
22309+ * PaX: decide what to do with offenders (regs->ip = fault address)
22310+ *
22311+ * returns 1 when task should be killed
22312+ * 2 when gcc trampoline was detected
22313+ */
22314+static int pax_handle_fetch_fault(struct pt_regs *regs)
22315+{
22316+ if (v8086_mode(regs))
22317+ return 1;
22318+
22319+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
22320+ return 1;
22321+
22322+#ifdef CONFIG_X86_32
22323+ return pax_handle_fetch_fault_32(regs);
22324+#else
22325+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
22326+ return pax_handle_fetch_fault_32(regs);
22327+ else
22328+ return pax_handle_fetch_fault_64(regs);
22329+#endif
22330+}
22331+#endif
22332+
22333+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22334+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
22335+{
22336+ long i;
22337+
22338+ printk(KERN_ERR "PAX: bytes at PC: ");
22339+ for (i = 0; i < 20; i++) {
22340+ unsigned char c;
22341+ if (get_user(c, (unsigned char __force_user *)pc+i))
22342+ printk(KERN_CONT "?? ");
22343+ else
22344+ printk(KERN_CONT "%02x ", c);
22345+ }
22346+ printk("\n");
22347+
22348+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
22349+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
22350+ unsigned long c;
22351+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
22352+#ifdef CONFIG_X86_32
22353+ printk(KERN_CONT "???????? ");
22354+#else
22355+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
22356+ printk(KERN_CONT "???????? ???????? ");
22357+ else
22358+ printk(KERN_CONT "???????????????? ");
22359+#endif
22360+ } else {
22361+#ifdef CONFIG_X86_64
22362+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
22363+ printk(KERN_CONT "%08x ", (unsigned int)c);
22364+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
22365+ } else
22366+#endif
22367+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
22368+ }
22369+ }
22370+ printk("\n");
22371+}
22372+#endif
22373+
22374+/**
22375+ * probe_kernel_write(): safely attempt to write to a location
22376+ * @dst: address to write to
22377+ * @src: pointer to the data that shall be written
22378+ * @size: size of the data chunk
22379+ *
22380+ * Safely write to address @dst from the buffer at @src. If a kernel fault
22381+ * happens, handle that and return -EFAULT.
22382+ */
22383+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
22384+{
22385+ long ret;
22386+ mm_segment_t old_fs = get_fs();
22387+
22388+ set_fs(KERNEL_DS);
22389+ pagefault_disable();
22390+ pax_open_kernel();
22391+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
22392+ pax_close_kernel();
22393+ pagefault_enable();
22394+ set_fs(old_fs);
22395+
22396+ return ret ? -EFAULT : 0;
22397+}
22398diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
22399index dd74e46..7d26398 100644
22400--- a/arch/x86/mm/gup.c
22401+++ b/arch/x86/mm/gup.c
22402@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
22403 addr = start;
22404 len = (unsigned long) nr_pages << PAGE_SHIFT;
22405 end = start + len;
22406- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22407+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22408 (void __user *)start, len)))
22409 return 0;
22410
22411diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
22412index f4f29b1..5cac4fb 100644
22413--- a/arch/x86/mm/highmem_32.c
22414+++ b/arch/x86/mm/highmem_32.c
22415@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
22416 idx = type + KM_TYPE_NR*smp_processor_id();
22417 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22418 BUG_ON(!pte_none(*(kmap_pte-idx)));
22419+
22420+ pax_open_kernel();
22421 set_pte(kmap_pte-idx, mk_pte(page, prot));
22422+ pax_close_kernel();
22423+
22424 arch_flush_lazy_mmu_mode();
22425
22426 return (void *)vaddr;
22427diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
22428index f581a18..29efd37 100644
22429--- a/arch/x86/mm/hugetlbpage.c
22430+++ b/arch/x86/mm/hugetlbpage.c
22431@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
22432 struct hstate *h = hstate_file(file);
22433 struct mm_struct *mm = current->mm;
22434 struct vm_area_struct *vma;
22435- unsigned long start_addr;
22436+ unsigned long start_addr, pax_task_size = TASK_SIZE;
22437+
22438+#ifdef CONFIG_PAX_SEGMEXEC
22439+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22440+ pax_task_size = SEGMEXEC_TASK_SIZE;
22441+#endif
22442+
22443+ pax_task_size -= PAGE_SIZE;
22444
22445 if (len > mm->cached_hole_size) {
22446- start_addr = mm->free_area_cache;
22447+ start_addr = mm->free_area_cache;
22448 } else {
22449- start_addr = TASK_UNMAPPED_BASE;
22450- mm->cached_hole_size = 0;
22451+ start_addr = mm->mmap_base;
22452+ mm->cached_hole_size = 0;
22453 }
22454
22455 full_search:
22456@@ -280,26 +287,27 @@ full_search:
22457
22458 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
22459 /* At this point: (!vma || addr < vma->vm_end). */
22460- if (TASK_SIZE - len < addr) {
22461+ if (pax_task_size - len < addr) {
22462 /*
22463 * Start a new search - just in case we missed
22464 * some holes.
22465 */
22466- if (start_addr != TASK_UNMAPPED_BASE) {
22467- start_addr = TASK_UNMAPPED_BASE;
22468+ if (start_addr != mm->mmap_base) {
22469+ start_addr = mm->mmap_base;
22470 mm->cached_hole_size = 0;
22471 goto full_search;
22472 }
22473 return -ENOMEM;
22474 }
22475- if (!vma || addr + len <= vma->vm_start) {
22476- mm->free_area_cache = addr + len;
22477- return addr;
22478- }
22479+ if (check_heap_stack_gap(vma, addr, len))
22480+ break;
22481 if (addr + mm->cached_hole_size < vma->vm_start)
22482 mm->cached_hole_size = vma->vm_start - addr;
22483 addr = ALIGN(vma->vm_end, huge_page_size(h));
22484 }
22485+
22486+ mm->free_area_cache = addr + len;
22487+ return addr;
22488 }
22489
22490 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22491@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22492 {
22493 struct hstate *h = hstate_file(file);
22494 struct mm_struct *mm = current->mm;
22495- struct vm_area_struct *vma, *prev_vma;
22496- unsigned long base = mm->mmap_base, addr = addr0;
22497+ struct vm_area_struct *vma;
22498+ unsigned long base = mm->mmap_base, addr;
22499 unsigned long largest_hole = mm->cached_hole_size;
22500- int first_time = 1;
22501
22502 /* don't allow allocations above current base */
22503 if (mm->free_area_cache > base)
22504@@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22505 largest_hole = 0;
22506 mm->free_area_cache = base;
22507 }
22508-try_again:
22509+
22510 /* make sure it can fit in the remaining address space */
22511 if (mm->free_area_cache < len)
22512 goto fail;
22513
22514 /* either no address requested or can't fit in requested address hole */
22515- addr = (mm->free_area_cache - len) & huge_page_mask(h);
22516+ addr = (mm->free_area_cache - len);
22517 do {
22518+ addr &= huge_page_mask(h);
22519+ vma = find_vma(mm, addr);
22520 /*
22521 * Lookup failure means no vma is above this address,
22522 * i.e. return with success:
22523- */
22524- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
22525- return addr;
22526-
22527- /*
22528 * new region fits between prev_vma->vm_end and
22529 * vma->vm_start, use it:
22530 */
22531- if (addr + len <= vma->vm_start &&
22532- (!prev_vma || (addr >= prev_vma->vm_end))) {
22533+ if (check_heap_stack_gap(vma, addr, len)) {
22534 /* remember the address as a hint for next time */
22535- mm->cached_hole_size = largest_hole;
22536- return (mm->free_area_cache = addr);
22537- } else {
22538- /* pull free_area_cache down to the first hole */
22539- if (mm->free_area_cache == vma->vm_end) {
22540- mm->free_area_cache = vma->vm_start;
22541- mm->cached_hole_size = largest_hole;
22542- }
22543+ mm->cached_hole_size = largest_hole;
22544+ return (mm->free_area_cache = addr);
22545+ }
22546+ /* pull free_area_cache down to the first hole */
22547+ if (mm->free_area_cache == vma->vm_end) {
22548+ mm->free_area_cache = vma->vm_start;
22549+ mm->cached_hole_size = largest_hole;
22550 }
22551
22552 /* remember the largest hole we saw so far */
22553 if (addr + largest_hole < vma->vm_start)
22554- largest_hole = vma->vm_start - addr;
22555+ largest_hole = vma->vm_start - addr;
22556
22557 /* try just below the current vma->vm_start */
22558- addr = (vma->vm_start - len) & huge_page_mask(h);
22559- } while (len <= vma->vm_start);
22560+ addr = skip_heap_stack_gap(vma, len);
22561+ } while (!IS_ERR_VALUE(addr));
22562
22563 fail:
22564 /*
22565- * if hint left us with no space for the requested
22566- * mapping then try again:
22567- */
22568- if (first_time) {
22569- mm->free_area_cache = base;
22570- largest_hole = 0;
22571- first_time = 0;
22572- goto try_again;
22573- }
22574- /*
22575 * A failed mmap() very likely causes application failure,
22576 * so fall back to the bottom-up function here. This scenario
22577 * can happen with large stack limits and large mmap()
22578 * allocations.
22579 */
22580- mm->free_area_cache = TASK_UNMAPPED_BASE;
22581+
22582+#ifdef CONFIG_PAX_SEGMEXEC
22583+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22584+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
22585+ else
22586+#endif
22587+
22588+ mm->mmap_base = TASK_UNMAPPED_BASE;
22589+
22590+#ifdef CONFIG_PAX_RANDMMAP
22591+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22592+ mm->mmap_base += mm->delta_mmap;
22593+#endif
22594+
22595+ mm->free_area_cache = mm->mmap_base;
22596 mm->cached_hole_size = ~0UL;
22597 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
22598 len, pgoff, flags);
22599@@ -386,6 +392,7 @@ fail:
22600 /*
22601 * Restore the topdown base:
22602 */
22603+ mm->mmap_base = base;
22604 mm->free_area_cache = base;
22605 mm->cached_hole_size = ~0UL;
22606
22607@@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22608 struct hstate *h = hstate_file(file);
22609 struct mm_struct *mm = current->mm;
22610 struct vm_area_struct *vma;
22611+ unsigned long pax_task_size = TASK_SIZE;
22612
22613 if (len & ~huge_page_mask(h))
22614 return -EINVAL;
22615- if (len > TASK_SIZE)
22616+
22617+#ifdef CONFIG_PAX_SEGMEXEC
22618+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22619+ pax_task_size = SEGMEXEC_TASK_SIZE;
22620+#endif
22621+
22622+ pax_task_size -= PAGE_SIZE;
22623+
22624+ if (len > pax_task_size)
22625 return -ENOMEM;
22626
22627 if (flags & MAP_FIXED) {
22628@@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22629 if (addr) {
22630 addr = ALIGN(addr, huge_page_size(h));
22631 vma = find_vma(mm, addr);
22632- if (TASK_SIZE - len >= addr &&
22633- (!vma || addr + len <= vma->vm_start))
22634+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
22635 return addr;
22636 }
22637 if (mm->get_unmapped_area == arch_get_unmapped_area)
22638diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
22639index 87488b9..399f416 100644
22640--- a/arch/x86/mm/init.c
22641+++ b/arch/x86/mm/init.c
22642@@ -15,6 +15,7 @@
22643 #include <asm/tlbflush.h>
22644 #include <asm/tlb.h>
22645 #include <asm/proto.h>
22646+#include <asm/desc.h>
22647
22648 unsigned long __initdata pgt_buf_start;
22649 unsigned long __meminitdata pgt_buf_end;
22650@@ -31,7 +32,7 @@ int direct_gbpages
22651 static void __init find_early_table_space(unsigned long end, int use_pse,
22652 int use_gbpages)
22653 {
22654- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
22655+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
22656 phys_addr_t base;
22657
22658 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
22659@@ -312,8 +313,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
22660 */
22661 int devmem_is_allowed(unsigned long pagenr)
22662 {
22663+#ifdef CONFIG_GRKERNSEC_KMEM
22664+ /* allow BDA */
22665+ if (!pagenr)
22666+ return 1;
22667+ /* allow EBDA */
22668+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
22669+ return 1;
22670+#else
22671+ if (!pagenr)
22672+ return 1;
22673+#ifdef CONFIG_VM86
22674+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
22675+ return 1;
22676+#endif
22677+#endif
22678+
22679+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22680+ return 1;
22681+#ifdef CONFIG_GRKERNSEC_KMEM
22682+ /* throw out everything else below 1MB */
22683 if (pagenr <= 256)
22684- return 1;
22685+ return 0;
22686+#endif
22687 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
22688 return 0;
22689 if (!page_is_ram(pagenr))
22690@@ -372,6 +394,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
22691
22692 void free_initmem(void)
22693 {
22694+
22695+#ifdef CONFIG_PAX_KERNEXEC
22696+#ifdef CONFIG_X86_32
22697+ /* PaX: limit KERNEL_CS to actual size */
22698+ unsigned long addr, limit;
22699+ struct desc_struct d;
22700+ int cpu;
22701+
22702+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
22703+ limit = (limit - 1UL) >> PAGE_SHIFT;
22704+
22705+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
22706+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
22707+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
22708+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22709+ }
22710+
22711+ /* PaX: make KERNEL_CS read-only */
22712+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22713+ if (!paravirt_enabled())
22714+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22715+/*
22716+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22717+ pgd = pgd_offset_k(addr);
22718+ pud = pud_offset(pgd, addr);
22719+ pmd = pmd_offset(pud, addr);
22720+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22721+ }
22722+*/
22723+#ifdef CONFIG_X86_PAE
22724+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22725+/*
22726+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22727+ pgd = pgd_offset_k(addr);
22728+ pud = pud_offset(pgd, addr);
22729+ pmd = pmd_offset(pud, addr);
22730+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22731+ }
22732+*/
22733+#endif
22734+
22735+#ifdef CONFIG_MODULES
22736+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22737+#endif
22738+
22739+#else
22740+ pgd_t *pgd;
22741+ pud_t *pud;
22742+ pmd_t *pmd;
22743+ unsigned long addr, end;
22744+
22745+ /* PaX: make kernel code/rodata read-only, rest non-executable */
22746+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22747+ pgd = pgd_offset_k(addr);
22748+ pud = pud_offset(pgd, addr);
22749+ pmd = pmd_offset(pud, addr);
22750+ if (!pmd_present(*pmd))
22751+ continue;
22752+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22753+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22754+ else
22755+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22756+ }
22757+
22758+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22759+ end = addr + KERNEL_IMAGE_SIZE;
22760+ for (; addr < end; addr += PMD_SIZE) {
22761+ pgd = pgd_offset_k(addr);
22762+ pud = pud_offset(pgd, addr);
22763+ pmd = pmd_offset(pud, addr);
22764+ if (!pmd_present(*pmd))
22765+ continue;
22766+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22767+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22768+ }
22769+#endif
22770+
22771+ flush_tlb_all();
22772+#endif
22773+
22774 free_init_pages("unused kernel memory",
22775 (unsigned long)(&__init_begin),
22776 (unsigned long)(&__init_end));
22777diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
22778index 29f7c6d..b46b35b 100644
22779--- a/arch/x86/mm/init_32.c
22780+++ b/arch/x86/mm/init_32.c
22781@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
22782 }
22783
22784 /*
22785- * Creates a middle page table and puts a pointer to it in the
22786- * given global directory entry. This only returns the gd entry
22787- * in non-PAE compilation mode, since the middle layer is folded.
22788- */
22789-static pmd_t * __init one_md_table_init(pgd_t *pgd)
22790-{
22791- pud_t *pud;
22792- pmd_t *pmd_table;
22793-
22794-#ifdef CONFIG_X86_PAE
22795- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
22796- if (after_bootmem)
22797- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
22798- else
22799- pmd_table = (pmd_t *)alloc_low_page();
22800- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
22801- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
22802- pud = pud_offset(pgd, 0);
22803- BUG_ON(pmd_table != pmd_offset(pud, 0));
22804-
22805- return pmd_table;
22806- }
22807-#endif
22808- pud = pud_offset(pgd, 0);
22809- pmd_table = pmd_offset(pud, 0);
22810-
22811- return pmd_table;
22812-}
22813-
22814-/*
22815 * Create a page table and place a pointer to it in a middle page
22816 * directory entry:
22817 */
22818@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
22819 page_table = (pte_t *)alloc_low_page();
22820
22821 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
22822+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22823+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
22824+#else
22825 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
22826+#endif
22827 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
22828 }
22829
22830 return pte_offset_kernel(pmd, 0);
22831 }
22832
22833+static pmd_t * __init one_md_table_init(pgd_t *pgd)
22834+{
22835+ pud_t *pud;
22836+ pmd_t *pmd_table;
22837+
22838+ pud = pud_offset(pgd, 0);
22839+ pmd_table = pmd_offset(pud, 0);
22840+
22841+ return pmd_table;
22842+}
22843+
22844 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
22845 {
22846 int pgd_idx = pgd_index(vaddr);
22847@@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22848 int pgd_idx, pmd_idx;
22849 unsigned long vaddr;
22850 pgd_t *pgd;
22851+ pud_t *pud;
22852 pmd_t *pmd;
22853 pte_t *pte = NULL;
22854
22855@@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22856 pgd = pgd_base + pgd_idx;
22857
22858 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
22859- pmd = one_md_table_init(pgd);
22860- pmd = pmd + pmd_index(vaddr);
22861+ pud = pud_offset(pgd, vaddr);
22862+ pmd = pmd_offset(pud, vaddr);
22863+
22864+#ifdef CONFIG_X86_PAE
22865+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22866+#endif
22867+
22868 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
22869 pmd++, pmd_idx++) {
22870 pte = page_table_kmap_check(one_page_table_init(pmd),
22871@@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22872 }
22873 }
22874
22875-static inline int is_kernel_text(unsigned long addr)
22876+static inline int is_kernel_text(unsigned long start, unsigned long end)
22877 {
22878- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
22879- return 1;
22880- return 0;
22881+ if ((start > ktla_ktva((unsigned long)_etext) ||
22882+ end <= ktla_ktva((unsigned long)_stext)) &&
22883+ (start > ktla_ktva((unsigned long)_einittext) ||
22884+ end <= ktla_ktva((unsigned long)_sinittext)) &&
22885+
22886+#ifdef CONFIG_ACPI_SLEEP
22887+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
22888+#endif
22889+
22890+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
22891+ return 0;
22892+ return 1;
22893 }
22894
22895 /*
22896@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
22897 unsigned long last_map_addr = end;
22898 unsigned long start_pfn, end_pfn;
22899 pgd_t *pgd_base = swapper_pg_dir;
22900- int pgd_idx, pmd_idx, pte_ofs;
22901+ unsigned int pgd_idx, pmd_idx, pte_ofs;
22902 unsigned long pfn;
22903 pgd_t *pgd;
22904+ pud_t *pud;
22905 pmd_t *pmd;
22906 pte_t *pte;
22907 unsigned pages_2m, pages_4k;
22908@@ -281,8 +282,13 @@ repeat:
22909 pfn = start_pfn;
22910 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22911 pgd = pgd_base + pgd_idx;
22912- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
22913- pmd = one_md_table_init(pgd);
22914+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
22915+ pud = pud_offset(pgd, 0);
22916+ pmd = pmd_offset(pud, 0);
22917+
22918+#ifdef CONFIG_X86_PAE
22919+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22920+#endif
22921
22922 if (pfn >= end_pfn)
22923 continue;
22924@@ -294,14 +300,13 @@ repeat:
22925 #endif
22926 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
22927 pmd++, pmd_idx++) {
22928- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
22929+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
22930
22931 /*
22932 * Map with big pages if possible, otherwise
22933 * create normal page tables:
22934 */
22935 if (use_pse) {
22936- unsigned int addr2;
22937 pgprot_t prot = PAGE_KERNEL_LARGE;
22938 /*
22939 * first pass will use the same initial
22940@@ -311,11 +316,7 @@ repeat:
22941 __pgprot(PTE_IDENT_ATTR |
22942 _PAGE_PSE);
22943
22944- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
22945- PAGE_OFFSET + PAGE_SIZE-1;
22946-
22947- if (is_kernel_text(addr) ||
22948- is_kernel_text(addr2))
22949+ if (is_kernel_text(address, address + PMD_SIZE))
22950 prot = PAGE_KERNEL_LARGE_EXEC;
22951
22952 pages_2m++;
22953@@ -332,7 +333,7 @@ repeat:
22954 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22955 pte += pte_ofs;
22956 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
22957- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
22958+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
22959 pgprot_t prot = PAGE_KERNEL;
22960 /*
22961 * first pass will use the same initial
22962@@ -340,7 +341,7 @@ repeat:
22963 */
22964 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
22965
22966- if (is_kernel_text(addr))
22967+ if (is_kernel_text(address, address + PAGE_SIZE))
22968 prot = PAGE_KERNEL_EXEC;
22969
22970 pages_4k++;
22971@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
22972
22973 pud = pud_offset(pgd, va);
22974 pmd = pmd_offset(pud, va);
22975- if (!pmd_present(*pmd))
22976+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
22977 break;
22978
22979 pte = pte_offset_kernel(pmd, va);
22980@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_range_init(void)
22981
22982 static void __init pagetable_init(void)
22983 {
22984- pgd_t *pgd_base = swapper_pg_dir;
22985-
22986- permanent_kmaps_init(pgd_base);
22987+ permanent_kmaps_init(swapper_pg_dir);
22988 }
22989
22990-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22991+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22992 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22993
22994 /* user-defined highmem size */
22995@@ -757,6 +756,12 @@ void __init mem_init(void)
22996
22997 pci_iommu_alloc();
22998
22999+#ifdef CONFIG_PAX_PER_CPU_PGD
23000+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
23001+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
23002+ KERNEL_PGD_PTRS);
23003+#endif
23004+
23005 #ifdef CONFIG_FLATMEM
23006 BUG_ON(!mem_map);
23007 #endif
23008@@ -774,7 +779,7 @@ void __init mem_init(void)
23009 set_highmem_pages_init();
23010
23011 codesize = (unsigned long) &_etext - (unsigned long) &_text;
23012- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
23013+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
23014 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
23015
23016 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
23017@@ -815,10 +820,10 @@ void __init mem_init(void)
23018 ((unsigned long)&__init_end -
23019 (unsigned long)&__init_begin) >> 10,
23020
23021- (unsigned long)&_etext, (unsigned long)&_edata,
23022- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
23023+ (unsigned long)&_sdata, (unsigned long)&_edata,
23024+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
23025
23026- (unsigned long)&_text, (unsigned long)&_etext,
23027+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
23028 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
23029
23030 /*
23031@@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
23032 if (!kernel_set_to_readonly)
23033 return;
23034
23035+ start = ktla_ktva(start);
23036 pr_debug("Set kernel text: %lx - %lx for read write\n",
23037 start, start+size);
23038
23039@@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
23040 if (!kernel_set_to_readonly)
23041 return;
23042
23043+ start = ktla_ktva(start);
23044 pr_debug("Set kernel text: %lx - %lx for read only\n",
23045 start, start+size);
23046
23047@@ -938,6 +945,7 @@ void mark_rodata_ro(void)
23048 unsigned long start = PFN_ALIGN(_text);
23049 unsigned long size = PFN_ALIGN(_etext) - start;
23050
23051+ start = ktla_ktva(start);
23052 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
23053 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
23054 size >> 10);
23055diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
23056index bbaaa00..796fa65 100644
23057--- a/arch/x86/mm/init_64.c
23058+++ b/arch/x86/mm/init_64.c
23059@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
23060 * around without checking the pgd every time.
23061 */
23062
23063-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
23064+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
23065 EXPORT_SYMBOL_GPL(__supported_pte_mask);
23066
23067 int force_personality32;
23068@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
23069
23070 for (address = start; address <= end; address += PGDIR_SIZE) {
23071 const pgd_t *pgd_ref = pgd_offset_k(address);
23072+
23073+#ifdef CONFIG_PAX_PER_CPU_PGD
23074+ unsigned long cpu;
23075+#else
23076 struct page *page;
23077+#endif
23078
23079 if (pgd_none(*pgd_ref))
23080 continue;
23081
23082 spin_lock(&pgd_lock);
23083+
23084+#ifdef CONFIG_PAX_PER_CPU_PGD
23085+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23086+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
23087+#else
23088 list_for_each_entry(page, &pgd_list, lru) {
23089 pgd_t *pgd;
23090 spinlock_t *pgt_lock;
23091@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
23092 /* the pgt_lock only for Xen */
23093 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
23094 spin_lock(pgt_lock);
23095+#endif
23096
23097 if (pgd_none(*pgd))
23098 set_pgd(pgd, *pgd_ref);
23099@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
23100 BUG_ON(pgd_page_vaddr(*pgd)
23101 != pgd_page_vaddr(*pgd_ref));
23102
23103+#ifndef CONFIG_PAX_PER_CPU_PGD
23104 spin_unlock(pgt_lock);
23105+#endif
23106+
23107 }
23108 spin_unlock(&pgd_lock);
23109 }
23110@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
23111 pmd = fill_pmd(pud, vaddr);
23112 pte = fill_pte(pmd, vaddr);
23113
23114+ pax_open_kernel();
23115 set_pte(pte, new_pte);
23116+ pax_close_kernel();
23117
23118 /*
23119 * It's enough to flush this one mapping.
23120@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
23121 pgd = pgd_offset_k((unsigned long)__va(phys));
23122 if (pgd_none(*pgd)) {
23123 pud = (pud_t *) spp_getpage();
23124- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
23125- _PAGE_USER));
23126+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
23127 }
23128 pud = pud_offset(pgd, (unsigned long)__va(phys));
23129 if (pud_none(*pud)) {
23130 pmd = (pmd_t *) spp_getpage();
23131- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
23132- _PAGE_USER));
23133+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
23134 }
23135 pmd = pmd_offset(pud, phys);
23136 BUG_ON(!pmd_none(*pmd));
23137@@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
23138 if (pfn >= pgt_buf_top)
23139 panic("alloc_low_page: ran out of memory");
23140
23141- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
23142+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
23143 clear_page(adr);
23144 *phys = pfn * PAGE_SIZE;
23145 return adr;
23146@@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
23147
23148 phys = __pa(virt);
23149 left = phys & (PAGE_SIZE - 1);
23150- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
23151+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
23152 adr = (void *)(((unsigned long)adr) | left);
23153
23154 return adr;
23155@@ -693,6 +707,12 @@ void __init mem_init(void)
23156
23157 pci_iommu_alloc();
23158
23159+#ifdef CONFIG_PAX_PER_CPU_PGD
23160+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
23161+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
23162+ KERNEL_PGD_PTRS);
23163+#endif
23164+
23165 /* clear_bss() already clear the empty_zero_page */
23166
23167 reservedpages = 0;
23168@@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
23169 static struct vm_area_struct gate_vma = {
23170 .vm_start = VSYSCALL_START,
23171 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
23172- .vm_page_prot = PAGE_READONLY_EXEC,
23173- .vm_flags = VM_READ | VM_EXEC
23174+ .vm_page_prot = PAGE_READONLY,
23175+ .vm_flags = VM_READ
23176 };
23177
23178 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
23179@@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long addr)
23180
23181 const char *arch_vma_name(struct vm_area_struct *vma)
23182 {
23183- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
23184+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
23185 return "[vdso]";
23186 if (vma == &gate_vma)
23187 return "[vsyscall]";
23188diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
23189index 7b179b4..6bd1777 100644
23190--- a/arch/x86/mm/iomap_32.c
23191+++ b/arch/x86/mm/iomap_32.c
23192@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
23193 type = kmap_atomic_idx_push();
23194 idx = type + KM_TYPE_NR * smp_processor_id();
23195 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
23196+
23197+ pax_open_kernel();
23198 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
23199+ pax_close_kernel();
23200+
23201 arch_flush_lazy_mmu_mode();
23202
23203 return (void *)vaddr;
23204diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
23205index be1ef57..55f0160 100644
23206--- a/arch/x86/mm/ioremap.c
23207+++ b/arch/x86/mm/ioremap.c
23208@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
23209 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
23210 int is_ram = page_is_ram(pfn);
23211
23212- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
23213+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
23214 return NULL;
23215 WARN_ON_ONCE(is_ram);
23216 }
23217@@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
23218
23219 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
23220 if (page_is_ram(start >> PAGE_SHIFT))
23221+#ifdef CONFIG_HIGHMEM
23222+ if ((start >> PAGE_SHIFT) < max_low_pfn)
23223+#endif
23224 return __va(phys);
23225
23226 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
23227@@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
23228 early_param("early_ioremap_debug", early_ioremap_debug_setup);
23229
23230 static __initdata int after_paging_init;
23231-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
23232+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
23233
23234 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
23235 {
23236@@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
23237 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
23238
23239 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
23240- memset(bm_pte, 0, sizeof(bm_pte));
23241- pmd_populate_kernel(&init_mm, pmd, bm_pte);
23242+ pmd_populate_user(&init_mm, pmd, bm_pte);
23243
23244 /*
23245 * The boot-ioremap range spans multiple pmds, for which
23246diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
23247index d87dd6d..bf3fa66 100644
23248--- a/arch/x86/mm/kmemcheck/kmemcheck.c
23249+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
23250@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
23251 * memory (e.g. tracked pages)? For now, we need this to avoid
23252 * invoking kmemcheck for PnP BIOS calls.
23253 */
23254- if (regs->flags & X86_VM_MASK)
23255+ if (v8086_mode(regs))
23256 return false;
23257- if (regs->cs != __KERNEL_CS)
23258+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
23259 return false;
23260
23261 pte = kmemcheck_pte_lookup(address);
23262diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
23263index 845df68..1d8d29f 100644
23264--- a/arch/x86/mm/mmap.c
23265+++ b/arch/x86/mm/mmap.c
23266@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
23267 * Leave an at least ~128 MB hole with possible stack randomization.
23268 */
23269 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
23270-#define MAX_GAP (TASK_SIZE/6*5)
23271+#define MAX_GAP (pax_task_size/6*5)
23272
23273 static int mmap_is_legacy(void)
23274 {
23275@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
23276 return rnd << PAGE_SHIFT;
23277 }
23278
23279-static unsigned long mmap_base(void)
23280+static unsigned long mmap_base(struct mm_struct *mm)
23281 {
23282 unsigned long gap = rlimit(RLIMIT_STACK);
23283+ unsigned long pax_task_size = TASK_SIZE;
23284+
23285+#ifdef CONFIG_PAX_SEGMEXEC
23286+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23287+ pax_task_size = SEGMEXEC_TASK_SIZE;
23288+#endif
23289
23290 if (gap < MIN_GAP)
23291 gap = MIN_GAP;
23292 else if (gap > MAX_GAP)
23293 gap = MAX_GAP;
23294
23295- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
23296+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
23297 }
23298
23299 /*
23300 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
23301 * does, but not when emulating X86_32
23302 */
23303-static unsigned long mmap_legacy_base(void)
23304+static unsigned long mmap_legacy_base(struct mm_struct *mm)
23305 {
23306- if (mmap_is_ia32())
23307+ if (mmap_is_ia32()) {
23308+
23309+#ifdef CONFIG_PAX_SEGMEXEC
23310+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23311+ return SEGMEXEC_TASK_UNMAPPED_BASE;
23312+ else
23313+#endif
23314+
23315 return TASK_UNMAPPED_BASE;
23316- else
23317+ } else
23318 return TASK_UNMAPPED_BASE + mmap_rnd();
23319 }
23320
23321@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
23322 void arch_pick_mmap_layout(struct mm_struct *mm)
23323 {
23324 if (mmap_is_legacy()) {
23325- mm->mmap_base = mmap_legacy_base();
23326+ mm->mmap_base = mmap_legacy_base(mm);
23327+
23328+#ifdef CONFIG_PAX_RANDMMAP
23329+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23330+ mm->mmap_base += mm->delta_mmap;
23331+#endif
23332+
23333 mm->get_unmapped_area = arch_get_unmapped_area;
23334 mm->unmap_area = arch_unmap_area;
23335 } else {
23336- mm->mmap_base = mmap_base();
23337+ mm->mmap_base = mmap_base(mm);
23338+
23339+#ifdef CONFIG_PAX_RANDMMAP
23340+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23341+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
23342+#endif
23343+
23344 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
23345 mm->unmap_area = arch_unmap_area_topdown;
23346 }
23347diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
23348index de54b9b..799051e 100644
23349--- a/arch/x86/mm/mmio-mod.c
23350+++ b/arch/x86/mm/mmio-mod.c
23351@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
23352 break;
23353 default:
23354 {
23355- unsigned char *ip = (unsigned char *)instptr;
23356+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
23357 my_trace->opcode = MMIO_UNKNOWN_OP;
23358 my_trace->width = 0;
23359 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
23360@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
23361 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23362 void __iomem *addr)
23363 {
23364- static atomic_t next_id;
23365+ static atomic_unchecked_t next_id;
23366 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
23367 /* These are page-unaligned. */
23368 struct mmiotrace_map map = {
23369@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23370 .private = trace
23371 },
23372 .phys = offset,
23373- .id = atomic_inc_return(&next_id)
23374+ .id = atomic_inc_return_unchecked(&next_id)
23375 };
23376 map.map_id = trace->id;
23377
23378diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
23379index b008656..773eac2 100644
23380--- a/arch/x86/mm/pageattr-test.c
23381+++ b/arch/x86/mm/pageattr-test.c
23382@@ -36,7 +36,7 @@ enum {
23383
23384 static int pte_testbit(pte_t pte)
23385 {
23386- return pte_flags(pte) & _PAGE_UNUSED1;
23387+ return pte_flags(pte) & _PAGE_CPA_TEST;
23388 }
23389
23390 struct split_state {
23391diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
23392index f9e5267..77b1a40 100644
23393--- a/arch/x86/mm/pageattr.c
23394+++ b/arch/x86/mm/pageattr.c
23395@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23396 */
23397 #ifdef CONFIG_PCI_BIOS
23398 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
23399- pgprot_val(forbidden) |= _PAGE_NX;
23400+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23401 #endif
23402
23403 /*
23404@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23405 * Does not cover __inittext since that is gone later on. On
23406 * 64bit we do not enforce !NX on the low mapping
23407 */
23408- if (within(address, (unsigned long)_text, (unsigned long)_etext))
23409- pgprot_val(forbidden) |= _PAGE_NX;
23410+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
23411+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23412
23413+#ifdef CONFIG_DEBUG_RODATA
23414 /*
23415 * The .rodata section needs to be read-only. Using the pfn
23416 * catches all aliases.
23417@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23418 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
23419 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
23420 pgprot_val(forbidden) |= _PAGE_RW;
23421+#endif
23422
23423 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
23424 /*
23425@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23426 }
23427 #endif
23428
23429+#ifdef CONFIG_PAX_KERNEXEC
23430+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
23431+ pgprot_val(forbidden) |= _PAGE_RW;
23432+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23433+ }
23434+#endif
23435+
23436 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
23437
23438 return prot;
23439@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
23440 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
23441 {
23442 /* change init_mm */
23443+ pax_open_kernel();
23444 set_pte_atomic(kpte, pte);
23445+
23446 #ifdef CONFIG_X86_32
23447 if (!SHARED_KERNEL_PMD) {
23448+
23449+#ifdef CONFIG_PAX_PER_CPU_PGD
23450+ unsigned long cpu;
23451+#else
23452 struct page *page;
23453+#endif
23454
23455+#ifdef CONFIG_PAX_PER_CPU_PGD
23456+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23457+ pgd_t *pgd = get_cpu_pgd(cpu);
23458+#else
23459 list_for_each_entry(page, &pgd_list, lru) {
23460- pgd_t *pgd;
23461+ pgd_t *pgd = (pgd_t *)page_address(page);
23462+#endif
23463+
23464 pud_t *pud;
23465 pmd_t *pmd;
23466
23467- pgd = (pgd_t *)page_address(page) + pgd_index(address);
23468+ pgd += pgd_index(address);
23469 pud = pud_offset(pgd, address);
23470 pmd = pmd_offset(pud, address);
23471 set_pte_atomic((pte_t *)pmd, pte);
23472 }
23473 }
23474 #endif
23475+ pax_close_kernel();
23476 }
23477
23478 static int
23479diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
23480index f6ff57b..481690f 100644
23481--- a/arch/x86/mm/pat.c
23482+++ b/arch/x86/mm/pat.c
23483@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
23484
23485 if (!entry) {
23486 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
23487- current->comm, current->pid, start, end);
23488+ current->comm, task_pid_nr(current), start, end);
23489 return -EINVAL;
23490 }
23491
23492@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23493 while (cursor < to) {
23494 if (!devmem_is_allowed(pfn)) {
23495 printk(KERN_INFO
23496- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23497- current->comm, from, to);
23498+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
23499+ current->comm, from, to, cursor);
23500 return 0;
23501 }
23502 cursor += PAGE_SIZE;
23503@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
23504 printk(KERN_INFO
23505 "%s:%d ioremap_change_attr failed %s "
23506 "for %Lx-%Lx\n",
23507- current->comm, current->pid,
23508+ current->comm, task_pid_nr(current),
23509 cattr_name(flags),
23510 base, (unsigned long long)(base + size));
23511 return -EINVAL;
23512@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23513 if (want_flags != flags) {
23514 printk(KERN_WARNING
23515 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
23516- current->comm, current->pid,
23517+ current->comm, task_pid_nr(current),
23518 cattr_name(want_flags),
23519 (unsigned long long)paddr,
23520 (unsigned long long)(paddr + size),
23521@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23522 free_memtype(paddr, paddr + size);
23523 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
23524 " for %Lx-%Lx, got %s\n",
23525- current->comm, current->pid,
23526+ current->comm, task_pid_nr(current),
23527 cattr_name(want_flags),
23528 (unsigned long long)paddr,
23529 (unsigned long long)(paddr + size),
23530diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
23531index 9f0614d..92ae64a 100644
23532--- a/arch/x86/mm/pf_in.c
23533+++ b/arch/x86/mm/pf_in.c
23534@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
23535 int i;
23536 enum reason_type rv = OTHERS;
23537
23538- p = (unsigned char *)ins_addr;
23539+ p = (unsigned char *)ktla_ktva(ins_addr);
23540 p += skip_prefix(p, &prf);
23541 p += get_opcode(p, &opcode);
23542
23543@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
23544 struct prefix_bits prf;
23545 int i;
23546
23547- p = (unsigned char *)ins_addr;
23548+ p = (unsigned char *)ktla_ktva(ins_addr);
23549 p += skip_prefix(p, &prf);
23550 p += get_opcode(p, &opcode);
23551
23552@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
23553 struct prefix_bits prf;
23554 int i;
23555
23556- p = (unsigned char *)ins_addr;
23557+ p = (unsigned char *)ktla_ktva(ins_addr);
23558 p += skip_prefix(p, &prf);
23559 p += get_opcode(p, &opcode);
23560
23561@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
23562 struct prefix_bits prf;
23563 int i;
23564
23565- p = (unsigned char *)ins_addr;
23566+ p = (unsigned char *)ktla_ktva(ins_addr);
23567 p += skip_prefix(p, &prf);
23568 p += get_opcode(p, &opcode);
23569 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
23570@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
23571 struct prefix_bits prf;
23572 int i;
23573
23574- p = (unsigned char *)ins_addr;
23575+ p = (unsigned char *)ktla_ktva(ins_addr);
23576 p += skip_prefix(p, &prf);
23577 p += get_opcode(p, &opcode);
23578 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
23579diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
23580index 8573b83..c3b1a30 100644
23581--- a/arch/x86/mm/pgtable.c
23582+++ b/arch/x86/mm/pgtable.c
23583@@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *pgd)
23584 list_del(&page->lru);
23585 }
23586
23587-#define UNSHARED_PTRS_PER_PGD \
23588- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23589+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23590+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
23591
23592+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23593+{
23594+ while (count--)
23595+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
23596+}
23597+#endif
23598
23599+#ifdef CONFIG_PAX_PER_CPU_PGD
23600+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23601+{
23602+ while (count--)
23603+
23604+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23605+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
23606+#else
23607+ *dst++ = *src++;
23608+#endif
23609+
23610+}
23611+#endif
23612+
23613+#ifdef CONFIG_X86_64
23614+#define pxd_t pud_t
23615+#define pyd_t pgd_t
23616+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
23617+#define pxd_free(mm, pud) pud_free((mm), (pud))
23618+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
23619+#define pyd_offset(mm, address) pgd_offset((mm), (address))
23620+#define PYD_SIZE PGDIR_SIZE
23621+#else
23622+#define pxd_t pmd_t
23623+#define pyd_t pud_t
23624+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
23625+#define pxd_free(mm, pud) pmd_free((mm), (pud))
23626+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
23627+#define pyd_offset(mm, address) pud_offset((mm), (address))
23628+#define PYD_SIZE PUD_SIZE
23629+#endif
23630+
23631+#ifdef CONFIG_PAX_PER_CPU_PGD
23632+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
23633+static inline void pgd_dtor(pgd_t *pgd) {}
23634+#else
23635 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
23636 {
23637 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
23638@@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
23639 pgd_list_del(pgd);
23640 spin_unlock(&pgd_lock);
23641 }
23642+#endif
23643
23644 /*
23645 * List of all pgd's needed for non-PAE so it can invalidate entries
23646@@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
23647 * -- wli
23648 */
23649
23650-#ifdef CONFIG_X86_PAE
23651+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
23652 /*
23653 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
23654 * updating the top-level pagetable entries to guarantee the
23655@@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
23656 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
23657 * and initialize the kernel pmds here.
23658 */
23659-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
23660+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23661
23662 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23663 {
23664@@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23665 */
23666 flush_tlb_mm(mm);
23667 }
23668+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
23669+#define PREALLOCATED_PXDS USER_PGD_PTRS
23670 #else /* !CONFIG_X86_PAE */
23671
23672 /* No need to prepopulate any pagetable entries in non-PAE modes. */
23673-#define PREALLOCATED_PMDS 0
23674+#define PREALLOCATED_PXDS 0
23675
23676 #endif /* CONFIG_X86_PAE */
23677
23678-static void free_pmds(pmd_t *pmds[])
23679+static void free_pxds(pxd_t *pxds[])
23680 {
23681 int i;
23682
23683- for(i = 0; i < PREALLOCATED_PMDS; i++)
23684- if (pmds[i])
23685- free_page((unsigned long)pmds[i]);
23686+ for(i = 0; i < PREALLOCATED_PXDS; i++)
23687+ if (pxds[i])
23688+ free_page((unsigned long)pxds[i]);
23689 }
23690
23691-static int preallocate_pmds(pmd_t *pmds[])
23692+static int preallocate_pxds(pxd_t *pxds[])
23693 {
23694 int i;
23695 bool failed = false;
23696
23697- for(i = 0; i < PREALLOCATED_PMDS; i++) {
23698- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
23699- if (pmd == NULL)
23700+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
23701+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
23702+ if (pxd == NULL)
23703 failed = true;
23704- pmds[i] = pmd;
23705+ pxds[i] = pxd;
23706 }
23707
23708 if (failed) {
23709- free_pmds(pmds);
23710+ free_pxds(pxds);
23711 return -ENOMEM;
23712 }
23713
23714@@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[])
23715 * preallocate which never got a corresponding vma will need to be
23716 * freed manually.
23717 */
23718-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
23719+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
23720 {
23721 int i;
23722
23723- for(i = 0; i < PREALLOCATED_PMDS; i++) {
23724+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
23725 pgd_t pgd = pgdp[i];
23726
23727 if (pgd_val(pgd) != 0) {
23728- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
23729+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
23730
23731- pgdp[i] = native_make_pgd(0);
23732+ set_pgd(pgdp + i, native_make_pgd(0));
23733
23734- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
23735- pmd_free(mm, pmd);
23736+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
23737+ pxd_free(mm, pxd);
23738 }
23739 }
23740 }
23741
23742-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
23743+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
23744 {
23745- pud_t *pud;
23746+ pyd_t *pyd;
23747 unsigned long addr;
23748 int i;
23749
23750- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
23751+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
23752 return;
23753
23754- pud = pud_offset(pgd, 0);
23755+#ifdef CONFIG_X86_64
23756+ pyd = pyd_offset(mm, 0L);
23757+#else
23758+ pyd = pyd_offset(pgd, 0L);
23759+#endif
23760
23761- for (addr = i = 0; i < PREALLOCATED_PMDS;
23762- i++, pud++, addr += PUD_SIZE) {
23763- pmd_t *pmd = pmds[i];
23764+ for (addr = i = 0; i < PREALLOCATED_PXDS;
23765+ i++, pyd++, addr += PYD_SIZE) {
23766+ pxd_t *pxd = pxds[i];
23767
23768 if (i >= KERNEL_PGD_BOUNDARY)
23769- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23770- sizeof(pmd_t) * PTRS_PER_PMD);
23771+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23772+ sizeof(pxd_t) * PTRS_PER_PMD);
23773
23774- pud_populate(mm, pud, pmd);
23775+ pyd_populate(mm, pyd, pxd);
23776 }
23777 }
23778
23779 pgd_t *pgd_alloc(struct mm_struct *mm)
23780 {
23781 pgd_t *pgd;
23782- pmd_t *pmds[PREALLOCATED_PMDS];
23783+ pxd_t *pxds[PREALLOCATED_PXDS];
23784
23785 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
23786
23787@@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23788
23789 mm->pgd = pgd;
23790
23791- if (preallocate_pmds(pmds) != 0)
23792+ if (preallocate_pxds(pxds) != 0)
23793 goto out_free_pgd;
23794
23795 if (paravirt_pgd_alloc(mm) != 0)
23796- goto out_free_pmds;
23797+ goto out_free_pxds;
23798
23799 /*
23800 * Make sure that pre-populating the pmds is atomic with
23801@@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23802 spin_lock(&pgd_lock);
23803
23804 pgd_ctor(mm, pgd);
23805- pgd_prepopulate_pmd(mm, pgd, pmds);
23806+ pgd_prepopulate_pxd(mm, pgd, pxds);
23807
23808 spin_unlock(&pgd_lock);
23809
23810 return pgd;
23811
23812-out_free_pmds:
23813- free_pmds(pmds);
23814+out_free_pxds:
23815+ free_pxds(pxds);
23816 out_free_pgd:
23817 free_page((unsigned long)pgd);
23818 out:
23819@@ -295,7 +344,7 @@ out:
23820
23821 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
23822 {
23823- pgd_mop_up_pmds(mm, pgd);
23824+ pgd_mop_up_pxds(mm, pgd);
23825 pgd_dtor(pgd);
23826 paravirt_pgd_free(mm, pgd);
23827 free_page((unsigned long)pgd);
23828diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
23829index cac7184..09a39fa 100644
23830--- a/arch/x86/mm/pgtable_32.c
23831+++ b/arch/x86/mm/pgtable_32.c
23832@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
23833 return;
23834 }
23835 pte = pte_offset_kernel(pmd, vaddr);
23836+
23837+ pax_open_kernel();
23838 if (pte_val(pteval))
23839 set_pte_at(&init_mm, vaddr, pte, pteval);
23840 else
23841 pte_clear(&init_mm, vaddr, pte);
23842+ pax_close_kernel();
23843
23844 /*
23845 * It's enough to flush this one mapping.
23846diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
23847index 410531d..0f16030 100644
23848--- a/arch/x86/mm/setup_nx.c
23849+++ b/arch/x86/mm/setup_nx.c
23850@@ -5,8 +5,10 @@
23851 #include <asm/pgtable.h>
23852 #include <asm/proto.h>
23853
23854+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23855 static int disable_nx __cpuinitdata;
23856
23857+#ifndef CONFIG_PAX_PAGEEXEC
23858 /*
23859 * noexec = on|off
23860 *
23861@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
23862 return 0;
23863 }
23864 early_param("noexec", noexec_setup);
23865+#endif
23866+
23867+#endif
23868
23869 void __cpuinit x86_configure_nx(void)
23870 {
23871+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23872 if (cpu_has_nx && !disable_nx)
23873 __supported_pte_mask |= _PAGE_NX;
23874 else
23875+#endif
23876 __supported_pte_mask &= ~_PAGE_NX;
23877 }
23878
23879diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
23880index d6c0418..06a0ad5 100644
23881--- a/arch/x86/mm/tlb.c
23882+++ b/arch/x86/mm/tlb.c
23883@@ -65,7 +65,11 @@ void leave_mm(int cpu)
23884 BUG();
23885 cpumask_clear_cpu(cpu,
23886 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
23887+
23888+#ifndef CONFIG_PAX_PER_CPU_PGD
23889 load_cr3(swapper_pg_dir);
23890+#endif
23891+
23892 }
23893 EXPORT_SYMBOL_GPL(leave_mm);
23894
23895diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
23896index 6687022..ceabcfa 100644
23897--- a/arch/x86/net/bpf_jit.S
23898+++ b/arch/x86/net/bpf_jit.S
23899@@ -9,6 +9,7 @@
23900 */
23901 #include <linux/linkage.h>
23902 #include <asm/dwarf2.h>
23903+#include <asm/alternative-asm.h>
23904
23905 /*
23906 * Calling convention :
23907@@ -35,6 +36,7 @@ sk_load_word:
23908 jle bpf_slow_path_word
23909 mov (SKBDATA,%rsi),%eax
23910 bswap %eax /* ntohl() */
23911+ pax_force_retaddr
23912 ret
23913
23914
23915@@ -53,6 +55,7 @@ sk_load_half:
23916 jle bpf_slow_path_half
23917 movzwl (SKBDATA,%rsi),%eax
23918 rol $8,%ax # ntohs()
23919+ pax_force_retaddr
23920 ret
23921
23922 sk_load_byte_ind:
23923@@ -66,6 +69,7 @@ sk_load_byte:
23924 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
23925 jle bpf_slow_path_byte
23926 movzbl (SKBDATA,%rsi),%eax
23927+ pax_force_retaddr
23928 ret
23929
23930 /**
23931@@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
23932 movzbl (SKBDATA,%rsi),%ebx
23933 and $15,%bl
23934 shl $2,%bl
23935+ pax_force_retaddr
23936 ret
23937 CFI_ENDPROC
23938 ENDPROC(sk_load_byte_msh)
23939@@ -91,6 +96,7 @@ bpf_error:
23940 xor %eax,%eax
23941 mov -8(%rbp),%rbx
23942 leaveq
23943+ pax_force_retaddr
23944 ret
23945
23946 /* rsi contains offset and can be scratched */
23947@@ -113,6 +119,7 @@ bpf_slow_path_word:
23948 js bpf_error
23949 mov -12(%rbp),%eax
23950 bswap %eax
23951+ pax_force_retaddr
23952 ret
23953
23954 bpf_slow_path_half:
23955@@ -121,12 +128,14 @@ bpf_slow_path_half:
23956 mov -12(%rbp),%ax
23957 rol $8,%ax
23958 movzwl %ax,%eax
23959+ pax_force_retaddr
23960 ret
23961
23962 bpf_slow_path_byte:
23963 bpf_slow_path_common(1)
23964 js bpf_error
23965 movzbl -12(%rbp),%eax
23966+ pax_force_retaddr
23967 ret
23968
23969 bpf_slow_path_byte_msh:
23970@@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
23971 and $15,%al
23972 shl $2,%al
23973 xchg %eax,%ebx
23974+ pax_force_retaddr
23975 ret
23976diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
23977index 7c1b765..8c072c6 100644
23978--- a/arch/x86/net/bpf_jit_comp.c
23979+++ b/arch/x86/net/bpf_jit_comp.c
23980@@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
23981 set_fs(old_fs);
23982 }
23983
23984+struct bpf_jit_work {
23985+ struct work_struct work;
23986+ void *image;
23987+};
23988
23989 void bpf_jit_compile(struct sk_filter *fp)
23990 {
23991@@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp)
23992 if (addrs == NULL)
23993 return;
23994
23995+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
23996+ if (!fp->work)
23997+ goto out;
23998+
23999 /* Before first pass, make a rough estimation of addrs[]
24000 * each bpf instruction is translated to less than 64 bytes
24001 */
24002@@ -476,7 +484,7 @@ void bpf_jit_compile(struct sk_filter *fp)
24003 func = sk_load_word;
24004 common_load: seen |= SEEN_DATAREF;
24005 if ((int)K < 0)
24006- goto out;
24007+ goto error;
24008 t_offset = func - (image + addrs[i]);
24009 EMIT1_off32(0xbe, K); /* mov imm32,%esi */
24010 EMIT1_off32(0xe8, t_offset); /* call */
24011@@ -586,17 +594,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
24012 break;
24013 default:
24014 /* hmm, too complex filter, give up with jit compiler */
24015- goto out;
24016+ goto error;
24017 }
24018 ilen = prog - temp;
24019 if (image) {
24020 if (unlikely(proglen + ilen > oldproglen)) {
24021 pr_err("bpb_jit_compile fatal error\n");
24022- kfree(addrs);
24023- module_free(NULL, image);
24024- return;
24025+ module_free_exec(NULL, image);
24026+ goto error;
24027 }
24028+ pax_open_kernel();
24029 memcpy(image + proglen, temp, ilen);
24030+ pax_close_kernel();
24031 }
24032 proglen += ilen;
24033 addrs[i] = proglen;
24034@@ -617,11 +626,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
24035 break;
24036 }
24037 if (proglen == oldproglen) {
24038- image = module_alloc(max_t(unsigned int,
24039- proglen,
24040- sizeof(struct work_struct)));
24041+ image = module_alloc_exec(proglen);
24042 if (!image)
24043- goto out;
24044+ goto error;
24045 }
24046 oldproglen = proglen;
24047 }
24048@@ -637,7 +644,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
24049 bpf_flush_icache(image, image + proglen);
24050
24051 fp->bpf_func = (void *)image;
24052- }
24053+ } else
24054+error:
24055+ kfree(fp->work);
24056+
24057 out:
24058 kfree(addrs);
24059 return;
24060@@ -645,18 +655,20 @@ out:
24061
24062 static void jit_free_defer(struct work_struct *arg)
24063 {
24064- module_free(NULL, arg);
24065+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
24066+ kfree(arg);
24067 }
24068
24069 /* run from softirq, we must use a work_struct to call
24070- * module_free() from process context
24071+ * module_free_exec() from process context
24072 */
24073 void bpf_jit_free(struct sk_filter *fp)
24074 {
24075 if (fp->bpf_func != sk_run_filter) {
24076- struct work_struct *work = (struct work_struct *)fp->bpf_func;
24077+ struct work_struct *work = &fp->work->work;
24078
24079 INIT_WORK(work, jit_free_defer);
24080+ fp->work->image = fp->bpf_func;
24081 schedule_work(work);
24082 }
24083 }
24084diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
24085index bff89df..377758a 100644
24086--- a/arch/x86/oprofile/backtrace.c
24087+++ b/arch/x86/oprofile/backtrace.c
24088@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
24089 struct stack_frame_ia32 *fp;
24090 unsigned long bytes;
24091
24092- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
24093+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
24094 if (bytes != sizeof(bufhead))
24095 return NULL;
24096
24097- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
24098+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
24099
24100 oprofile_add_trace(bufhead[0].return_address);
24101
24102@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
24103 struct stack_frame bufhead[2];
24104 unsigned long bytes;
24105
24106- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
24107+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
24108 if (bytes != sizeof(bufhead))
24109 return NULL;
24110
24111@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
24112 {
24113 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
24114
24115- if (!user_mode_vm(regs)) {
24116+ if (!user_mode(regs)) {
24117 unsigned long stack = kernel_stack_pointer(regs);
24118 if (depth)
24119 dump_trace(NULL, regs, (unsigned long *)stack, 0,
24120diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
24121index cb29191..036766d 100644
24122--- a/arch/x86/pci/mrst.c
24123+++ b/arch/x86/pci/mrst.c
24124@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
24125 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
24126 pci_mmcfg_late_init();
24127 pcibios_enable_irq = mrst_pci_irq_enable;
24128- pci_root_ops = pci_mrst_ops;
24129+ pax_open_kernel();
24130+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
24131+ pax_close_kernel();
24132 /* Continue with standard init */
24133 return 1;
24134 }
24135diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
24136index db0e9a5..0372c14 100644
24137--- a/arch/x86/pci/pcbios.c
24138+++ b/arch/x86/pci/pcbios.c
24139@@ -79,50 +79,93 @@ union bios32 {
24140 static struct {
24141 unsigned long address;
24142 unsigned short segment;
24143-} bios32_indirect = { 0, __KERNEL_CS };
24144+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
24145
24146 /*
24147 * Returns the entry point for the given service, NULL on error
24148 */
24149
24150-static unsigned long bios32_service(unsigned long service)
24151+static unsigned long __devinit bios32_service(unsigned long service)
24152 {
24153 unsigned char return_code; /* %al */
24154 unsigned long address; /* %ebx */
24155 unsigned long length; /* %ecx */
24156 unsigned long entry; /* %edx */
24157 unsigned long flags;
24158+ struct desc_struct d, *gdt;
24159
24160 local_irq_save(flags);
24161- __asm__("lcall *(%%edi); cld"
24162+
24163+ gdt = get_cpu_gdt_table(smp_processor_id());
24164+
24165+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
24166+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
24167+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
24168+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
24169+
24170+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
24171 : "=a" (return_code),
24172 "=b" (address),
24173 "=c" (length),
24174 "=d" (entry)
24175 : "0" (service),
24176 "1" (0),
24177- "D" (&bios32_indirect));
24178+ "D" (&bios32_indirect),
24179+ "r"(__PCIBIOS_DS)
24180+ : "memory");
24181+
24182+ pax_open_kernel();
24183+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
24184+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
24185+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
24186+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
24187+ pax_close_kernel();
24188+
24189 local_irq_restore(flags);
24190
24191 switch (return_code) {
24192- case 0:
24193- return address + entry;
24194- case 0x80: /* Not present */
24195- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
24196- return 0;
24197- default: /* Shouldn't happen */
24198- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
24199- service, return_code);
24200+ case 0: {
24201+ int cpu;
24202+ unsigned char flags;
24203+
24204+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
24205+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
24206+ printk(KERN_WARNING "bios32_service: not valid\n");
24207 return 0;
24208+ }
24209+ address = address + PAGE_OFFSET;
24210+ length += 16UL; /* some BIOSs underreport this... */
24211+ flags = 4;
24212+ if (length >= 64*1024*1024) {
24213+ length >>= PAGE_SHIFT;
24214+ flags |= 8;
24215+ }
24216+
24217+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
24218+ gdt = get_cpu_gdt_table(cpu);
24219+ pack_descriptor(&d, address, length, 0x9b, flags);
24220+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
24221+ pack_descriptor(&d, address, length, 0x93, flags);
24222+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
24223+ }
24224+ return entry;
24225+ }
24226+ case 0x80: /* Not present */
24227+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
24228+ return 0;
24229+ default: /* Shouldn't happen */
24230+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
24231+ service, return_code);
24232+ return 0;
24233 }
24234 }
24235
24236 static struct {
24237 unsigned long address;
24238 unsigned short segment;
24239-} pci_indirect = { 0, __KERNEL_CS };
24240+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
24241
24242-static int pci_bios_present;
24243+static int pci_bios_present __read_only;
24244
24245 static int __devinit check_pcibios(void)
24246 {
24247@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
24248 unsigned long flags, pcibios_entry;
24249
24250 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
24251- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
24252+ pci_indirect.address = pcibios_entry;
24253
24254 local_irq_save(flags);
24255- __asm__(
24256- "lcall *(%%edi); cld\n\t"
24257+ __asm__("movw %w6, %%ds\n\t"
24258+ "lcall *%%ss:(%%edi); cld\n\t"
24259+ "push %%ss\n\t"
24260+ "pop %%ds\n\t"
24261 "jc 1f\n\t"
24262 "xor %%ah, %%ah\n"
24263 "1:"
24264@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
24265 "=b" (ebx),
24266 "=c" (ecx)
24267 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
24268- "D" (&pci_indirect)
24269+ "D" (&pci_indirect),
24270+ "r" (__PCIBIOS_DS)
24271 : "memory");
24272 local_irq_restore(flags);
24273
24274@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24275
24276 switch (len) {
24277 case 1:
24278- __asm__("lcall *(%%esi); cld\n\t"
24279+ __asm__("movw %w6, %%ds\n\t"
24280+ "lcall *%%ss:(%%esi); cld\n\t"
24281+ "push %%ss\n\t"
24282+ "pop %%ds\n\t"
24283 "jc 1f\n\t"
24284 "xor %%ah, %%ah\n"
24285 "1:"
24286@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24287 : "1" (PCIBIOS_READ_CONFIG_BYTE),
24288 "b" (bx),
24289 "D" ((long)reg),
24290- "S" (&pci_indirect));
24291+ "S" (&pci_indirect),
24292+ "r" (__PCIBIOS_DS));
24293 /*
24294 * Zero-extend the result beyond 8 bits, do not trust the
24295 * BIOS having done it:
24296@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24297 *value &= 0xff;
24298 break;
24299 case 2:
24300- __asm__("lcall *(%%esi); cld\n\t"
24301+ __asm__("movw %w6, %%ds\n\t"
24302+ "lcall *%%ss:(%%esi); cld\n\t"
24303+ "push %%ss\n\t"
24304+ "pop %%ds\n\t"
24305 "jc 1f\n\t"
24306 "xor %%ah, %%ah\n"
24307 "1:"
24308@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24309 : "1" (PCIBIOS_READ_CONFIG_WORD),
24310 "b" (bx),
24311 "D" ((long)reg),
24312- "S" (&pci_indirect));
24313+ "S" (&pci_indirect),
24314+ "r" (__PCIBIOS_DS));
24315 /*
24316 * Zero-extend the result beyond 16 bits, do not trust the
24317 * BIOS having done it:
24318@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24319 *value &= 0xffff;
24320 break;
24321 case 4:
24322- __asm__("lcall *(%%esi); cld\n\t"
24323+ __asm__("movw %w6, %%ds\n\t"
24324+ "lcall *%%ss:(%%esi); cld\n\t"
24325+ "push %%ss\n\t"
24326+ "pop %%ds\n\t"
24327 "jc 1f\n\t"
24328 "xor %%ah, %%ah\n"
24329 "1:"
24330@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24331 : "1" (PCIBIOS_READ_CONFIG_DWORD),
24332 "b" (bx),
24333 "D" ((long)reg),
24334- "S" (&pci_indirect));
24335+ "S" (&pci_indirect),
24336+ "r" (__PCIBIOS_DS));
24337 break;
24338 }
24339
24340@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24341
24342 switch (len) {
24343 case 1:
24344- __asm__("lcall *(%%esi); cld\n\t"
24345+ __asm__("movw %w6, %%ds\n\t"
24346+ "lcall *%%ss:(%%esi); cld\n\t"
24347+ "push %%ss\n\t"
24348+ "pop %%ds\n\t"
24349 "jc 1f\n\t"
24350 "xor %%ah, %%ah\n"
24351 "1:"
24352@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24353 "c" (value),
24354 "b" (bx),
24355 "D" ((long)reg),
24356- "S" (&pci_indirect));
24357+ "S" (&pci_indirect),
24358+ "r" (__PCIBIOS_DS));
24359 break;
24360 case 2:
24361- __asm__("lcall *(%%esi); cld\n\t"
24362+ __asm__("movw %w6, %%ds\n\t"
24363+ "lcall *%%ss:(%%esi); cld\n\t"
24364+ "push %%ss\n\t"
24365+ "pop %%ds\n\t"
24366 "jc 1f\n\t"
24367 "xor %%ah, %%ah\n"
24368 "1:"
24369@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24370 "c" (value),
24371 "b" (bx),
24372 "D" ((long)reg),
24373- "S" (&pci_indirect));
24374+ "S" (&pci_indirect),
24375+ "r" (__PCIBIOS_DS));
24376 break;
24377 case 4:
24378- __asm__("lcall *(%%esi); cld\n\t"
24379+ __asm__("movw %w6, %%ds\n\t"
24380+ "lcall *%%ss:(%%esi); cld\n\t"
24381+ "push %%ss\n\t"
24382+ "pop %%ds\n\t"
24383 "jc 1f\n\t"
24384 "xor %%ah, %%ah\n"
24385 "1:"
24386@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24387 "c" (value),
24388 "b" (bx),
24389 "D" ((long)reg),
24390- "S" (&pci_indirect));
24391+ "S" (&pci_indirect),
24392+ "r" (__PCIBIOS_DS));
24393 break;
24394 }
24395
24396@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24397
24398 DBG("PCI: Fetching IRQ routing table... ");
24399 __asm__("push %%es\n\t"
24400+ "movw %w8, %%ds\n\t"
24401 "push %%ds\n\t"
24402 "pop %%es\n\t"
24403- "lcall *(%%esi); cld\n\t"
24404+ "lcall *%%ss:(%%esi); cld\n\t"
24405 "pop %%es\n\t"
24406+ "push %%ss\n\t"
24407+ "pop %%ds\n"
24408 "jc 1f\n\t"
24409 "xor %%ah, %%ah\n"
24410 "1:"
24411@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24412 "1" (0),
24413 "D" ((long) &opt),
24414 "S" (&pci_indirect),
24415- "m" (opt)
24416+ "m" (opt),
24417+ "r" (__PCIBIOS_DS)
24418 : "memory");
24419 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
24420 if (ret & 0xff00)
24421@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24422 {
24423 int ret;
24424
24425- __asm__("lcall *(%%esi); cld\n\t"
24426+ __asm__("movw %w5, %%ds\n\t"
24427+ "lcall *%%ss:(%%esi); cld\n\t"
24428+ "push %%ss\n\t"
24429+ "pop %%ds\n"
24430 "jc 1f\n\t"
24431 "xor %%ah, %%ah\n"
24432 "1:"
24433@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24434 : "0" (PCIBIOS_SET_PCI_HW_INT),
24435 "b" ((dev->bus->number << 8) | dev->devfn),
24436 "c" ((irq << 8) | (pin + 10)),
24437- "S" (&pci_indirect));
24438+ "S" (&pci_indirect),
24439+ "r" (__PCIBIOS_DS));
24440 return !(ret & 0xff00);
24441 }
24442 EXPORT_SYMBOL(pcibios_set_irq_routing);
24443diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
24444index 40e4469..1ab536e 100644
24445--- a/arch/x86/platform/efi/efi_32.c
24446+++ b/arch/x86/platform/efi/efi_32.c
24447@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
24448 {
24449 struct desc_ptr gdt_descr;
24450
24451+#ifdef CONFIG_PAX_KERNEXEC
24452+ struct desc_struct d;
24453+#endif
24454+
24455 local_irq_save(efi_rt_eflags);
24456
24457 load_cr3(initial_page_table);
24458 __flush_tlb_all();
24459
24460+#ifdef CONFIG_PAX_KERNEXEC
24461+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
24462+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24463+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
24464+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24465+#endif
24466+
24467 gdt_descr.address = __pa(get_cpu_gdt_table(0));
24468 gdt_descr.size = GDT_SIZE - 1;
24469 load_gdt(&gdt_descr);
24470@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
24471 {
24472 struct desc_ptr gdt_descr;
24473
24474+#ifdef CONFIG_PAX_KERNEXEC
24475+ struct desc_struct d;
24476+
24477+ memset(&d, 0, sizeof d);
24478+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24479+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24480+#endif
24481+
24482 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
24483 gdt_descr.size = GDT_SIZE - 1;
24484 load_gdt(&gdt_descr);
24485diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
24486index fbe66e6..c5c0dd2 100644
24487--- a/arch/x86/platform/efi/efi_stub_32.S
24488+++ b/arch/x86/platform/efi/efi_stub_32.S
24489@@ -6,7 +6,9 @@
24490 */
24491
24492 #include <linux/linkage.h>
24493+#include <linux/init.h>
24494 #include <asm/page_types.h>
24495+#include <asm/segment.h>
24496
24497 /*
24498 * efi_call_phys(void *, ...) is a function with variable parameters.
24499@@ -20,7 +22,7 @@
24500 * service functions will comply with gcc calling convention, too.
24501 */
24502
24503-.text
24504+__INIT
24505 ENTRY(efi_call_phys)
24506 /*
24507 * 0. The function can only be called in Linux kernel. So CS has been
24508@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
24509 * The mapping of lower virtual memory has been created in prelog and
24510 * epilog.
24511 */
24512- movl $1f, %edx
24513- subl $__PAGE_OFFSET, %edx
24514- jmp *%edx
24515+ movl $(__KERNEXEC_EFI_DS), %edx
24516+ mov %edx, %ds
24517+ mov %edx, %es
24518+ mov %edx, %ss
24519+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
24520 1:
24521
24522 /*
24523@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
24524 * parameter 2, ..., param n. To make things easy, we save the return
24525 * address of efi_call_phys in a global variable.
24526 */
24527- popl %edx
24528- movl %edx, saved_return_addr
24529- /* get the function pointer into ECX*/
24530- popl %ecx
24531- movl %ecx, efi_rt_function_ptr
24532- movl $2f, %edx
24533- subl $__PAGE_OFFSET, %edx
24534- pushl %edx
24535+ popl (saved_return_addr)
24536+ popl (efi_rt_function_ptr)
24537
24538 /*
24539 * 3. Clear PG bit in %CR0.
24540@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
24541 /*
24542 * 5. Call the physical function.
24543 */
24544- jmp *%ecx
24545+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
24546
24547-2:
24548 /*
24549 * 6. After EFI runtime service returns, control will return to
24550 * following instruction. We'd better readjust stack pointer first.
24551@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
24552 movl %cr0, %edx
24553 orl $0x80000000, %edx
24554 movl %edx, %cr0
24555- jmp 1f
24556-1:
24557+
24558 /*
24559 * 8. Now restore the virtual mode from flat mode by
24560 * adding EIP with PAGE_OFFSET.
24561 */
24562- movl $1f, %edx
24563- jmp *%edx
24564+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
24565 1:
24566+ movl $(__KERNEL_DS), %edx
24567+ mov %edx, %ds
24568+ mov %edx, %es
24569+ mov %edx, %ss
24570
24571 /*
24572 * 9. Balance the stack. And because EAX contain the return value,
24573 * we'd better not clobber it.
24574 */
24575- leal efi_rt_function_ptr, %edx
24576- movl (%edx), %ecx
24577- pushl %ecx
24578+ pushl (efi_rt_function_ptr)
24579
24580 /*
24581- * 10. Push the saved return address onto the stack and return.
24582+ * 10. Return to the saved return address.
24583 */
24584- leal saved_return_addr, %edx
24585- movl (%edx), %ecx
24586- pushl %ecx
24587- ret
24588+ jmpl *(saved_return_addr)
24589 ENDPROC(efi_call_phys)
24590 .previous
24591
24592-.data
24593+__INITDATA
24594 saved_return_addr:
24595 .long 0
24596 efi_rt_function_ptr:
24597diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
24598index 4c07cca..2c8427d 100644
24599--- a/arch/x86/platform/efi/efi_stub_64.S
24600+++ b/arch/x86/platform/efi/efi_stub_64.S
24601@@ -7,6 +7,7 @@
24602 */
24603
24604 #include <linux/linkage.h>
24605+#include <asm/alternative-asm.h>
24606
24607 #define SAVE_XMM \
24608 mov %rsp, %rax; \
24609@@ -40,6 +41,7 @@ ENTRY(efi_call0)
24610 call *%rdi
24611 addq $32, %rsp
24612 RESTORE_XMM
24613+ pax_force_retaddr 0, 1
24614 ret
24615 ENDPROC(efi_call0)
24616
24617@@ -50,6 +52,7 @@ ENTRY(efi_call1)
24618 call *%rdi
24619 addq $32, %rsp
24620 RESTORE_XMM
24621+ pax_force_retaddr 0, 1
24622 ret
24623 ENDPROC(efi_call1)
24624
24625@@ -60,6 +63,7 @@ ENTRY(efi_call2)
24626 call *%rdi
24627 addq $32, %rsp
24628 RESTORE_XMM
24629+ pax_force_retaddr 0, 1
24630 ret
24631 ENDPROC(efi_call2)
24632
24633@@ -71,6 +75,7 @@ ENTRY(efi_call3)
24634 call *%rdi
24635 addq $32, %rsp
24636 RESTORE_XMM
24637+ pax_force_retaddr 0, 1
24638 ret
24639 ENDPROC(efi_call3)
24640
24641@@ -83,6 +88,7 @@ ENTRY(efi_call4)
24642 call *%rdi
24643 addq $32, %rsp
24644 RESTORE_XMM
24645+ pax_force_retaddr 0, 1
24646 ret
24647 ENDPROC(efi_call4)
24648
24649@@ -96,6 +102,7 @@ ENTRY(efi_call5)
24650 call *%rdi
24651 addq $48, %rsp
24652 RESTORE_XMM
24653+ pax_force_retaddr 0, 1
24654 ret
24655 ENDPROC(efi_call5)
24656
24657@@ -112,5 +119,6 @@ ENTRY(efi_call6)
24658 call *%rdi
24659 addq $48, %rsp
24660 RESTORE_XMM
24661+ pax_force_retaddr 0, 1
24662 ret
24663 ENDPROC(efi_call6)
24664diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
24665index ad4ec1c..686479e 100644
24666--- a/arch/x86/platform/mrst/mrst.c
24667+++ b/arch/x86/platform/mrst/mrst.c
24668@@ -76,18 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
24669 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
24670 int sfi_mrtc_num;
24671
24672-static void mrst_power_off(void)
24673+static __noreturn void mrst_power_off(void)
24674 {
24675 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
24676 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
24677+ BUG();
24678 }
24679
24680-static void mrst_reboot(void)
24681+static __noreturn void mrst_reboot(void)
24682 {
24683 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
24684 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
24685 else
24686 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
24687+ BUG();
24688 }
24689
24690 /* parse all the mtimer info to a static mtimer array */
24691diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
24692index f10c0af..3ec1f95 100644
24693--- a/arch/x86/power/cpu.c
24694+++ b/arch/x86/power/cpu.c
24695@@ -131,7 +131,7 @@ static void do_fpu_end(void)
24696 static void fix_processor_context(void)
24697 {
24698 int cpu = smp_processor_id();
24699- struct tss_struct *t = &per_cpu(init_tss, cpu);
24700+ struct tss_struct *t = init_tss + cpu;
24701
24702 set_tss_desc(cpu, t); /*
24703 * This just modifies memory; should not be
24704@@ -141,7 +141,9 @@ static void fix_processor_context(void)
24705 */
24706
24707 #ifdef CONFIG_X86_64
24708+ pax_open_kernel();
24709 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
24710+ pax_close_kernel();
24711
24712 syscall_init(); /* This sets MSR_*STAR and related */
24713 #endif
24714diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
24715index 5d17950..2253fc9 100644
24716--- a/arch/x86/vdso/Makefile
24717+++ b/arch/x86/vdso/Makefile
24718@@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
24719 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
24720 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
24721
24722-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24723+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24724 GCOV_PROFILE := n
24725
24726 #
24727diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
24728index 468d591..8e80a0a 100644
24729--- a/arch/x86/vdso/vdso32-setup.c
24730+++ b/arch/x86/vdso/vdso32-setup.c
24731@@ -25,6 +25,7 @@
24732 #include <asm/tlbflush.h>
24733 #include <asm/vdso.h>
24734 #include <asm/proto.h>
24735+#include <asm/mman.h>
24736
24737 enum {
24738 VDSO_DISABLED = 0,
24739@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
24740 void enable_sep_cpu(void)
24741 {
24742 int cpu = get_cpu();
24743- struct tss_struct *tss = &per_cpu(init_tss, cpu);
24744+ struct tss_struct *tss = init_tss + cpu;
24745
24746 if (!boot_cpu_has(X86_FEATURE_SEP)) {
24747 put_cpu();
24748@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
24749 gate_vma.vm_start = FIXADDR_USER_START;
24750 gate_vma.vm_end = FIXADDR_USER_END;
24751 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
24752- gate_vma.vm_page_prot = __P101;
24753+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
24754 /*
24755 * Make sure the vDSO gets into every core dump.
24756 * Dumping its contents makes post-mortem fully interpretable later
24757@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24758 if (compat)
24759 addr = VDSO_HIGH_BASE;
24760 else {
24761- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
24762+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
24763 if (IS_ERR_VALUE(addr)) {
24764 ret = addr;
24765 goto up_fail;
24766 }
24767 }
24768
24769- current->mm->context.vdso = (void *)addr;
24770+ current->mm->context.vdso = addr;
24771
24772 if (compat_uses_vma || !compat) {
24773 /*
24774@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24775 }
24776
24777 current_thread_info()->sysenter_return =
24778- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24779+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24780
24781 up_fail:
24782 if (ret)
24783- current->mm->context.vdso = NULL;
24784+ current->mm->context.vdso = 0;
24785
24786 up_write(&mm->mmap_sem);
24787
24788@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
24789
24790 const char *arch_vma_name(struct vm_area_struct *vma)
24791 {
24792- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
24793+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
24794 return "[vdso]";
24795+
24796+#ifdef CONFIG_PAX_SEGMEXEC
24797+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
24798+ return "[vdso]";
24799+#endif
24800+
24801 return NULL;
24802 }
24803
24804@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
24805 * Check to see if the corresponding task was created in compat vdso
24806 * mode.
24807 */
24808- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
24809+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
24810 return &gate_vma;
24811 return NULL;
24812 }
24813diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
24814index 153407c..611cba9 100644
24815--- a/arch/x86/vdso/vma.c
24816+++ b/arch/x86/vdso/vma.c
24817@@ -16,8 +16,6 @@
24818 #include <asm/vdso.h>
24819 #include <asm/page.h>
24820
24821-unsigned int __read_mostly vdso_enabled = 1;
24822-
24823 extern char vdso_start[], vdso_end[];
24824 extern unsigned short vdso_sync_cpuid;
24825
24826@@ -96,7 +94,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24827 * unaligned here as a result of stack start randomization.
24828 */
24829 addr = PAGE_ALIGN(addr);
24830- addr = align_addr(addr, NULL, ALIGN_VDSO);
24831
24832 return addr;
24833 }
24834@@ -106,40 +103,35 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24835 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24836 {
24837 struct mm_struct *mm = current->mm;
24838- unsigned long addr;
24839+ unsigned long addr = 0;
24840 int ret;
24841
24842- if (!vdso_enabled)
24843- return 0;
24844-
24845 down_write(&mm->mmap_sem);
24846+
24847+#ifdef CONFIG_PAX_RANDMMAP
24848+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
24849+#endif
24850+
24851 addr = vdso_addr(mm->start_stack, vdso_size);
24852+ addr = align_addr(addr, NULL, ALIGN_VDSO);
24853 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
24854 if (IS_ERR_VALUE(addr)) {
24855 ret = addr;
24856 goto up_fail;
24857 }
24858
24859- current->mm->context.vdso = (void *)addr;
24860+ mm->context.vdso = addr;
24861
24862 ret = install_special_mapping(mm, addr, vdso_size,
24863 VM_READ|VM_EXEC|
24864 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
24865 VM_ALWAYSDUMP,
24866 vdso_pages);
24867- if (ret) {
24868- current->mm->context.vdso = NULL;
24869- goto up_fail;
24870- }
24871+
24872+ if (ret)
24873+ mm->context.vdso = 0;
24874
24875 up_fail:
24876 up_write(&mm->mmap_sem);
24877 return ret;
24878 }
24879-
24880-static __init int vdso_setup(char *s)
24881-{
24882- vdso_enabled = simple_strtoul(s, NULL, 0);
24883- return 0;
24884-}
24885-__setup("vdso=", vdso_setup);
24886diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
24887index 1f92865..c843b20 100644
24888--- a/arch/x86/xen/enlighten.c
24889+++ b/arch/x86/xen/enlighten.c
24890@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
24891
24892 struct shared_info xen_dummy_shared_info;
24893
24894-void *xen_initial_gdt;
24895-
24896 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
24897 __read_mostly int xen_have_vector_callback;
24898 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
24899@@ -1029,7 +1027,7 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
24900 #endif
24901 };
24902
24903-static void xen_reboot(int reason)
24904+static __noreturn void xen_reboot(int reason)
24905 {
24906 struct sched_shutdown r = { .reason = reason };
24907
24908@@ -1037,17 +1035,17 @@ static void xen_reboot(int reason)
24909 BUG();
24910 }
24911
24912-static void xen_restart(char *msg)
24913+static __noreturn void xen_restart(char *msg)
24914 {
24915 xen_reboot(SHUTDOWN_reboot);
24916 }
24917
24918-static void xen_emergency_restart(void)
24919+static __noreturn void xen_emergency_restart(void)
24920 {
24921 xen_reboot(SHUTDOWN_reboot);
24922 }
24923
24924-static void xen_machine_halt(void)
24925+static __noreturn void xen_machine_halt(void)
24926 {
24927 xen_reboot(SHUTDOWN_poweroff);
24928 }
24929@@ -1153,7 +1151,17 @@ asmlinkage void __init xen_start_kernel(void)
24930 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
24931
24932 /* Work out if we support NX */
24933- x86_configure_nx();
24934+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
24935+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
24936+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
24937+ unsigned l, h;
24938+
24939+ __supported_pte_mask |= _PAGE_NX;
24940+ rdmsr(MSR_EFER, l, h);
24941+ l |= EFER_NX;
24942+ wrmsr(MSR_EFER, l, h);
24943+ }
24944+#endif
24945
24946 xen_setup_features();
24947
24948@@ -1184,13 +1192,6 @@ asmlinkage void __init xen_start_kernel(void)
24949
24950 machine_ops = xen_machine_ops;
24951
24952- /*
24953- * The only reliable way to retain the initial address of the
24954- * percpu gdt_page is to remember it here, so we can go and
24955- * mark it RW later, when the initial percpu area is freed.
24956- */
24957- xen_initial_gdt = &per_cpu(gdt_page, 0);
24958-
24959 xen_smp_init();
24960
24961 #ifdef CONFIG_ACPI_NUMA
24962diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
24963index 87f6673..e2555a6 100644
24964--- a/arch/x86/xen/mmu.c
24965+++ b/arch/x86/xen/mmu.c
24966@@ -1733,6 +1733,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24967 convert_pfn_mfn(init_level4_pgt);
24968 convert_pfn_mfn(level3_ident_pgt);
24969 convert_pfn_mfn(level3_kernel_pgt);
24970+ convert_pfn_mfn(level3_vmalloc_start_pgt);
24971+ convert_pfn_mfn(level3_vmalloc_end_pgt);
24972+ convert_pfn_mfn(level3_vmemmap_pgt);
24973
24974 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
24975 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
24976@@ -1751,7 +1754,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24977 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
24978 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
24979 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
24980+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
24981+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
24982+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
24983 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
24984+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
24985 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
24986 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
24987
24988@@ -1962,6 +1969,7 @@ static void __init xen_post_allocator_init(void)
24989 pv_mmu_ops.set_pud = xen_set_pud;
24990 #if PAGETABLE_LEVELS == 4
24991 pv_mmu_ops.set_pgd = xen_set_pgd;
24992+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
24993 #endif
24994
24995 /* This will work as long as patching hasn't happened yet
24996@@ -2043,6 +2051,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
24997 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
24998 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
24999 .set_pgd = xen_set_pgd_hyper,
25000+ .set_pgd_batched = xen_set_pgd_hyper,
25001
25002 .alloc_pud = xen_alloc_pmd_init,
25003 .release_pud = xen_release_pmd_init,
25004diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
25005index 041d4fe..7666b7e 100644
25006--- a/arch/x86/xen/smp.c
25007+++ b/arch/x86/xen/smp.c
25008@@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
25009 {
25010 BUG_ON(smp_processor_id() != 0);
25011 native_smp_prepare_boot_cpu();
25012-
25013- /* We've switched to the "real" per-cpu gdt, so make sure the
25014- old memory can be recycled */
25015- make_lowmem_page_readwrite(xen_initial_gdt);
25016-
25017 xen_filter_cpu_maps();
25018 xen_setup_vcpu_info_placement();
25019 }
25020@@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
25021 gdt = get_cpu_gdt_table(cpu);
25022
25023 ctxt->flags = VGCF_IN_KERNEL;
25024- ctxt->user_regs.ds = __USER_DS;
25025- ctxt->user_regs.es = __USER_DS;
25026+ ctxt->user_regs.ds = __KERNEL_DS;
25027+ ctxt->user_regs.es = __KERNEL_DS;
25028 ctxt->user_regs.ss = __KERNEL_DS;
25029 #ifdef CONFIG_X86_32
25030 ctxt->user_regs.fs = __KERNEL_PERCPU;
25031- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
25032+ savesegment(gs, ctxt->user_regs.gs);
25033 #else
25034 ctxt->gs_base_kernel = per_cpu_offset(cpu);
25035 #endif
25036@@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
25037 int rc;
25038
25039 per_cpu(current_task, cpu) = idle;
25040+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
25041 #ifdef CONFIG_X86_32
25042 irq_ctx_init(cpu);
25043 #else
25044 clear_tsk_thread_flag(idle, TIF_FORK);
25045- per_cpu(kernel_stack, cpu) =
25046- (unsigned long)task_stack_page(idle) -
25047- KERNEL_STACK_OFFSET + THREAD_SIZE;
25048+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
25049 #endif
25050 xen_setup_runstate_info(cpu);
25051 xen_setup_timer(cpu);
25052diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
25053index b040b0e..8cc4fe0 100644
25054--- a/arch/x86/xen/xen-asm_32.S
25055+++ b/arch/x86/xen/xen-asm_32.S
25056@@ -83,14 +83,14 @@ ENTRY(xen_iret)
25057 ESP_OFFSET=4 # bytes pushed onto stack
25058
25059 /*
25060- * Store vcpu_info pointer for easy access. Do it this way to
25061- * avoid having to reload %fs
25062+ * Store vcpu_info pointer for easy access.
25063 */
25064 #ifdef CONFIG_SMP
25065- GET_THREAD_INFO(%eax)
25066- movl TI_cpu(%eax), %eax
25067- movl __per_cpu_offset(,%eax,4), %eax
25068- mov xen_vcpu(%eax), %eax
25069+ push %fs
25070+ mov $(__KERNEL_PERCPU), %eax
25071+ mov %eax, %fs
25072+ mov PER_CPU_VAR(xen_vcpu), %eax
25073+ pop %fs
25074 #else
25075 movl xen_vcpu, %eax
25076 #endif
25077diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
25078index aaa7291..3f77960 100644
25079--- a/arch/x86/xen/xen-head.S
25080+++ b/arch/x86/xen/xen-head.S
25081@@ -19,6 +19,17 @@ ENTRY(startup_xen)
25082 #ifdef CONFIG_X86_32
25083 mov %esi,xen_start_info
25084 mov $init_thread_union+THREAD_SIZE,%esp
25085+#ifdef CONFIG_SMP
25086+ movl $cpu_gdt_table,%edi
25087+ movl $__per_cpu_load,%eax
25088+ movw %ax,__KERNEL_PERCPU + 2(%edi)
25089+ rorl $16,%eax
25090+ movb %al,__KERNEL_PERCPU + 4(%edi)
25091+ movb %ah,__KERNEL_PERCPU + 7(%edi)
25092+ movl $__per_cpu_end - 1,%eax
25093+ subl $__per_cpu_start,%eax
25094+ movw %ax,__KERNEL_PERCPU + 0(%edi)
25095+#endif
25096 #else
25097 mov %rsi,xen_start_info
25098 mov $init_thread_union+THREAD_SIZE,%rsp
25099diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
25100index b095739..8c17bcd 100644
25101--- a/arch/x86/xen/xen-ops.h
25102+++ b/arch/x86/xen/xen-ops.h
25103@@ -10,8 +10,6 @@
25104 extern const char xen_hypervisor_callback[];
25105 extern const char xen_failsafe_callback[];
25106
25107-extern void *xen_initial_gdt;
25108-
25109 struct trap_info;
25110 void xen_copy_trap_info(struct trap_info *traps);
25111
25112diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
25113index 58916af..9cb880b 100644
25114--- a/block/blk-iopoll.c
25115+++ b/block/blk-iopoll.c
25116@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
25117 }
25118 EXPORT_SYMBOL(blk_iopoll_complete);
25119
25120-static void blk_iopoll_softirq(struct softirq_action *h)
25121+static void blk_iopoll_softirq(void)
25122 {
25123 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
25124 int rearm = 0, budget = blk_iopoll_budget;
25125diff --git a/block/blk-map.c b/block/blk-map.c
25126index 623e1cd..ca1e109 100644
25127--- a/block/blk-map.c
25128+++ b/block/blk-map.c
25129@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
25130 if (!len || !kbuf)
25131 return -EINVAL;
25132
25133- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
25134+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
25135 if (do_copy)
25136 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
25137 else
25138diff --git a/block/blk-softirq.c b/block/blk-softirq.c
25139index 1366a89..e17f54b 100644
25140--- a/block/blk-softirq.c
25141+++ b/block/blk-softirq.c
25142@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
25143 * Softirq action handler - move entries to local list and loop over them
25144 * while passing them to the queue registered handler.
25145 */
25146-static void blk_done_softirq(struct softirq_action *h)
25147+static void blk_done_softirq(void)
25148 {
25149 struct list_head *cpu_list, local_list;
25150
25151diff --git a/block/bsg.c b/block/bsg.c
25152index 702f131..37808bf 100644
25153--- a/block/bsg.c
25154+++ b/block/bsg.c
25155@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
25156 struct sg_io_v4 *hdr, struct bsg_device *bd,
25157 fmode_t has_write_perm)
25158 {
25159+ unsigned char tmpcmd[sizeof(rq->__cmd)];
25160+ unsigned char *cmdptr;
25161+
25162 if (hdr->request_len > BLK_MAX_CDB) {
25163 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
25164 if (!rq->cmd)
25165 return -ENOMEM;
25166- }
25167+ cmdptr = rq->cmd;
25168+ } else
25169+ cmdptr = tmpcmd;
25170
25171- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
25172+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
25173 hdr->request_len))
25174 return -EFAULT;
25175
25176+ if (cmdptr != rq->cmd)
25177+ memcpy(rq->cmd, cmdptr, hdr->request_len);
25178+
25179 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
25180 if (blk_verify_command(rq->cmd, has_write_perm))
25181 return -EPERM;
25182diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
25183index 7b72502..646105c 100644
25184--- a/block/compat_ioctl.c
25185+++ b/block/compat_ioctl.c
25186@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
25187 err |= __get_user(f->spec1, &uf->spec1);
25188 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
25189 err |= __get_user(name, &uf->name);
25190- f->name = compat_ptr(name);
25191+ f->name = (void __force_kernel *)compat_ptr(name);
25192 if (err) {
25193 err = -EFAULT;
25194 goto out;
25195diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
25196index 688be8a..8a37d98 100644
25197--- a/block/scsi_ioctl.c
25198+++ b/block/scsi_ioctl.c
25199@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
25200 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
25201 struct sg_io_hdr *hdr, fmode_t mode)
25202 {
25203- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
25204+ unsigned char tmpcmd[sizeof(rq->__cmd)];
25205+ unsigned char *cmdptr;
25206+
25207+ if (rq->cmd != rq->__cmd)
25208+ cmdptr = rq->cmd;
25209+ else
25210+ cmdptr = tmpcmd;
25211+
25212+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
25213 return -EFAULT;
25214+
25215+ if (cmdptr != rq->cmd)
25216+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
25217+
25218 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
25219 return -EPERM;
25220
25221@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
25222 int err;
25223 unsigned int in_len, out_len, bytes, opcode, cmdlen;
25224 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
25225+ unsigned char tmpcmd[sizeof(rq->__cmd)];
25226+ unsigned char *cmdptr;
25227
25228 if (!sic)
25229 return -EINVAL;
25230@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
25231 */
25232 err = -EFAULT;
25233 rq->cmd_len = cmdlen;
25234- if (copy_from_user(rq->cmd, sic->data, cmdlen))
25235+
25236+ if (rq->cmd != rq->__cmd)
25237+ cmdptr = rq->cmd;
25238+ else
25239+ cmdptr = tmpcmd;
25240+
25241+ if (copy_from_user(cmdptr, sic->data, cmdlen))
25242 goto error;
25243
25244+ if (rq->cmd != cmdptr)
25245+ memcpy(rq->cmd, cmdptr, cmdlen);
25246+
25247 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
25248 goto error;
25249
25250diff --git a/crypto/cryptd.c b/crypto/cryptd.c
25251index 671d4d6..5f24030 100644
25252--- a/crypto/cryptd.c
25253+++ b/crypto/cryptd.c
25254@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
25255
25256 struct cryptd_blkcipher_request_ctx {
25257 crypto_completion_t complete;
25258-};
25259+} __no_const;
25260
25261 struct cryptd_hash_ctx {
25262 struct crypto_shash *child;
25263@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
25264
25265 struct cryptd_aead_request_ctx {
25266 crypto_completion_t complete;
25267-};
25268+} __no_const;
25269
25270 static void cryptd_queue_worker(struct work_struct *work);
25271
25272diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
25273index 5d41894..22021e4 100644
25274--- a/drivers/acpi/apei/cper.c
25275+++ b/drivers/acpi/apei/cper.c
25276@@ -38,12 +38,12 @@
25277 */
25278 u64 cper_next_record_id(void)
25279 {
25280- static atomic64_t seq;
25281+ static atomic64_unchecked_t seq;
25282
25283- if (!atomic64_read(&seq))
25284- atomic64_set(&seq, ((u64)get_seconds()) << 32);
25285+ if (!atomic64_read_unchecked(&seq))
25286+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
25287
25288- return atomic64_inc_return(&seq);
25289+ return atomic64_inc_return_unchecked(&seq);
25290 }
25291 EXPORT_SYMBOL_GPL(cper_next_record_id);
25292
25293diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
25294index 6c47ae9..abfdd63 100644
25295--- a/drivers/acpi/ec_sys.c
25296+++ b/drivers/acpi/ec_sys.c
25297@@ -12,6 +12,7 @@
25298 #include <linux/acpi.h>
25299 #include <linux/debugfs.h>
25300 #include <linux/module.h>
25301+#include <linux/uaccess.h>
25302 #include "internal.h"
25303
25304 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
25305@@ -40,7 +41,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25306 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
25307 */
25308 unsigned int size = EC_SPACE_SIZE;
25309- u8 *data = (u8 *) buf;
25310+ u8 data;
25311 loff_t init_off = *off;
25312 int err = 0;
25313
25314@@ -53,9 +54,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25315 size = count;
25316
25317 while (size) {
25318- err = ec_read(*off, &data[*off - init_off]);
25319+ err = ec_read(*off, &data);
25320 if (err)
25321 return err;
25322+ if (put_user(data, &buf[*off - init_off]))
25323+ return -EFAULT;
25324 *off += 1;
25325 size--;
25326 }
25327@@ -71,7 +74,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25328
25329 unsigned int size = count;
25330 loff_t init_off = *off;
25331- u8 *data = (u8 *) buf;
25332 int err = 0;
25333
25334 if (*off >= EC_SPACE_SIZE)
25335@@ -82,7 +84,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25336 }
25337
25338 while (size) {
25339- u8 byte_write = data[*off - init_off];
25340+ u8 byte_write;
25341+ if (get_user(byte_write, &buf[*off - init_off]))
25342+ return -EFAULT;
25343 err = ec_write(*off, byte_write);
25344 if (err)
25345 return err;
25346diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
25347index 251c7b62..000462d 100644
25348--- a/drivers/acpi/proc.c
25349+++ b/drivers/acpi/proc.c
25350@@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
25351 size_t count, loff_t * ppos)
25352 {
25353 struct list_head *node, *next;
25354- char strbuf[5];
25355- char str[5] = "";
25356- unsigned int len = count;
25357+ char strbuf[5] = {0};
25358
25359- if (len > 4)
25360- len = 4;
25361- if (len < 0)
25362+ if (count > 4)
25363+ count = 4;
25364+ if (copy_from_user(strbuf, buffer, count))
25365 return -EFAULT;
25366-
25367- if (copy_from_user(strbuf, buffer, len))
25368- return -EFAULT;
25369- strbuf[len] = '\0';
25370- sscanf(strbuf, "%s", str);
25371+ strbuf[count] = '\0';
25372
25373 mutex_lock(&acpi_device_lock);
25374 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
25375@@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
25376 if (!dev->wakeup.flags.valid)
25377 continue;
25378
25379- if (!strncmp(dev->pnp.bus_id, str, 4)) {
25380+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
25381 if (device_can_wakeup(&dev->dev)) {
25382 bool enable = !device_may_wakeup(&dev->dev);
25383 device_set_wakeup_enable(&dev->dev, enable);
25384diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
25385index 9d7bc9f..a6fc091 100644
25386--- a/drivers/acpi/processor_driver.c
25387+++ b/drivers/acpi/processor_driver.c
25388@@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
25389 return 0;
25390 #endif
25391
25392- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
25393+ BUG_ON(pr->id >= nr_cpu_ids);
25394
25395 /*
25396 * Buggy BIOS check
25397diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
25398index c04ad68..0b99473 100644
25399--- a/drivers/ata/libata-core.c
25400+++ b/drivers/ata/libata-core.c
25401@@ -4733,7 +4733,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
25402 struct ata_port *ap;
25403 unsigned int tag;
25404
25405- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25406+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25407 ap = qc->ap;
25408
25409 qc->flags = 0;
25410@@ -4749,7 +4749,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
25411 struct ata_port *ap;
25412 struct ata_link *link;
25413
25414- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25415+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25416 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
25417 ap = qc->ap;
25418 link = qc->dev->link;
25419@@ -5754,6 +5754,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25420 return;
25421
25422 spin_lock(&lock);
25423+ pax_open_kernel();
25424
25425 for (cur = ops->inherits; cur; cur = cur->inherits) {
25426 void **inherit = (void **)cur;
25427@@ -5767,8 +5768,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25428 if (IS_ERR(*pp))
25429 *pp = NULL;
25430
25431- ops->inherits = NULL;
25432+ *(struct ata_port_operations **)&ops->inherits = NULL;
25433
25434+ pax_close_kernel();
25435 spin_unlock(&lock);
25436 }
25437
25438diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
25439index e8574bb..f9f6a72 100644
25440--- a/drivers/ata/pata_arasan_cf.c
25441+++ b/drivers/ata/pata_arasan_cf.c
25442@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
25443 /* Handle platform specific quirks */
25444 if (pdata->quirk) {
25445 if (pdata->quirk & CF_BROKEN_PIO) {
25446- ap->ops->set_piomode = NULL;
25447+ pax_open_kernel();
25448+ *(void **)&ap->ops->set_piomode = NULL;
25449+ pax_close_kernel();
25450 ap->pio_mask = 0;
25451 }
25452 if (pdata->quirk & CF_BROKEN_MWDMA)
25453diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
25454index f9b983a..887b9d8 100644
25455--- a/drivers/atm/adummy.c
25456+++ b/drivers/atm/adummy.c
25457@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
25458 vcc->pop(vcc, skb);
25459 else
25460 dev_kfree_skb_any(skb);
25461- atomic_inc(&vcc->stats->tx);
25462+ atomic_inc_unchecked(&vcc->stats->tx);
25463
25464 return 0;
25465 }
25466diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
25467index f8f41e0..1f987dd 100644
25468--- a/drivers/atm/ambassador.c
25469+++ b/drivers/atm/ambassador.c
25470@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
25471 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
25472
25473 // VC layer stats
25474- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25475+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25476
25477 // free the descriptor
25478 kfree (tx_descr);
25479@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25480 dump_skb ("<<<", vc, skb);
25481
25482 // VC layer stats
25483- atomic_inc(&atm_vcc->stats->rx);
25484+ atomic_inc_unchecked(&atm_vcc->stats->rx);
25485 __net_timestamp(skb);
25486 // end of our responsibility
25487 atm_vcc->push (atm_vcc, skb);
25488@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25489 } else {
25490 PRINTK (KERN_INFO, "dropped over-size frame");
25491 // should we count this?
25492- atomic_inc(&atm_vcc->stats->rx_drop);
25493+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25494 }
25495
25496 } else {
25497@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
25498 }
25499
25500 if (check_area (skb->data, skb->len)) {
25501- atomic_inc(&atm_vcc->stats->tx_err);
25502+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
25503 return -ENOMEM; // ?
25504 }
25505
25506diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
25507index b22d71c..d6e1049 100644
25508--- a/drivers/atm/atmtcp.c
25509+++ b/drivers/atm/atmtcp.c
25510@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25511 if (vcc->pop) vcc->pop(vcc,skb);
25512 else dev_kfree_skb(skb);
25513 if (dev_data) return 0;
25514- atomic_inc(&vcc->stats->tx_err);
25515+ atomic_inc_unchecked(&vcc->stats->tx_err);
25516 return -ENOLINK;
25517 }
25518 size = skb->len+sizeof(struct atmtcp_hdr);
25519@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25520 if (!new_skb) {
25521 if (vcc->pop) vcc->pop(vcc,skb);
25522 else dev_kfree_skb(skb);
25523- atomic_inc(&vcc->stats->tx_err);
25524+ atomic_inc_unchecked(&vcc->stats->tx_err);
25525 return -ENOBUFS;
25526 }
25527 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
25528@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25529 if (vcc->pop) vcc->pop(vcc,skb);
25530 else dev_kfree_skb(skb);
25531 out_vcc->push(out_vcc,new_skb);
25532- atomic_inc(&vcc->stats->tx);
25533- atomic_inc(&out_vcc->stats->rx);
25534+ atomic_inc_unchecked(&vcc->stats->tx);
25535+ atomic_inc_unchecked(&out_vcc->stats->rx);
25536 return 0;
25537 }
25538
25539@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25540 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
25541 read_unlock(&vcc_sklist_lock);
25542 if (!out_vcc) {
25543- atomic_inc(&vcc->stats->tx_err);
25544+ atomic_inc_unchecked(&vcc->stats->tx_err);
25545 goto done;
25546 }
25547 skb_pull(skb,sizeof(struct atmtcp_hdr));
25548@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25549 __net_timestamp(new_skb);
25550 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
25551 out_vcc->push(out_vcc,new_skb);
25552- atomic_inc(&vcc->stats->tx);
25553- atomic_inc(&out_vcc->stats->rx);
25554+ atomic_inc_unchecked(&vcc->stats->tx);
25555+ atomic_inc_unchecked(&out_vcc->stats->rx);
25556 done:
25557 if (vcc->pop) vcc->pop(vcc,skb);
25558 else dev_kfree_skb(skb);
25559diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
25560index 956e9ac..133516d 100644
25561--- a/drivers/atm/eni.c
25562+++ b/drivers/atm/eni.c
25563@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
25564 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
25565 vcc->dev->number);
25566 length = 0;
25567- atomic_inc(&vcc->stats->rx_err);
25568+ atomic_inc_unchecked(&vcc->stats->rx_err);
25569 }
25570 else {
25571 length = ATM_CELL_SIZE-1; /* no HEC */
25572@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25573 size);
25574 }
25575 eff = length = 0;
25576- atomic_inc(&vcc->stats->rx_err);
25577+ atomic_inc_unchecked(&vcc->stats->rx_err);
25578 }
25579 else {
25580 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
25581@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25582 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
25583 vcc->dev->number,vcc->vci,length,size << 2,descr);
25584 length = eff = 0;
25585- atomic_inc(&vcc->stats->rx_err);
25586+ atomic_inc_unchecked(&vcc->stats->rx_err);
25587 }
25588 }
25589 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
25590@@ -771,7 +771,7 @@ rx_dequeued++;
25591 vcc->push(vcc,skb);
25592 pushed++;
25593 }
25594- atomic_inc(&vcc->stats->rx);
25595+ atomic_inc_unchecked(&vcc->stats->rx);
25596 }
25597 wake_up(&eni_dev->rx_wait);
25598 }
25599@@ -1229,7 +1229,7 @@ static void dequeue_tx(struct atm_dev *dev)
25600 PCI_DMA_TODEVICE);
25601 if (vcc->pop) vcc->pop(vcc,skb);
25602 else dev_kfree_skb_irq(skb);
25603- atomic_inc(&vcc->stats->tx);
25604+ atomic_inc_unchecked(&vcc->stats->tx);
25605 wake_up(&eni_dev->tx_wait);
25606 dma_complete++;
25607 }
25608@@ -1569,7 +1569,7 @@ tx_complete++;
25609 /*--------------------------------- entries ---------------------------------*/
25610
25611
25612-static const char *media_name[] __devinitdata = {
25613+static const char *media_name[] __devinitconst = {
25614 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
25615 "UTP", "05?", "06?", "07?", /* 4- 7 */
25616 "TAXI","09?", "10?", "11?", /* 8-11 */
25617diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
25618index 5072f8a..fa52520d 100644
25619--- a/drivers/atm/firestream.c
25620+++ b/drivers/atm/firestream.c
25621@@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
25622 }
25623 }
25624
25625- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25626+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25627
25628 fs_dprintk (FS_DEBUG_TXMEM, "i");
25629 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
25630@@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25631 #endif
25632 skb_put (skb, qe->p1 & 0xffff);
25633 ATM_SKB(skb)->vcc = atm_vcc;
25634- atomic_inc(&atm_vcc->stats->rx);
25635+ atomic_inc_unchecked(&atm_vcc->stats->rx);
25636 __net_timestamp(skb);
25637 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
25638 atm_vcc->push (atm_vcc, skb);
25639@@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25640 kfree (pe);
25641 }
25642 if (atm_vcc)
25643- atomic_inc(&atm_vcc->stats->rx_drop);
25644+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25645 break;
25646 case 0x1f: /* Reassembly abort: no buffers. */
25647 /* Silently increment error counter. */
25648 if (atm_vcc)
25649- atomic_inc(&atm_vcc->stats->rx_drop);
25650+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25651 break;
25652 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
25653 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
25654diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
25655index 361f5ae..7fc552d 100644
25656--- a/drivers/atm/fore200e.c
25657+++ b/drivers/atm/fore200e.c
25658@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
25659 #endif
25660 /* check error condition */
25661 if (*entry->status & STATUS_ERROR)
25662- atomic_inc(&vcc->stats->tx_err);
25663+ atomic_inc_unchecked(&vcc->stats->tx_err);
25664 else
25665- atomic_inc(&vcc->stats->tx);
25666+ atomic_inc_unchecked(&vcc->stats->tx);
25667 }
25668 }
25669
25670@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25671 if (skb == NULL) {
25672 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
25673
25674- atomic_inc(&vcc->stats->rx_drop);
25675+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25676 return -ENOMEM;
25677 }
25678
25679@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25680
25681 dev_kfree_skb_any(skb);
25682
25683- atomic_inc(&vcc->stats->rx_drop);
25684+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25685 return -ENOMEM;
25686 }
25687
25688 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25689
25690 vcc->push(vcc, skb);
25691- atomic_inc(&vcc->stats->rx);
25692+ atomic_inc_unchecked(&vcc->stats->rx);
25693
25694 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25695
25696@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
25697 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
25698 fore200e->atm_dev->number,
25699 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
25700- atomic_inc(&vcc->stats->rx_err);
25701+ atomic_inc_unchecked(&vcc->stats->rx_err);
25702 }
25703 }
25704
25705@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
25706 goto retry_here;
25707 }
25708
25709- atomic_inc(&vcc->stats->tx_err);
25710+ atomic_inc_unchecked(&vcc->stats->tx_err);
25711
25712 fore200e->tx_sat++;
25713 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
25714diff --git a/drivers/atm/he.c b/drivers/atm/he.c
25715index 9a51df4..f3bb5f8 100644
25716--- a/drivers/atm/he.c
25717+++ b/drivers/atm/he.c
25718@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25719
25720 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
25721 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
25722- atomic_inc(&vcc->stats->rx_drop);
25723+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25724 goto return_host_buffers;
25725 }
25726
25727@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25728 RBRQ_LEN_ERR(he_dev->rbrq_head)
25729 ? "LEN_ERR" : "",
25730 vcc->vpi, vcc->vci);
25731- atomic_inc(&vcc->stats->rx_err);
25732+ atomic_inc_unchecked(&vcc->stats->rx_err);
25733 goto return_host_buffers;
25734 }
25735
25736@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25737 vcc->push(vcc, skb);
25738 spin_lock(&he_dev->global_lock);
25739
25740- atomic_inc(&vcc->stats->rx);
25741+ atomic_inc_unchecked(&vcc->stats->rx);
25742
25743 return_host_buffers:
25744 ++pdus_assembled;
25745@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
25746 tpd->vcc->pop(tpd->vcc, tpd->skb);
25747 else
25748 dev_kfree_skb_any(tpd->skb);
25749- atomic_inc(&tpd->vcc->stats->tx_err);
25750+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
25751 }
25752 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
25753 return;
25754@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25755 vcc->pop(vcc, skb);
25756 else
25757 dev_kfree_skb_any(skb);
25758- atomic_inc(&vcc->stats->tx_err);
25759+ atomic_inc_unchecked(&vcc->stats->tx_err);
25760 return -EINVAL;
25761 }
25762
25763@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25764 vcc->pop(vcc, skb);
25765 else
25766 dev_kfree_skb_any(skb);
25767- atomic_inc(&vcc->stats->tx_err);
25768+ atomic_inc_unchecked(&vcc->stats->tx_err);
25769 return -EINVAL;
25770 }
25771 #endif
25772@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25773 vcc->pop(vcc, skb);
25774 else
25775 dev_kfree_skb_any(skb);
25776- atomic_inc(&vcc->stats->tx_err);
25777+ atomic_inc_unchecked(&vcc->stats->tx_err);
25778 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25779 return -ENOMEM;
25780 }
25781@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25782 vcc->pop(vcc, skb);
25783 else
25784 dev_kfree_skb_any(skb);
25785- atomic_inc(&vcc->stats->tx_err);
25786+ atomic_inc_unchecked(&vcc->stats->tx_err);
25787 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25788 return -ENOMEM;
25789 }
25790@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25791 __enqueue_tpd(he_dev, tpd, cid);
25792 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25793
25794- atomic_inc(&vcc->stats->tx);
25795+ atomic_inc_unchecked(&vcc->stats->tx);
25796
25797 return 0;
25798 }
25799diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
25800index b812103..e391a49 100644
25801--- a/drivers/atm/horizon.c
25802+++ b/drivers/atm/horizon.c
25803@@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
25804 {
25805 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
25806 // VC layer stats
25807- atomic_inc(&vcc->stats->rx);
25808+ atomic_inc_unchecked(&vcc->stats->rx);
25809 __net_timestamp(skb);
25810 // end of our responsibility
25811 vcc->push (vcc, skb);
25812@@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
25813 dev->tx_iovec = NULL;
25814
25815 // VC layer stats
25816- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25817+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25818
25819 // free the skb
25820 hrz_kfree_skb (skb);
25821diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
25822index 1c05212..c28e200 100644
25823--- a/drivers/atm/idt77252.c
25824+++ b/drivers/atm/idt77252.c
25825@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
25826 else
25827 dev_kfree_skb(skb);
25828
25829- atomic_inc(&vcc->stats->tx);
25830+ atomic_inc_unchecked(&vcc->stats->tx);
25831 }
25832
25833 atomic_dec(&scq->used);
25834@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25835 if ((sb = dev_alloc_skb(64)) == NULL) {
25836 printk("%s: Can't allocate buffers for aal0.\n",
25837 card->name);
25838- atomic_add(i, &vcc->stats->rx_drop);
25839+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
25840 break;
25841 }
25842 if (!atm_charge(vcc, sb->truesize)) {
25843 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
25844 card->name);
25845- atomic_add(i - 1, &vcc->stats->rx_drop);
25846+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
25847 dev_kfree_skb(sb);
25848 break;
25849 }
25850@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25851 ATM_SKB(sb)->vcc = vcc;
25852 __net_timestamp(sb);
25853 vcc->push(vcc, sb);
25854- atomic_inc(&vcc->stats->rx);
25855+ atomic_inc_unchecked(&vcc->stats->rx);
25856
25857 cell += ATM_CELL_PAYLOAD;
25858 }
25859@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25860 "(CDC: %08x)\n",
25861 card->name, len, rpp->len, readl(SAR_REG_CDC));
25862 recycle_rx_pool_skb(card, rpp);
25863- atomic_inc(&vcc->stats->rx_err);
25864+ atomic_inc_unchecked(&vcc->stats->rx_err);
25865 return;
25866 }
25867 if (stat & SAR_RSQE_CRC) {
25868 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
25869 recycle_rx_pool_skb(card, rpp);
25870- atomic_inc(&vcc->stats->rx_err);
25871+ atomic_inc_unchecked(&vcc->stats->rx_err);
25872 return;
25873 }
25874 if (skb_queue_len(&rpp->queue) > 1) {
25875@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25876 RXPRINTK("%s: Can't alloc RX skb.\n",
25877 card->name);
25878 recycle_rx_pool_skb(card, rpp);
25879- atomic_inc(&vcc->stats->rx_err);
25880+ atomic_inc_unchecked(&vcc->stats->rx_err);
25881 return;
25882 }
25883 if (!atm_charge(vcc, skb->truesize)) {
25884@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25885 __net_timestamp(skb);
25886
25887 vcc->push(vcc, skb);
25888- atomic_inc(&vcc->stats->rx);
25889+ atomic_inc_unchecked(&vcc->stats->rx);
25890
25891 return;
25892 }
25893@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25894 __net_timestamp(skb);
25895
25896 vcc->push(vcc, skb);
25897- atomic_inc(&vcc->stats->rx);
25898+ atomic_inc_unchecked(&vcc->stats->rx);
25899
25900 if (skb->truesize > SAR_FB_SIZE_3)
25901 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
25902@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
25903 if (vcc->qos.aal != ATM_AAL0) {
25904 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
25905 card->name, vpi, vci);
25906- atomic_inc(&vcc->stats->rx_drop);
25907+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25908 goto drop;
25909 }
25910
25911 if ((sb = dev_alloc_skb(64)) == NULL) {
25912 printk("%s: Can't allocate buffers for AAL0.\n",
25913 card->name);
25914- atomic_inc(&vcc->stats->rx_err);
25915+ atomic_inc_unchecked(&vcc->stats->rx_err);
25916 goto drop;
25917 }
25918
25919@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
25920 ATM_SKB(sb)->vcc = vcc;
25921 __net_timestamp(sb);
25922 vcc->push(vcc, sb);
25923- atomic_inc(&vcc->stats->rx);
25924+ atomic_inc_unchecked(&vcc->stats->rx);
25925
25926 drop:
25927 skb_pull(queue, 64);
25928@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25929
25930 if (vc == NULL) {
25931 printk("%s: NULL connection in send().\n", card->name);
25932- atomic_inc(&vcc->stats->tx_err);
25933+ atomic_inc_unchecked(&vcc->stats->tx_err);
25934 dev_kfree_skb(skb);
25935 return -EINVAL;
25936 }
25937 if (!test_bit(VCF_TX, &vc->flags)) {
25938 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
25939- atomic_inc(&vcc->stats->tx_err);
25940+ atomic_inc_unchecked(&vcc->stats->tx_err);
25941 dev_kfree_skb(skb);
25942 return -EINVAL;
25943 }
25944@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25945 break;
25946 default:
25947 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
25948- atomic_inc(&vcc->stats->tx_err);
25949+ atomic_inc_unchecked(&vcc->stats->tx_err);
25950 dev_kfree_skb(skb);
25951 return -EINVAL;
25952 }
25953
25954 if (skb_shinfo(skb)->nr_frags != 0) {
25955 printk("%s: No scatter-gather yet.\n", card->name);
25956- atomic_inc(&vcc->stats->tx_err);
25957+ atomic_inc_unchecked(&vcc->stats->tx_err);
25958 dev_kfree_skb(skb);
25959 return -EINVAL;
25960 }
25961@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25962
25963 err = queue_skb(card, vc, skb, oam);
25964 if (err) {
25965- atomic_inc(&vcc->stats->tx_err);
25966+ atomic_inc_unchecked(&vcc->stats->tx_err);
25967 dev_kfree_skb(skb);
25968 return err;
25969 }
25970@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
25971 skb = dev_alloc_skb(64);
25972 if (!skb) {
25973 printk("%s: Out of memory in send_oam().\n", card->name);
25974- atomic_inc(&vcc->stats->tx_err);
25975+ atomic_inc_unchecked(&vcc->stats->tx_err);
25976 return -ENOMEM;
25977 }
25978 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
25979diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
25980index 3d0c2b0..45441fa 100644
25981--- a/drivers/atm/iphase.c
25982+++ b/drivers/atm/iphase.c
25983@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
25984 status = (u_short) (buf_desc_ptr->desc_mode);
25985 if (status & (RX_CER | RX_PTE | RX_OFL))
25986 {
25987- atomic_inc(&vcc->stats->rx_err);
25988+ atomic_inc_unchecked(&vcc->stats->rx_err);
25989 IF_ERR(printk("IA: bad packet, dropping it");)
25990 if (status & RX_CER) {
25991 IF_ERR(printk(" cause: packet CRC error\n");)
25992@@ -1169,7 +1169,7 @@ static int rx_pkt(struct atm_dev *dev)
25993 len = dma_addr - buf_addr;
25994 if (len > iadev->rx_buf_sz) {
25995 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
25996- atomic_inc(&vcc->stats->rx_err);
25997+ atomic_inc_unchecked(&vcc->stats->rx_err);
25998 goto out_free_desc;
25999 }
26000
26001@@ -1319,7 +1319,7 @@ static void rx_dle_intr(struct atm_dev *dev)
26002 ia_vcc = INPH_IA_VCC(vcc);
26003 if (ia_vcc == NULL)
26004 {
26005- atomic_inc(&vcc->stats->rx_err);
26006+ atomic_inc_unchecked(&vcc->stats->rx_err);
26007 dev_kfree_skb_any(skb);
26008 atm_return(vcc, atm_guess_pdu2truesize(len));
26009 goto INCR_DLE;
26010@@ -1331,7 +1331,7 @@ static void rx_dle_intr(struct atm_dev *dev)
26011 if ((length > iadev->rx_buf_sz) || (length >
26012 (skb->len - sizeof(struct cpcs_trailer))))
26013 {
26014- atomic_inc(&vcc->stats->rx_err);
26015+ atomic_inc_unchecked(&vcc->stats->rx_err);
26016 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
26017 length, skb->len);)
26018 dev_kfree_skb_any(skb);
26019@@ -1347,7 +1347,7 @@ static void rx_dle_intr(struct atm_dev *dev)
26020
26021 IF_RX(printk("rx_dle_intr: skb push");)
26022 vcc->push(vcc,skb);
26023- atomic_inc(&vcc->stats->rx);
26024+ atomic_inc_unchecked(&vcc->stats->rx);
26025 iadev->rx_pkt_cnt++;
26026 }
26027 INCR_DLE:
26028@@ -2827,15 +2827,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
26029 {
26030 struct k_sonet_stats *stats;
26031 stats = &PRIV(_ia_dev[board])->sonet_stats;
26032- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
26033- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
26034- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
26035- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
26036- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
26037- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
26038- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
26039- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
26040- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
26041+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
26042+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
26043+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
26044+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
26045+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
26046+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
26047+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
26048+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
26049+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
26050 }
26051 ia_cmds.status = 0;
26052 break;
26053@@ -2940,7 +2940,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
26054 if ((desc == 0) || (desc > iadev->num_tx_desc))
26055 {
26056 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
26057- atomic_inc(&vcc->stats->tx);
26058+ atomic_inc_unchecked(&vcc->stats->tx);
26059 if (vcc->pop)
26060 vcc->pop(vcc, skb);
26061 else
26062@@ -3045,14 +3045,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
26063 ATM_DESC(skb) = vcc->vci;
26064 skb_queue_tail(&iadev->tx_dma_q, skb);
26065
26066- atomic_inc(&vcc->stats->tx);
26067+ atomic_inc_unchecked(&vcc->stats->tx);
26068 iadev->tx_pkt_cnt++;
26069 /* Increment transaction counter */
26070 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
26071
26072 #if 0
26073 /* add flow control logic */
26074- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
26075+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
26076 if (iavcc->vc_desc_cnt > 10) {
26077 vcc->tx_quota = vcc->tx_quota * 3 / 4;
26078 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
26079diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
26080index f556969..0da15eb 100644
26081--- a/drivers/atm/lanai.c
26082+++ b/drivers/atm/lanai.c
26083@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
26084 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
26085 lanai_endtx(lanai, lvcc);
26086 lanai_free_skb(lvcc->tx.atmvcc, skb);
26087- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
26088+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
26089 }
26090
26091 /* Try to fill the buffer - don't call unless there is backlog */
26092@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
26093 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
26094 __net_timestamp(skb);
26095 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
26096- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
26097+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
26098 out:
26099 lvcc->rx.buf.ptr = end;
26100 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
26101@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
26102 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
26103 "vcc %d\n", lanai->number, (unsigned int) s, vci);
26104 lanai->stats.service_rxnotaal5++;
26105- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26106+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26107 return 0;
26108 }
26109 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
26110@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
26111 int bytes;
26112 read_unlock(&vcc_sklist_lock);
26113 DPRINTK("got trashed rx pdu on vci %d\n", vci);
26114- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26115+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26116 lvcc->stats.x.aal5.service_trash++;
26117 bytes = (SERVICE_GET_END(s) * 16) -
26118 (((unsigned long) lvcc->rx.buf.ptr) -
26119@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
26120 }
26121 if (s & SERVICE_STREAM) {
26122 read_unlock(&vcc_sklist_lock);
26123- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26124+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26125 lvcc->stats.x.aal5.service_stream++;
26126 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
26127 "PDU on VCI %d!\n", lanai->number, vci);
26128@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
26129 return 0;
26130 }
26131 DPRINTK("got rx crc error on vci %d\n", vci);
26132- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26133+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26134 lvcc->stats.x.aal5.service_rxcrc++;
26135 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
26136 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
26137diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
26138index 1c70c45..300718d 100644
26139--- a/drivers/atm/nicstar.c
26140+++ b/drivers/atm/nicstar.c
26141@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
26142 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
26143 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
26144 card->index);
26145- atomic_inc(&vcc->stats->tx_err);
26146+ atomic_inc_unchecked(&vcc->stats->tx_err);
26147 dev_kfree_skb_any(skb);
26148 return -EINVAL;
26149 }
26150@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
26151 if (!vc->tx) {
26152 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
26153 card->index);
26154- atomic_inc(&vcc->stats->tx_err);
26155+ atomic_inc_unchecked(&vcc->stats->tx_err);
26156 dev_kfree_skb_any(skb);
26157 return -EINVAL;
26158 }
26159@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
26160 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
26161 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
26162 card->index);
26163- atomic_inc(&vcc->stats->tx_err);
26164+ atomic_inc_unchecked(&vcc->stats->tx_err);
26165 dev_kfree_skb_any(skb);
26166 return -EINVAL;
26167 }
26168
26169 if (skb_shinfo(skb)->nr_frags != 0) {
26170 printk("nicstar%d: No scatter-gather yet.\n", card->index);
26171- atomic_inc(&vcc->stats->tx_err);
26172+ atomic_inc_unchecked(&vcc->stats->tx_err);
26173 dev_kfree_skb_any(skb);
26174 return -EINVAL;
26175 }
26176@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
26177 }
26178
26179 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
26180- atomic_inc(&vcc->stats->tx_err);
26181+ atomic_inc_unchecked(&vcc->stats->tx_err);
26182 dev_kfree_skb_any(skb);
26183 return -EIO;
26184 }
26185- atomic_inc(&vcc->stats->tx);
26186+ atomic_inc_unchecked(&vcc->stats->tx);
26187
26188 return 0;
26189 }
26190@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26191 printk
26192 ("nicstar%d: Can't allocate buffers for aal0.\n",
26193 card->index);
26194- atomic_add(i, &vcc->stats->rx_drop);
26195+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
26196 break;
26197 }
26198 if (!atm_charge(vcc, sb->truesize)) {
26199 RXPRINTK
26200 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
26201 card->index);
26202- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
26203+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
26204 dev_kfree_skb_any(sb);
26205 break;
26206 }
26207@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26208 ATM_SKB(sb)->vcc = vcc;
26209 __net_timestamp(sb);
26210 vcc->push(vcc, sb);
26211- atomic_inc(&vcc->stats->rx);
26212+ atomic_inc_unchecked(&vcc->stats->rx);
26213 cell += ATM_CELL_PAYLOAD;
26214 }
26215
26216@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26217 if (iovb == NULL) {
26218 printk("nicstar%d: Out of iovec buffers.\n",
26219 card->index);
26220- atomic_inc(&vcc->stats->rx_drop);
26221+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26222 recycle_rx_buf(card, skb);
26223 return;
26224 }
26225@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26226 small or large buffer itself. */
26227 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
26228 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
26229- atomic_inc(&vcc->stats->rx_err);
26230+ atomic_inc_unchecked(&vcc->stats->rx_err);
26231 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26232 NS_MAX_IOVECS);
26233 NS_PRV_IOVCNT(iovb) = 0;
26234@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26235 ("nicstar%d: Expected a small buffer, and this is not one.\n",
26236 card->index);
26237 which_list(card, skb);
26238- atomic_inc(&vcc->stats->rx_err);
26239+ atomic_inc_unchecked(&vcc->stats->rx_err);
26240 recycle_rx_buf(card, skb);
26241 vc->rx_iov = NULL;
26242 recycle_iov_buf(card, iovb);
26243@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26244 ("nicstar%d: Expected a large buffer, and this is not one.\n",
26245 card->index);
26246 which_list(card, skb);
26247- atomic_inc(&vcc->stats->rx_err);
26248+ atomic_inc_unchecked(&vcc->stats->rx_err);
26249 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26250 NS_PRV_IOVCNT(iovb));
26251 vc->rx_iov = NULL;
26252@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26253 printk(" - PDU size mismatch.\n");
26254 else
26255 printk(".\n");
26256- atomic_inc(&vcc->stats->rx_err);
26257+ atomic_inc_unchecked(&vcc->stats->rx_err);
26258 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26259 NS_PRV_IOVCNT(iovb));
26260 vc->rx_iov = NULL;
26261@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26262 /* skb points to a small buffer */
26263 if (!atm_charge(vcc, skb->truesize)) {
26264 push_rxbufs(card, skb);
26265- atomic_inc(&vcc->stats->rx_drop);
26266+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26267 } else {
26268 skb_put(skb, len);
26269 dequeue_sm_buf(card, skb);
26270@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26271 ATM_SKB(skb)->vcc = vcc;
26272 __net_timestamp(skb);
26273 vcc->push(vcc, skb);
26274- atomic_inc(&vcc->stats->rx);
26275+ atomic_inc_unchecked(&vcc->stats->rx);
26276 }
26277 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
26278 struct sk_buff *sb;
26279@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26280 if (len <= NS_SMBUFSIZE) {
26281 if (!atm_charge(vcc, sb->truesize)) {
26282 push_rxbufs(card, sb);
26283- atomic_inc(&vcc->stats->rx_drop);
26284+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26285 } else {
26286 skb_put(sb, len);
26287 dequeue_sm_buf(card, sb);
26288@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26289 ATM_SKB(sb)->vcc = vcc;
26290 __net_timestamp(sb);
26291 vcc->push(vcc, sb);
26292- atomic_inc(&vcc->stats->rx);
26293+ atomic_inc_unchecked(&vcc->stats->rx);
26294 }
26295
26296 push_rxbufs(card, skb);
26297@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26298
26299 if (!atm_charge(vcc, skb->truesize)) {
26300 push_rxbufs(card, skb);
26301- atomic_inc(&vcc->stats->rx_drop);
26302+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26303 } else {
26304 dequeue_lg_buf(card, skb);
26305 #ifdef NS_USE_DESTRUCTORS
26306@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26307 ATM_SKB(skb)->vcc = vcc;
26308 __net_timestamp(skb);
26309 vcc->push(vcc, skb);
26310- atomic_inc(&vcc->stats->rx);
26311+ atomic_inc_unchecked(&vcc->stats->rx);
26312 }
26313
26314 push_rxbufs(card, sb);
26315@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26316 printk
26317 ("nicstar%d: Out of huge buffers.\n",
26318 card->index);
26319- atomic_inc(&vcc->stats->rx_drop);
26320+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26321 recycle_iovec_rx_bufs(card,
26322 (struct iovec *)
26323 iovb->data,
26324@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26325 card->hbpool.count++;
26326 } else
26327 dev_kfree_skb_any(hb);
26328- atomic_inc(&vcc->stats->rx_drop);
26329+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26330 } else {
26331 /* Copy the small buffer to the huge buffer */
26332 sb = (struct sk_buff *)iov->iov_base;
26333@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26334 #endif /* NS_USE_DESTRUCTORS */
26335 __net_timestamp(hb);
26336 vcc->push(vcc, hb);
26337- atomic_inc(&vcc->stats->rx);
26338+ atomic_inc_unchecked(&vcc->stats->rx);
26339 }
26340 }
26341
26342diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
26343index 5d1d076..12fbca4 100644
26344--- a/drivers/atm/solos-pci.c
26345+++ b/drivers/atm/solos-pci.c
26346@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
26347 }
26348 atm_charge(vcc, skb->truesize);
26349 vcc->push(vcc, skb);
26350- atomic_inc(&vcc->stats->rx);
26351+ atomic_inc_unchecked(&vcc->stats->rx);
26352 break;
26353
26354 case PKT_STATUS:
26355@@ -1008,7 +1008,7 @@ static uint32_t fpga_tx(struct solos_card *card)
26356 vcc = SKB_CB(oldskb)->vcc;
26357
26358 if (vcc) {
26359- atomic_inc(&vcc->stats->tx);
26360+ atomic_inc_unchecked(&vcc->stats->tx);
26361 solos_pop(vcc, oldskb);
26362 } else
26363 dev_kfree_skb_irq(oldskb);
26364diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
26365index 90f1ccc..04c4a1e 100644
26366--- a/drivers/atm/suni.c
26367+++ b/drivers/atm/suni.c
26368@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
26369
26370
26371 #define ADD_LIMITED(s,v) \
26372- atomic_add((v),&stats->s); \
26373- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
26374+ atomic_add_unchecked((v),&stats->s); \
26375+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
26376
26377
26378 static void suni_hz(unsigned long from_timer)
26379diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
26380index 5120a96..e2572bd 100644
26381--- a/drivers/atm/uPD98402.c
26382+++ b/drivers/atm/uPD98402.c
26383@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
26384 struct sonet_stats tmp;
26385 int error = 0;
26386
26387- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26388+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26389 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
26390 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
26391 if (zero && !error) {
26392@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
26393
26394
26395 #define ADD_LIMITED(s,v) \
26396- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
26397- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
26398- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26399+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
26400+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
26401+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26402
26403
26404 static void stat_event(struct atm_dev *dev)
26405@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
26406 if (reason & uPD98402_INT_PFM) stat_event(dev);
26407 if (reason & uPD98402_INT_PCO) {
26408 (void) GET(PCOCR); /* clear interrupt cause */
26409- atomic_add(GET(HECCT),
26410+ atomic_add_unchecked(GET(HECCT),
26411 &PRIV(dev)->sonet_stats.uncorr_hcs);
26412 }
26413 if ((reason & uPD98402_INT_RFO) &&
26414@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
26415 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
26416 uPD98402_INT_LOS),PIMR); /* enable them */
26417 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
26418- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26419- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
26420- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
26421+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26422+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
26423+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
26424 return 0;
26425 }
26426
26427diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
26428index d889f56..17eb71e 100644
26429--- a/drivers/atm/zatm.c
26430+++ b/drivers/atm/zatm.c
26431@@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26432 }
26433 if (!size) {
26434 dev_kfree_skb_irq(skb);
26435- if (vcc) atomic_inc(&vcc->stats->rx_err);
26436+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
26437 continue;
26438 }
26439 if (!atm_charge(vcc,skb->truesize)) {
26440@@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26441 skb->len = size;
26442 ATM_SKB(skb)->vcc = vcc;
26443 vcc->push(vcc,skb);
26444- atomic_inc(&vcc->stats->rx);
26445+ atomic_inc_unchecked(&vcc->stats->rx);
26446 }
26447 zout(pos & 0xffff,MTA(mbx));
26448 #if 0 /* probably a stupid idea */
26449@@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
26450 skb_queue_head(&zatm_vcc->backlog,skb);
26451 break;
26452 }
26453- atomic_inc(&vcc->stats->tx);
26454+ atomic_inc_unchecked(&vcc->stats->tx);
26455 wake_up(&zatm_vcc->tx_wait);
26456 }
26457
26458diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
26459index a4760e0..51283cf 100644
26460--- a/drivers/base/devtmpfs.c
26461+++ b/drivers/base/devtmpfs.c
26462@@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
26463 if (!thread)
26464 return 0;
26465
26466- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
26467+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
26468 if (err)
26469 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
26470 else
26471diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
26472index caf995f..6f76697 100644
26473--- a/drivers/base/power/wakeup.c
26474+++ b/drivers/base/power/wakeup.c
26475@@ -30,14 +30,14 @@ bool events_check_enabled;
26476 * They need to be modified together atomically, so it's better to use one
26477 * atomic variable to hold them both.
26478 */
26479-static atomic_t combined_event_count = ATOMIC_INIT(0);
26480+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
26481
26482 #define IN_PROGRESS_BITS (sizeof(int) * 4)
26483 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
26484
26485 static void split_counters(unsigned int *cnt, unsigned int *inpr)
26486 {
26487- unsigned int comb = atomic_read(&combined_event_count);
26488+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
26489
26490 *cnt = (comb >> IN_PROGRESS_BITS);
26491 *inpr = comb & MAX_IN_PROGRESS;
26492@@ -353,7 +353,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
26493 ws->last_time = ktime_get();
26494
26495 /* Increment the counter of events in progress. */
26496- atomic_inc(&combined_event_count);
26497+ atomic_inc_unchecked(&combined_event_count);
26498 }
26499
26500 /**
26501@@ -443,7 +443,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
26502 * Increment the counter of registered wakeup events and decrement the
26503 * couter of wakeup events in progress simultaneously.
26504 */
26505- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
26506+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
26507 }
26508
26509 /**
26510diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
26511index b0f553b..77b928b 100644
26512--- a/drivers/block/cciss.c
26513+++ b/drivers/block/cciss.c
26514@@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
26515 int err;
26516 u32 cp;
26517
26518+ memset(&arg64, 0, sizeof(arg64));
26519+
26520 err = 0;
26521 err |=
26522 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
26523@@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
26524 while (!list_empty(&h->reqQ)) {
26525 c = list_entry(h->reqQ.next, CommandList_struct, list);
26526 /* can't do anything if fifo is full */
26527- if ((h->access.fifo_full(h))) {
26528+ if ((h->access->fifo_full(h))) {
26529 dev_warn(&h->pdev->dev, "fifo full\n");
26530 break;
26531 }
26532@@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
26533 h->Qdepth--;
26534
26535 /* Tell the controller execute command */
26536- h->access.submit_command(h, c);
26537+ h->access->submit_command(h, c);
26538
26539 /* Put job onto the completed Q */
26540 addQ(&h->cmpQ, c);
26541@@ -3443,17 +3445,17 @@ startio:
26542
26543 static inline unsigned long get_next_completion(ctlr_info_t *h)
26544 {
26545- return h->access.command_completed(h);
26546+ return h->access->command_completed(h);
26547 }
26548
26549 static inline int interrupt_pending(ctlr_info_t *h)
26550 {
26551- return h->access.intr_pending(h);
26552+ return h->access->intr_pending(h);
26553 }
26554
26555 static inline long interrupt_not_for_us(ctlr_info_t *h)
26556 {
26557- return ((h->access.intr_pending(h) == 0) ||
26558+ return ((h->access->intr_pending(h) == 0) ||
26559 (h->interrupts_enabled == 0));
26560 }
26561
26562@@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
26563 u32 a;
26564
26565 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
26566- return h->access.command_completed(h);
26567+ return h->access->command_completed(h);
26568
26569 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
26570 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
26571@@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
26572 trans_support & CFGTBL_Trans_use_short_tags);
26573
26574 /* Change the access methods to the performant access methods */
26575- h->access = SA5_performant_access;
26576+ h->access = &SA5_performant_access;
26577 h->transMethod = CFGTBL_Trans_Performant;
26578
26579 return;
26580@@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
26581 if (prod_index < 0)
26582 return -ENODEV;
26583 h->product_name = products[prod_index].product_name;
26584- h->access = *(products[prod_index].access);
26585+ h->access = products[prod_index].access;
26586
26587 if (cciss_board_disabled(h)) {
26588 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
26589@@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
26590 }
26591
26592 /* make sure the board interrupts are off */
26593- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26594+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26595 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
26596 if (rc)
26597 goto clean2;
26598@@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
26599 * fake ones to scoop up any residual completions.
26600 */
26601 spin_lock_irqsave(&h->lock, flags);
26602- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26603+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26604 spin_unlock_irqrestore(&h->lock, flags);
26605 free_irq(h->intr[h->intr_mode], h);
26606 rc = cciss_request_irq(h, cciss_msix_discard_completions,
26607@@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
26608 dev_info(&h->pdev->dev, "Board READY.\n");
26609 dev_info(&h->pdev->dev,
26610 "Waiting for stale completions to drain.\n");
26611- h->access.set_intr_mask(h, CCISS_INTR_ON);
26612+ h->access->set_intr_mask(h, CCISS_INTR_ON);
26613 msleep(10000);
26614- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26615+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26616
26617 rc = controller_reset_failed(h->cfgtable);
26618 if (rc)
26619@@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
26620 cciss_scsi_setup(h);
26621
26622 /* Turn the interrupts on so we can service requests */
26623- h->access.set_intr_mask(h, CCISS_INTR_ON);
26624+ h->access->set_intr_mask(h, CCISS_INTR_ON);
26625
26626 /* Get the firmware version */
26627 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
26628@@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
26629 kfree(flush_buf);
26630 if (return_code != IO_OK)
26631 dev_warn(&h->pdev->dev, "Error flushing cache\n");
26632- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26633+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26634 free_irq(h->intr[h->intr_mode], h);
26635 }
26636
26637diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
26638index 7fda30e..eb5dfe0 100644
26639--- a/drivers/block/cciss.h
26640+++ b/drivers/block/cciss.h
26641@@ -101,7 +101,7 @@ struct ctlr_info
26642 /* information about each logical volume */
26643 drive_info_struct *drv[CISS_MAX_LUN];
26644
26645- struct access_method access;
26646+ struct access_method *access;
26647
26648 /* queue and queue Info */
26649 struct list_head reqQ;
26650diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
26651index 9125bbe..eede5c8 100644
26652--- a/drivers/block/cpqarray.c
26653+++ b/drivers/block/cpqarray.c
26654@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26655 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
26656 goto Enomem4;
26657 }
26658- hba[i]->access.set_intr_mask(hba[i], 0);
26659+ hba[i]->access->set_intr_mask(hba[i], 0);
26660 if (request_irq(hba[i]->intr, do_ida_intr,
26661 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
26662 {
26663@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26664 add_timer(&hba[i]->timer);
26665
26666 /* Enable IRQ now that spinlock and rate limit timer are set up */
26667- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26668+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26669
26670 for(j=0; j<NWD; j++) {
26671 struct gendisk *disk = ida_gendisk[i][j];
26672@@ -694,7 +694,7 @@ DBGINFO(
26673 for(i=0; i<NR_PRODUCTS; i++) {
26674 if (board_id == products[i].board_id) {
26675 c->product_name = products[i].product_name;
26676- c->access = *(products[i].access);
26677+ c->access = products[i].access;
26678 break;
26679 }
26680 }
26681@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
26682 hba[ctlr]->intr = intr;
26683 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
26684 hba[ctlr]->product_name = products[j].product_name;
26685- hba[ctlr]->access = *(products[j].access);
26686+ hba[ctlr]->access = products[j].access;
26687 hba[ctlr]->ctlr = ctlr;
26688 hba[ctlr]->board_id = board_id;
26689 hba[ctlr]->pci_dev = NULL; /* not PCI */
26690@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
26691
26692 while((c = h->reqQ) != NULL) {
26693 /* Can't do anything if we're busy */
26694- if (h->access.fifo_full(h) == 0)
26695+ if (h->access->fifo_full(h) == 0)
26696 return;
26697
26698 /* Get the first entry from the request Q */
26699@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
26700 h->Qdepth--;
26701
26702 /* Tell the controller to do our bidding */
26703- h->access.submit_command(h, c);
26704+ h->access->submit_command(h, c);
26705
26706 /* Get onto the completion Q */
26707 addQ(&h->cmpQ, c);
26708@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26709 unsigned long flags;
26710 __u32 a,a1;
26711
26712- istat = h->access.intr_pending(h);
26713+ istat = h->access->intr_pending(h);
26714 /* Is this interrupt for us? */
26715 if (istat == 0)
26716 return IRQ_NONE;
26717@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26718 */
26719 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
26720 if (istat & FIFO_NOT_EMPTY) {
26721- while((a = h->access.command_completed(h))) {
26722+ while((a = h->access->command_completed(h))) {
26723 a1 = a; a &= ~3;
26724 if ((c = h->cmpQ) == NULL)
26725 {
26726@@ -1449,11 +1449,11 @@ static int sendcmd(
26727 /*
26728 * Disable interrupt
26729 */
26730- info_p->access.set_intr_mask(info_p, 0);
26731+ info_p->access->set_intr_mask(info_p, 0);
26732 /* Make sure there is room in the command FIFO */
26733 /* Actually it should be completely empty at this time. */
26734 for (i = 200000; i > 0; i--) {
26735- temp = info_p->access.fifo_full(info_p);
26736+ temp = info_p->access->fifo_full(info_p);
26737 if (temp != 0) {
26738 break;
26739 }
26740@@ -1466,7 +1466,7 @@ DBG(
26741 /*
26742 * Send the cmd
26743 */
26744- info_p->access.submit_command(info_p, c);
26745+ info_p->access->submit_command(info_p, c);
26746 complete = pollcomplete(ctlr);
26747
26748 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
26749@@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
26750 * we check the new geometry. Then turn interrupts back on when
26751 * we're done.
26752 */
26753- host->access.set_intr_mask(host, 0);
26754+ host->access->set_intr_mask(host, 0);
26755 getgeometry(ctlr);
26756- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
26757+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
26758
26759 for(i=0; i<NWD; i++) {
26760 struct gendisk *disk = ida_gendisk[ctlr][i];
26761@@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
26762 /* Wait (up to 2 seconds) for a command to complete */
26763
26764 for (i = 200000; i > 0; i--) {
26765- done = hba[ctlr]->access.command_completed(hba[ctlr]);
26766+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
26767 if (done == 0) {
26768 udelay(10); /* a short fixed delay */
26769 } else
26770diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
26771index be73e9d..7fbf140 100644
26772--- a/drivers/block/cpqarray.h
26773+++ b/drivers/block/cpqarray.h
26774@@ -99,7 +99,7 @@ struct ctlr_info {
26775 drv_info_t drv[NWD];
26776 struct proc_dir_entry *proc;
26777
26778- struct access_method access;
26779+ struct access_method *access;
26780
26781 cmdlist_t *reqQ;
26782 cmdlist_t *cmpQ;
26783diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
26784index 9cf2035..bffca95 100644
26785--- a/drivers/block/drbd/drbd_int.h
26786+++ b/drivers/block/drbd/drbd_int.h
26787@@ -736,7 +736,7 @@ struct drbd_request;
26788 struct drbd_epoch {
26789 struct list_head list;
26790 unsigned int barrier_nr;
26791- atomic_t epoch_size; /* increased on every request added. */
26792+ atomic_unchecked_t epoch_size; /* increased on every request added. */
26793 atomic_t active; /* increased on every req. added, and dec on every finished. */
26794 unsigned long flags;
26795 };
26796@@ -1108,7 +1108,7 @@ struct drbd_conf {
26797 void *int_dig_in;
26798 void *int_dig_vv;
26799 wait_queue_head_t seq_wait;
26800- atomic_t packet_seq;
26801+ atomic_unchecked_t packet_seq;
26802 unsigned int peer_seq;
26803 spinlock_t peer_seq_lock;
26804 unsigned int minor;
26805@@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
26806
26807 static inline void drbd_tcp_cork(struct socket *sock)
26808 {
26809- int __user val = 1;
26810+ int val = 1;
26811 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26812- (char __user *)&val, sizeof(val));
26813+ (char __force_user *)&val, sizeof(val));
26814 }
26815
26816 static inline void drbd_tcp_uncork(struct socket *sock)
26817 {
26818- int __user val = 0;
26819+ int val = 0;
26820 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26821- (char __user *)&val, sizeof(val));
26822+ (char __force_user *)&val, sizeof(val));
26823 }
26824
26825 static inline void drbd_tcp_nodelay(struct socket *sock)
26826 {
26827- int __user val = 1;
26828+ int val = 1;
26829 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
26830- (char __user *)&val, sizeof(val));
26831+ (char __force_user *)&val, sizeof(val));
26832 }
26833
26834 static inline void drbd_tcp_quickack(struct socket *sock)
26835 {
26836- int __user val = 2;
26837+ int val = 2;
26838 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
26839- (char __user *)&val, sizeof(val));
26840+ (char __force_user *)&val, sizeof(val));
26841 }
26842
26843 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
26844diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
26845index 0358e55..bc33689 100644
26846--- a/drivers/block/drbd/drbd_main.c
26847+++ b/drivers/block/drbd/drbd_main.c
26848@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
26849 p.sector = sector;
26850 p.block_id = block_id;
26851 p.blksize = blksize;
26852- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
26853+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
26854
26855 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
26856 return false;
26857@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
26858 p.sector = cpu_to_be64(req->sector);
26859 p.block_id = (unsigned long)req;
26860 p.seq_num = cpu_to_be32(req->seq_num =
26861- atomic_add_return(1, &mdev->packet_seq));
26862+ atomic_add_return_unchecked(1, &mdev->packet_seq));
26863
26864 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
26865
26866@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
26867 atomic_set(&mdev->unacked_cnt, 0);
26868 atomic_set(&mdev->local_cnt, 0);
26869 atomic_set(&mdev->net_cnt, 0);
26870- atomic_set(&mdev->packet_seq, 0);
26871+ atomic_set_unchecked(&mdev->packet_seq, 0);
26872 atomic_set(&mdev->pp_in_use, 0);
26873 atomic_set(&mdev->pp_in_use_by_net, 0);
26874 atomic_set(&mdev->rs_sect_in, 0);
26875@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
26876 mdev->receiver.t_state);
26877
26878 /* no need to lock it, I'm the only thread alive */
26879- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
26880- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
26881+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
26882+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
26883 mdev->al_writ_cnt =
26884 mdev->bm_writ_cnt =
26885 mdev->read_cnt =
26886diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
26887index af2a250..219c74b 100644
26888--- a/drivers/block/drbd/drbd_nl.c
26889+++ b/drivers/block/drbd/drbd_nl.c
26890@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
26891 module_put(THIS_MODULE);
26892 }
26893
26894-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26895+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26896
26897 static unsigned short *
26898 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
26899@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
26900 cn_reply->id.idx = CN_IDX_DRBD;
26901 cn_reply->id.val = CN_VAL_DRBD;
26902
26903- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26904+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26905 cn_reply->ack = 0; /* not used here. */
26906 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26907 (int)((char *)tl - (char *)reply->tag_list);
26908@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
26909 cn_reply->id.idx = CN_IDX_DRBD;
26910 cn_reply->id.val = CN_VAL_DRBD;
26911
26912- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26913+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26914 cn_reply->ack = 0; /* not used here. */
26915 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26916 (int)((char *)tl - (char *)reply->tag_list);
26917@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
26918 cn_reply->id.idx = CN_IDX_DRBD;
26919 cn_reply->id.val = CN_VAL_DRBD;
26920
26921- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
26922+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
26923 cn_reply->ack = 0; // not used here.
26924 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26925 (int)((char*)tl - (char*)reply->tag_list);
26926@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
26927 cn_reply->id.idx = CN_IDX_DRBD;
26928 cn_reply->id.val = CN_VAL_DRBD;
26929
26930- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26931+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26932 cn_reply->ack = 0; /* not used here. */
26933 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26934 (int)((char *)tl - (char *)reply->tag_list);
26935diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
26936index 43beaca..4a5b1dd 100644
26937--- a/drivers/block/drbd/drbd_receiver.c
26938+++ b/drivers/block/drbd/drbd_receiver.c
26939@@ -894,7 +894,7 @@ retry:
26940 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
26941 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
26942
26943- atomic_set(&mdev->packet_seq, 0);
26944+ atomic_set_unchecked(&mdev->packet_seq, 0);
26945 mdev->peer_seq = 0;
26946
26947 drbd_thread_start(&mdev->asender);
26948@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26949 do {
26950 next_epoch = NULL;
26951
26952- epoch_size = atomic_read(&epoch->epoch_size);
26953+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
26954
26955 switch (ev & ~EV_CLEANUP) {
26956 case EV_PUT:
26957@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26958 rv = FE_DESTROYED;
26959 } else {
26960 epoch->flags = 0;
26961- atomic_set(&epoch->epoch_size, 0);
26962+ atomic_set_unchecked(&epoch->epoch_size, 0);
26963 /* atomic_set(&epoch->active, 0); is already zero */
26964 if (rv == FE_STILL_LIVE)
26965 rv = FE_RECYCLED;
26966@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26967 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
26968 drbd_flush(mdev);
26969
26970- if (atomic_read(&mdev->current_epoch->epoch_size)) {
26971+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26972 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
26973 if (epoch)
26974 break;
26975 }
26976
26977 epoch = mdev->current_epoch;
26978- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
26979+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
26980
26981 D_ASSERT(atomic_read(&epoch->active) == 0);
26982 D_ASSERT(epoch->flags == 0);
26983@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26984 }
26985
26986 epoch->flags = 0;
26987- atomic_set(&epoch->epoch_size, 0);
26988+ atomic_set_unchecked(&epoch->epoch_size, 0);
26989 atomic_set(&epoch->active, 0);
26990
26991 spin_lock(&mdev->epoch_lock);
26992- if (atomic_read(&mdev->current_epoch->epoch_size)) {
26993+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26994 list_add(&epoch->list, &mdev->current_epoch->list);
26995 mdev->current_epoch = epoch;
26996 mdev->epochs++;
26997@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
26998 spin_unlock(&mdev->peer_seq_lock);
26999
27000 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
27001- atomic_inc(&mdev->current_epoch->epoch_size);
27002+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
27003 return drbd_drain_block(mdev, data_size);
27004 }
27005
27006@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
27007
27008 spin_lock(&mdev->epoch_lock);
27009 e->epoch = mdev->current_epoch;
27010- atomic_inc(&e->epoch->epoch_size);
27011+ atomic_inc_unchecked(&e->epoch->epoch_size);
27012 atomic_inc(&e->epoch->active);
27013 spin_unlock(&mdev->epoch_lock);
27014
27015@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
27016 D_ASSERT(list_empty(&mdev->done_ee));
27017
27018 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
27019- atomic_set(&mdev->current_epoch->epoch_size, 0);
27020+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
27021 D_ASSERT(list_empty(&mdev->current_epoch->list));
27022 }
27023
27024diff --git a/drivers/block/loop.c b/drivers/block/loop.c
27025index 1e888c9..05cf1b0 100644
27026--- a/drivers/block/loop.c
27027+++ b/drivers/block/loop.c
27028@@ -227,7 +227,7 @@ static int __do_lo_send_write(struct file *file,
27029 mm_segment_t old_fs = get_fs();
27030
27031 set_fs(get_ds());
27032- bw = file->f_op->write(file, buf, len, &pos);
27033+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
27034 set_fs(old_fs);
27035 if (likely(bw == len))
27036 return 0;
27037diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
27038index 4364303..9adf4ee 100644
27039--- a/drivers/char/Kconfig
27040+++ b/drivers/char/Kconfig
27041@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
27042
27043 config DEVKMEM
27044 bool "/dev/kmem virtual device support"
27045- default y
27046+ default n
27047+ depends on !GRKERNSEC_KMEM
27048 help
27049 Say Y here if you want to support the /dev/kmem device. The
27050 /dev/kmem device is rarely used, but can be used for certain
27051@@ -596,6 +597,7 @@ config DEVPORT
27052 bool
27053 depends on !M68K
27054 depends on ISA || PCI
27055+ depends on !GRKERNSEC_KMEM
27056 default y
27057
27058 source "drivers/s390/char/Kconfig"
27059diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
27060index 2e04433..22afc64 100644
27061--- a/drivers/char/agp/frontend.c
27062+++ b/drivers/char/agp/frontend.c
27063@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
27064 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
27065 return -EFAULT;
27066
27067- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
27068+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
27069 return -EFAULT;
27070
27071 client = agp_find_client_by_pid(reserve.pid);
27072diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
27073index 095ab90..afad0a4 100644
27074--- a/drivers/char/briq_panel.c
27075+++ b/drivers/char/briq_panel.c
27076@@ -9,6 +9,7 @@
27077 #include <linux/types.h>
27078 #include <linux/errno.h>
27079 #include <linux/tty.h>
27080+#include <linux/mutex.h>
27081 #include <linux/timer.h>
27082 #include <linux/kernel.h>
27083 #include <linux/wait.h>
27084@@ -34,6 +35,7 @@ static int vfd_is_open;
27085 static unsigned char vfd[40];
27086 static int vfd_cursor;
27087 static unsigned char ledpb, led;
27088+static DEFINE_MUTEX(vfd_mutex);
27089
27090 static void update_vfd(void)
27091 {
27092@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
27093 if (!vfd_is_open)
27094 return -EBUSY;
27095
27096+ mutex_lock(&vfd_mutex);
27097 for (;;) {
27098 char c;
27099 if (!indx)
27100 break;
27101- if (get_user(c, buf))
27102+ if (get_user(c, buf)) {
27103+ mutex_unlock(&vfd_mutex);
27104 return -EFAULT;
27105+ }
27106 if (esc) {
27107 set_led(c);
27108 esc = 0;
27109@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
27110 buf++;
27111 }
27112 update_vfd();
27113+ mutex_unlock(&vfd_mutex);
27114
27115 return len;
27116 }
27117diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
27118index f773a9d..65cd683 100644
27119--- a/drivers/char/genrtc.c
27120+++ b/drivers/char/genrtc.c
27121@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
27122 switch (cmd) {
27123
27124 case RTC_PLL_GET:
27125+ memset(&pll, 0, sizeof(pll));
27126 if (get_rtc_pll(&pll))
27127 return -EINVAL;
27128 else
27129diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
27130index 0833896..cccce52 100644
27131--- a/drivers/char/hpet.c
27132+++ b/drivers/char/hpet.c
27133@@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
27134 }
27135
27136 static int
27137-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
27138+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
27139 struct hpet_info *info)
27140 {
27141 struct hpet_timer __iomem *timer;
27142diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
27143index 58c0e63..46c16bf 100644
27144--- a/drivers/char/ipmi/ipmi_msghandler.c
27145+++ b/drivers/char/ipmi/ipmi_msghandler.c
27146@@ -415,7 +415,7 @@ struct ipmi_smi {
27147 struct proc_dir_entry *proc_dir;
27148 char proc_dir_name[10];
27149
27150- atomic_t stats[IPMI_NUM_STATS];
27151+ atomic_unchecked_t stats[IPMI_NUM_STATS];
27152
27153 /*
27154 * run_to_completion duplicate of smb_info, smi_info
27155@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
27156
27157
27158 #define ipmi_inc_stat(intf, stat) \
27159- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
27160+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
27161 #define ipmi_get_stat(intf, stat) \
27162- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
27163+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
27164
27165 static int is_lan_addr(struct ipmi_addr *addr)
27166 {
27167@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
27168 INIT_LIST_HEAD(&intf->cmd_rcvrs);
27169 init_waitqueue_head(&intf->waitq);
27170 for (i = 0; i < IPMI_NUM_STATS; i++)
27171- atomic_set(&intf->stats[i], 0);
27172+ atomic_set_unchecked(&intf->stats[i], 0);
27173
27174 intf->proc_dir = NULL;
27175
27176diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
27177index 9397ab4..d01bee1 100644
27178--- a/drivers/char/ipmi/ipmi_si_intf.c
27179+++ b/drivers/char/ipmi/ipmi_si_intf.c
27180@@ -277,7 +277,7 @@ struct smi_info {
27181 unsigned char slave_addr;
27182
27183 /* Counters and things for the proc filesystem. */
27184- atomic_t stats[SI_NUM_STATS];
27185+ atomic_unchecked_t stats[SI_NUM_STATS];
27186
27187 struct task_struct *thread;
27188
27189@@ -286,9 +286,9 @@ struct smi_info {
27190 };
27191
27192 #define smi_inc_stat(smi, stat) \
27193- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
27194+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
27195 #define smi_get_stat(smi, stat) \
27196- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
27197+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
27198
27199 #define SI_MAX_PARMS 4
27200
27201@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
27202 atomic_set(&new_smi->req_events, 0);
27203 new_smi->run_to_completion = 0;
27204 for (i = 0; i < SI_NUM_STATS; i++)
27205- atomic_set(&new_smi->stats[i], 0);
27206+ atomic_set_unchecked(&new_smi->stats[i], 0);
27207
27208 new_smi->interrupt_disabled = 1;
27209 atomic_set(&new_smi->stop_operation, 0);
27210diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
27211index 1aeaaba..e018570 100644
27212--- a/drivers/char/mbcs.c
27213+++ b/drivers/char/mbcs.c
27214@@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
27215 return 0;
27216 }
27217
27218-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
27219+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
27220 {
27221 .part_num = MBCS_PART_NUM,
27222 .mfg_num = MBCS_MFG_NUM,
27223diff --git a/drivers/char/mem.c b/drivers/char/mem.c
27224index 1451790..f705c30 100644
27225--- a/drivers/char/mem.c
27226+++ b/drivers/char/mem.c
27227@@ -18,6 +18,7 @@
27228 #include <linux/raw.h>
27229 #include <linux/tty.h>
27230 #include <linux/capability.h>
27231+#include <linux/security.h>
27232 #include <linux/ptrace.h>
27233 #include <linux/device.h>
27234 #include <linux/highmem.h>
27235@@ -35,6 +36,10 @@
27236 # include <linux/efi.h>
27237 #endif
27238
27239+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27240+extern const struct file_operations grsec_fops;
27241+#endif
27242+
27243 static inline unsigned long size_inside_page(unsigned long start,
27244 unsigned long size)
27245 {
27246@@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27247
27248 while (cursor < to) {
27249 if (!devmem_is_allowed(pfn)) {
27250+#ifdef CONFIG_GRKERNSEC_KMEM
27251+ gr_handle_mem_readwrite(from, to);
27252+#else
27253 printk(KERN_INFO
27254 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
27255 current->comm, from, to);
27256+#endif
27257 return 0;
27258 }
27259 cursor += PAGE_SIZE;
27260@@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27261 }
27262 return 1;
27263 }
27264+#elif defined(CONFIG_GRKERNSEC_KMEM)
27265+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27266+{
27267+ return 0;
27268+}
27269 #else
27270 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27271 {
27272@@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27273
27274 while (count > 0) {
27275 unsigned long remaining;
27276+ char *temp;
27277
27278 sz = size_inside_page(p, count);
27279
27280@@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27281 if (!ptr)
27282 return -EFAULT;
27283
27284- remaining = copy_to_user(buf, ptr, sz);
27285+#ifdef CONFIG_PAX_USERCOPY
27286+ temp = kmalloc(sz, GFP_KERNEL);
27287+ if (!temp) {
27288+ unxlate_dev_mem_ptr(p, ptr);
27289+ return -ENOMEM;
27290+ }
27291+ memcpy(temp, ptr, sz);
27292+#else
27293+ temp = ptr;
27294+#endif
27295+
27296+ remaining = copy_to_user(buf, temp, sz);
27297+
27298+#ifdef CONFIG_PAX_USERCOPY
27299+ kfree(temp);
27300+#endif
27301+
27302 unxlate_dev_mem_ptr(p, ptr);
27303 if (remaining)
27304 return -EFAULT;
27305@@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27306 size_t count, loff_t *ppos)
27307 {
27308 unsigned long p = *ppos;
27309- ssize_t low_count, read, sz;
27310+ ssize_t low_count, read, sz, err = 0;
27311 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
27312- int err = 0;
27313
27314 read = 0;
27315 if (p < (unsigned long) high_memory) {
27316@@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27317 }
27318 #endif
27319 while (low_count > 0) {
27320+ char *temp;
27321+
27322 sz = size_inside_page(p, low_count);
27323
27324 /*
27325@@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27326 */
27327 kbuf = xlate_dev_kmem_ptr((char *)p);
27328
27329- if (copy_to_user(buf, kbuf, sz))
27330+#ifdef CONFIG_PAX_USERCOPY
27331+ temp = kmalloc(sz, GFP_KERNEL);
27332+ if (!temp)
27333+ return -ENOMEM;
27334+ memcpy(temp, kbuf, sz);
27335+#else
27336+ temp = kbuf;
27337+#endif
27338+
27339+ err = copy_to_user(buf, temp, sz);
27340+
27341+#ifdef CONFIG_PAX_USERCOPY
27342+ kfree(temp);
27343+#endif
27344+
27345+ if (err)
27346 return -EFAULT;
27347 buf += sz;
27348 p += sz;
27349@@ -867,6 +914,9 @@ static const struct memdev {
27350 #ifdef CONFIG_CRASH_DUMP
27351 [12] = { "oldmem", 0, &oldmem_fops, NULL },
27352 #endif
27353+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27354+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
27355+#endif
27356 };
27357
27358 static int memory_open(struct inode *inode, struct file *filp)
27359diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
27360index da3cfee..a5a6606 100644
27361--- a/drivers/char/nvram.c
27362+++ b/drivers/char/nvram.c
27363@@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
27364
27365 spin_unlock_irq(&rtc_lock);
27366
27367- if (copy_to_user(buf, contents, tmp - contents))
27368+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
27369 return -EFAULT;
27370
27371 *ppos = i;
27372diff --git a/drivers/char/random.c b/drivers/char/random.c
27373index 6035ab8..bdfe4fd 100644
27374--- a/drivers/char/random.c
27375+++ b/drivers/char/random.c
27376@@ -261,8 +261,13 @@
27377 /*
27378 * Configuration information
27379 */
27380+#ifdef CONFIG_GRKERNSEC_RANDNET
27381+#define INPUT_POOL_WORDS 512
27382+#define OUTPUT_POOL_WORDS 128
27383+#else
27384 #define INPUT_POOL_WORDS 128
27385 #define OUTPUT_POOL_WORDS 32
27386+#endif
27387 #define SEC_XFER_SIZE 512
27388 #define EXTRACT_SIZE 10
27389
27390@@ -300,10 +305,17 @@ static struct poolinfo {
27391 int poolwords;
27392 int tap1, tap2, tap3, tap4, tap5;
27393 } poolinfo_table[] = {
27394+#ifdef CONFIG_GRKERNSEC_RANDNET
27395+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
27396+ { 512, 411, 308, 208, 104, 1 },
27397+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
27398+ { 128, 103, 76, 51, 25, 1 },
27399+#else
27400 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
27401 { 128, 103, 76, 51, 25, 1 },
27402 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
27403 { 32, 26, 20, 14, 7, 1 },
27404+#endif
27405 #if 0
27406 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
27407 { 2048, 1638, 1231, 819, 411, 1 },
27408@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
27409
27410 extract_buf(r, tmp);
27411 i = min_t(int, nbytes, EXTRACT_SIZE);
27412- if (copy_to_user(buf, tmp, i)) {
27413+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
27414 ret = -EFAULT;
27415 break;
27416 }
27417@@ -1228,7 +1240,7 @@ EXPORT_SYMBOL(generate_random_uuid);
27418 #include <linux/sysctl.h>
27419
27420 static int min_read_thresh = 8, min_write_thresh;
27421-static int max_read_thresh = INPUT_POOL_WORDS * 32;
27422+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
27423 static int max_write_thresh = INPUT_POOL_WORDS * 32;
27424 static char sysctl_bootid[16];
27425
27426diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
27427index 1ee8ce7..b778bef 100644
27428--- a/drivers/char/sonypi.c
27429+++ b/drivers/char/sonypi.c
27430@@ -55,6 +55,7 @@
27431 #include <asm/uaccess.h>
27432 #include <asm/io.h>
27433 #include <asm/system.h>
27434+#include <asm/local.h>
27435
27436 #include <linux/sonypi.h>
27437
27438@@ -491,7 +492,7 @@ static struct sonypi_device {
27439 spinlock_t fifo_lock;
27440 wait_queue_head_t fifo_proc_list;
27441 struct fasync_struct *fifo_async;
27442- int open_count;
27443+ local_t open_count;
27444 int model;
27445 struct input_dev *input_jog_dev;
27446 struct input_dev *input_key_dev;
27447@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
27448 static int sonypi_misc_release(struct inode *inode, struct file *file)
27449 {
27450 mutex_lock(&sonypi_device.lock);
27451- sonypi_device.open_count--;
27452+ local_dec(&sonypi_device.open_count);
27453 mutex_unlock(&sonypi_device.lock);
27454 return 0;
27455 }
27456@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
27457 {
27458 mutex_lock(&sonypi_device.lock);
27459 /* Flush input queue on first open */
27460- if (!sonypi_device.open_count)
27461+ if (!local_read(&sonypi_device.open_count))
27462 kfifo_reset(&sonypi_device.fifo);
27463- sonypi_device.open_count++;
27464+ local_inc(&sonypi_device.open_count);
27465 mutex_unlock(&sonypi_device.lock);
27466
27467 return 0;
27468diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
27469index 361a1df..2471eee 100644
27470--- a/drivers/char/tpm/tpm.c
27471+++ b/drivers/char/tpm/tpm.c
27472@@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
27473 chip->vendor.req_complete_val)
27474 goto out_recv;
27475
27476- if ((status == chip->vendor.req_canceled)) {
27477+ if (status == chip->vendor.req_canceled) {
27478 dev_err(chip->dev, "Operation Canceled\n");
27479 rc = -ECANCELED;
27480 goto out;
27481diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
27482index 0636520..169c1d0 100644
27483--- a/drivers/char/tpm/tpm_bios.c
27484+++ b/drivers/char/tpm/tpm_bios.c
27485@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
27486 event = addr;
27487
27488 if ((event->event_type == 0 && event->event_size == 0) ||
27489- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
27490+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
27491 return NULL;
27492
27493 return addr;
27494@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
27495 return NULL;
27496
27497 if ((event->event_type == 0 && event->event_size == 0) ||
27498- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
27499+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
27500 return NULL;
27501
27502 (*pos)++;
27503@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
27504 int i;
27505
27506 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
27507- seq_putc(m, data[i]);
27508+ if (!seq_putc(m, data[i]))
27509+ return -EFAULT;
27510
27511 return 0;
27512 }
27513@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
27514 log->bios_event_log_end = log->bios_event_log + len;
27515
27516 virt = acpi_os_map_memory(start, len);
27517+ if (!virt) {
27518+ kfree(log->bios_event_log);
27519+ log->bios_event_log = NULL;
27520+ return -EFAULT;
27521+ }
27522
27523- memcpy(log->bios_event_log, virt, len);
27524+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
27525
27526 acpi_os_unmap_memory(virt, len);
27527 return 0;
27528diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
27529index 8e3c46d..c139b99 100644
27530--- a/drivers/char/virtio_console.c
27531+++ b/drivers/char/virtio_console.c
27532@@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
27533 if (to_user) {
27534 ssize_t ret;
27535
27536- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
27537+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
27538 if (ret)
27539 return -EFAULT;
27540 } else {
27541@@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
27542 if (!port_has_data(port) && !port->host_connected)
27543 return 0;
27544
27545- return fill_readbuf(port, ubuf, count, true);
27546+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
27547 }
27548
27549 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
27550diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
27551index eb1d864..39ee5a7 100644
27552--- a/drivers/dma/dmatest.c
27553+++ b/drivers/dma/dmatest.c
27554@@ -591,7 +591,7 @@ static int dmatest_add_channel(struct dma_chan *chan)
27555 }
27556 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
27557 cnt = dmatest_add_threads(dtc, DMA_PQ);
27558- thread_count += cnt > 0 ?: 0;
27559+ thread_count += cnt > 0 ? cnt : 0;
27560 }
27561
27562 pr_info("dmatest: Started %u threads using %s\n",
27563diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
27564index c9eee6d..f9d5280 100644
27565--- a/drivers/edac/amd64_edac.c
27566+++ b/drivers/edac/amd64_edac.c
27567@@ -2685,7 +2685,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
27568 * PCI core identifies what devices are on a system during boot, and then
27569 * inquiry this table to see if this driver is for a given device found.
27570 */
27571-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
27572+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
27573 {
27574 .vendor = PCI_VENDOR_ID_AMD,
27575 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
27576diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
27577index e47e73b..348e0bd 100644
27578--- a/drivers/edac/amd76x_edac.c
27579+++ b/drivers/edac/amd76x_edac.c
27580@@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
27581 edac_mc_free(mci);
27582 }
27583
27584-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
27585+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
27586 {
27587 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27588 AMD762},
27589diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
27590index 1af531a..3a8ff27 100644
27591--- a/drivers/edac/e752x_edac.c
27592+++ b/drivers/edac/e752x_edac.c
27593@@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
27594 edac_mc_free(mci);
27595 }
27596
27597-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
27598+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
27599 {
27600 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27601 E7520},
27602diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
27603index 6ffb6d2..383d8d7 100644
27604--- a/drivers/edac/e7xxx_edac.c
27605+++ b/drivers/edac/e7xxx_edac.c
27606@@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
27607 edac_mc_free(mci);
27608 }
27609
27610-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
27611+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
27612 {
27613 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27614 E7205},
27615diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
27616index 495198a..ac08c85 100644
27617--- a/drivers/edac/edac_pci_sysfs.c
27618+++ b/drivers/edac/edac_pci_sysfs.c
27619@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
27620 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
27621 static int edac_pci_poll_msec = 1000; /* one second workq period */
27622
27623-static atomic_t pci_parity_count = ATOMIC_INIT(0);
27624-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
27625+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
27626+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
27627
27628 static struct kobject *edac_pci_top_main_kobj;
27629 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
27630@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27631 edac_printk(KERN_CRIT, EDAC_PCI,
27632 "Signaled System Error on %s\n",
27633 pci_name(dev));
27634- atomic_inc(&pci_nonparity_count);
27635+ atomic_inc_unchecked(&pci_nonparity_count);
27636 }
27637
27638 if (status & (PCI_STATUS_PARITY)) {
27639@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27640 "Master Data Parity Error on %s\n",
27641 pci_name(dev));
27642
27643- atomic_inc(&pci_parity_count);
27644+ atomic_inc_unchecked(&pci_parity_count);
27645 }
27646
27647 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27648@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27649 "Detected Parity Error on %s\n",
27650 pci_name(dev));
27651
27652- atomic_inc(&pci_parity_count);
27653+ atomic_inc_unchecked(&pci_parity_count);
27654 }
27655 }
27656
27657@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27658 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
27659 "Signaled System Error on %s\n",
27660 pci_name(dev));
27661- atomic_inc(&pci_nonparity_count);
27662+ atomic_inc_unchecked(&pci_nonparity_count);
27663 }
27664
27665 if (status & (PCI_STATUS_PARITY)) {
27666@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27667 "Master Data Parity Error on "
27668 "%s\n", pci_name(dev));
27669
27670- atomic_inc(&pci_parity_count);
27671+ atomic_inc_unchecked(&pci_parity_count);
27672 }
27673
27674 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27675@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27676 "Detected Parity Error on %s\n",
27677 pci_name(dev));
27678
27679- atomic_inc(&pci_parity_count);
27680+ atomic_inc_unchecked(&pci_parity_count);
27681 }
27682 }
27683 }
27684@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
27685 if (!check_pci_errors)
27686 return;
27687
27688- before_count = atomic_read(&pci_parity_count);
27689+ before_count = atomic_read_unchecked(&pci_parity_count);
27690
27691 /* scan all PCI devices looking for a Parity Error on devices and
27692 * bridges.
27693@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
27694 /* Only if operator has selected panic on PCI Error */
27695 if (edac_pci_get_panic_on_pe()) {
27696 /* If the count is different 'after' from 'before' */
27697- if (before_count != atomic_read(&pci_parity_count))
27698+ if (before_count != atomic_read_unchecked(&pci_parity_count))
27699 panic("EDAC: PCI Parity Error");
27700 }
27701 }
27702diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
27703index c0510b3..6e2a954 100644
27704--- a/drivers/edac/i3000_edac.c
27705+++ b/drivers/edac/i3000_edac.c
27706@@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
27707 edac_mc_free(mci);
27708 }
27709
27710-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
27711+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
27712 {
27713 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27714 I3000},
27715diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
27716index aa08497..7e6822a 100644
27717--- a/drivers/edac/i3200_edac.c
27718+++ b/drivers/edac/i3200_edac.c
27719@@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
27720 edac_mc_free(mci);
27721 }
27722
27723-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
27724+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
27725 {
27726 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27727 I3200},
27728diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
27729index 4dc3ac2..67d05a6 100644
27730--- a/drivers/edac/i5000_edac.c
27731+++ b/drivers/edac/i5000_edac.c
27732@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
27733 *
27734 * The "E500P" device is the first device supported.
27735 */
27736-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
27737+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
27738 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
27739 .driver_data = I5000P},
27740
27741diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
27742index bcbdeec..9886d16 100644
27743--- a/drivers/edac/i5100_edac.c
27744+++ b/drivers/edac/i5100_edac.c
27745@@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
27746 edac_mc_free(mci);
27747 }
27748
27749-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
27750+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
27751 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
27752 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
27753 { 0, }
27754diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
27755index 74d6ec34..baff517 100644
27756--- a/drivers/edac/i5400_edac.c
27757+++ b/drivers/edac/i5400_edac.c
27758@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
27759 *
27760 * The "E500P" device is the first device supported.
27761 */
27762-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
27763+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
27764 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
27765 {0,} /* 0 terminated list. */
27766 };
27767diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
27768index 6104dba..e7ea8e1 100644
27769--- a/drivers/edac/i7300_edac.c
27770+++ b/drivers/edac/i7300_edac.c
27771@@ -1192,7 +1192,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
27772 *
27773 * Has only 8086:360c PCI ID
27774 */
27775-static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
27776+static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
27777 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
27778 {0,} /* 0 terminated list. */
27779 };
27780diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
27781index 70ad892..178943c 100644
27782--- a/drivers/edac/i7core_edac.c
27783+++ b/drivers/edac/i7core_edac.c
27784@@ -391,7 +391,7 @@ static const struct pci_id_table pci_dev_table[] = {
27785 /*
27786 * pci_device_id table for which devices we are looking for
27787 */
27788-static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
27789+static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
27790 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
27791 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
27792 {0,} /* 0 terminated list. */
27793diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
27794index 4329d39..f3022ef 100644
27795--- a/drivers/edac/i82443bxgx_edac.c
27796+++ b/drivers/edac/i82443bxgx_edac.c
27797@@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
27798
27799 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
27800
27801-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
27802+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
27803 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
27804 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
27805 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
27806diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
27807index 931a057..fd28340 100644
27808--- a/drivers/edac/i82860_edac.c
27809+++ b/drivers/edac/i82860_edac.c
27810@@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
27811 edac_mc_free(mci);
27812 }
27813
27814-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
27815+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
27816 {
27817 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27818 I82860},
27819diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
27820index 33864c6..01edc61 100644
27821--- a/drivers/edac/i82875p_edac.c
27822+++ b/drivers/edac/i82875p_edac.c
27823@@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
27824 edac_mc_free(mci);
27825 }
27826
27827-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
27828+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
27829 {
27830 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27831 I82875P},
27832diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
27833index a5da732..983363b 100644
27834--- a/drivers/edac/i82975x_edac.c
27835+++ b/drivers/edac/i82975x_edac.c
27836@@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
27837 edac_mc_free(mci);
27838 }
27839
27840-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
27841+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
27842 {
27843 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27844 I82975X
27845diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
27846index 0106747..0b40417 100644
27847--- a/drivers/edac/mce_amd.h
27848+++ b/drivers/edac/mce_amd.h
27849@@ -83,7 +83,7 @@ struct amd_decoder_ops {
27850 bool (*dc_mce)(u16, u8);
27851 bool (*ic_mce)(u16, u8);
27852 bool (*nb_mce)(u16, u8);
27853-};
27854+} __no_const;
27855
27856 void amd_report_gart_errors(bool);
27857 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
27858diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
27859index b153674..ad2ba9b 100644
27860--- a/drivers/edac/r82600_edac.c
27861+++ b/drivers/edac/r82600_edac.c
27862@@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
27863 edac_mc_free(mci);
27864 }
27865
27866-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
27867+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
27868 {
27869 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
27870 },
27871diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
27872index 7a402bf..af0b211 100644
27873--- a/drivers/edac/sb_edac.c
27874+++ b/drivers/edac/sb_edac.c
27875@@ -367,7 +367,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
27876 /*
27877 * pci_device_id table for which devices we are looking for
27878 */
27879-static const struct pci_device_id sbridge_pci_tbl[] __devinitdata = {
27880+static const struct pci_device_id sbridge_pci_tbl[] __devinitconst = {
27881 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
27882 {0,} /* 0 terminated list. */
27883 };
27884diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
27885index b6f47de..c5acf3a 100644
27886--- a/drivers/edac/x38_edac.c
27887+++ b/drivers/edac/x38_edac.c
27888@@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
27889 edac_mc_free(mci);
27890 }
27891
27892-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
27893+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
27894 {
27895 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27896 X38},
27897diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
27898index 85661b0..c784559a 100644
27899--- a/drivers/firewire/core-card.c
27900+++ b/drivers/firewire/core-card.c
27901@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
27902
27903 void fw_core_remove_card(struct fw_card *card)
27904 {
27905- struct fw_card_driver dummy_driver = dummy_driver_template;
27906+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
27907
27908 card->driver->update_phy_reg(card, 4,
27909 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
27910diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
27911index 4799393..37bd3ab 100644
27912--- a/drivers/firewire/core-cdev.c
27913+++ b/drivers/firewire/core-cdev.c
27914@@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
27915 int ret;
27916
27917 if ((request->channels == 0 && request->bandwidth == 0) ||
27918- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
27919- request->bandwidth < 0)
27920+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
27921 return -EINVAL;
27922
27923 r = kmalloc(sizeof(*r), GFP_KERNEL);
27924diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
27925index 855ab3f..11f4bbd 100644
27926--- a/drivers/firewire/core-transaction.c
27927+++ b/drivers/firewire/core-transaction.c
27928@@ -37,6 +37,7 @@
27929 #include <linux/timer.h>
27930 #include <linux/types.h>
27931 #include <linux/workqueue.h>
27932+#include <linux/sched.h>
27933
27934 #include <asm/byteorder.h>
27935
27936diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
27937index b45be57..5fad18b 100644
27938--- a/drivers/firewire/core.h
27939+++ b/drivers/firewire/core.h
27940@@ -101,6 +101,7 @@ struct fw_card_driver {
27941
27942 int (*stop_iso)(struct fw_iso_context *ctx);
27943 };
27944+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
27945
27946 void fw_card_initialize(struct fw_card *card,
27947 const struct fw_card_driver *driver, struct device *device);
27948diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
27949index 153980b..4b4d046 100644
27950--- a/drivers/firmware/dmi_scan.c
27951+++ b/drivers/firmware/dmi_scan.c
27952@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
27953 }
27954 }
27955 else {
27956- /*
27957- * no iounmap() for that ioremap(); it would be a no-op, but
27958- * it's so early in setup that sucker gets confused into doing
27959- * what it shouldn't if we actually call it.
27960- */
27961 p = dmi_ioremap(0xF0000, 0x10000);
27962 if (p == NULL)
27963 goto error;
27964@@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
27965 if (buf == NULL)
27966 return -1;
27967
27968- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
27969+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
27970
27971 iounmap(buf);
27972 return 0;
27973diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
27974index 98723cb..10ca85b 100644
27975--- a/drivers/gpio/gpio-vr41xx.c
27976+++ b/drivers/gpio/gpio-vr41xx.c
27977@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
27978 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
27979 maskl, pendl, maskh, pendh);
27980
27981- atomic_inc(&irq_err_count);
27982+ atomic_inc_unchecked(&irq_err_count);
27983
27984 return -EINVAL;
27985 }
27986diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
27987index 8323fc3..5c1d755 100644
27988--- a/drivers/gpu/drm/drm_crtc.c
27989+++ b/drivers/gpu/drm/drm_crtc.c
27990@@ -1379,7 +1379,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27991 */
27992 if ((out_resp->count_modes >= mode_count) && mode_count) {
27993 copied = 0;
27994- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
27995+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
27996 list_for_each_entry(mode, &connector->modes, head) {
27997 drm_crtc_convert_to_umode(&u_mode, mode);
27998 if (copy_to_user(mode_ptr + copied,
27999@@ -1394,8 +1394,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
28000
28001 if ((out_resp->count_props >= props_count) && props_count) {
28002 copied = 0;
28003- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
28004- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
28005+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
28006+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
28007 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
28008 if (connector->property_ids[i] != 0) {
28009 if (put_user(connector->property_ids[i],
28010@@ -1417,7 +1417,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
28011
28012 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
28013 copied = 0;
28014- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
28015+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
28016 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
28017 if (connector->encoder_ids[i] != 0) {
28018 if (put_user(connector->encoder_ids[i],
28019@@ -1576,7 +1576,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
28020 }
28021
28022 for (i = 0; i < crtc_req->count_connectors; i++) {
28023- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
28024+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
28025 if (get_user(out_id, &set_connectors_ptr[i])) {
28026 ret = -EFAULT;
28027 goto out;
28028@@ -1857,7 +1857,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
28029 fb = obj_to_fb(obj);
28030
28031 num_clips = r->num_clips;
28032- clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
28033+ clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
28034
28035 if (!num_clips != !clips_ptr) {
28036 ret = -EINVAL;
28037@@ -2283,7 +2283,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
28038 out_resp->flags = property->flags;
28039
28040 if ((out_resp->count_values >= value_count) && value_count) {
28041- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
28042+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
28043 for (i = 0; i < value_count; i++) {
28044 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
28045 ret = -EFAULT;
28046@@ -2296,7 +2296,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
28047 if (property->flags & DRM_MODE_PROP_ENUM) {
28048 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
28049 copied = 0;
28050- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
28051+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
28052 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
28053
28054 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
28055@@ -2319,7 +2319,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
28056 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
28057 copied = 0;
28058 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
28059- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
28060+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
28061
28062 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
28063 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
28064@@ -2380,7 +2380,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
28065 struct drm_mode_get_blob *out_resp = data;
28066 struct drm_property_blob *blob;
28067 int ret = 0;
28068- void *blob_ptr;
28069+ void __user *blob_ptr;
28070
28071 if (!drm_core_check_feature(dev, DRIVER_MODESET))
28072 return -EINVAL;
28073@@ -2394,7 +2394,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
28074 blob = obj_to_blob(obj);
28075
28076 if (out_resp->length == blob->length) {
28077- blob_ptr = (void *)(unsigned long)out_resp->data;
28078+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
28079 if (copy_to_user(blob_ptr, blob->data, blob->length)){
28080 ret = -EFAULT;
28081 goto done;
28082diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
28083index d2619d7..bd6bd00 100644
28084--- a/drivers/gpu/drm/drm_crtc_helper.c
28085+++ b/drivers/gpu/drm/drm_crtc_helper.c
28086@@ -279,7 +279,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
28087 struct drm_crtc *tmp;
28088 int crtc_mask = 1;
28089
28090- WARN(!crtc, "checking null crtc?\n");
28091+ BUG_ON(!crtc);
28092
28093 dev = crtc->dev;
28094
28095diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
28096index 40c187c..5746164 100644
28097--- a/drivers/gpu/drm/drm_drv.c
28098+++ b/drivers/gpu/drm/drm_drv.c
28099@@ -308,7 +308,7 @@ module_exit(drm_core_exit);
28100 /**
28101 * Copy and IOCTL return string to user space
28102 */
28103-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
28104+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
28105 {
28106 int len;
28107
28108@@ -387,7 +387,7 @@ long drm_ioctl(struct file *filp,
28109
28110 dev = file_priv->minor->dev;
28111 atomic_inc(&dev->ioctl_count);
28112- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
28113+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
28114 ++file_priv->ioctl_count;
28115
28116 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
28117diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
28118index 828bf65..cdaa0e9 100644
28119--- a/drivers/gpu/drm/drm_fops.c
28120+++ b/drivers/gpu/drm/drm_fops.c
28121@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
28122 }
28123
28124 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
28125- atomic_set(&dev->counts[i], 0);
28126+ atomic_set_unchecked(&dev->counts[i], 0);
28127
28128 dev->sigdata.lock = NULL;
28129
28130@@ -135,8 +135,8 @@ int drm_open(struct inode *inode, struct file *filp)
28131
28132 retcode = drm_open_helper(inode, filp, dev);
28133 if (!retcode) {
28134- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
28135- if (!dev->open_count++)
28136+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
28137+ if (local_inc_return(&dev->open_count) == 1)
28138 retcode = drm_setup(dev);
28139 }
28140 if (!retcode) {
28141@@ -473,7 +473,7 @@ int drm_release(struct inode *inode, struct file *filp)
28142
28143 mutex_lock(&drm_global_mutex);
28144
28145- DRM_DEBUG("open_count = %d\n", dev->open_count);
28146+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
28147
28148 if (dev->driver->preclose)
28149 dev->driver->preclose(dev, file_priv);
28150@@ -485,7 +485,7 @@ int drm_release(struct inode *inode, struct file *filp)
28151 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
28152 task_pid_nr(current),
28153 (long)old_encode_dev(file_priv->minor->device),
28154- dev->open_count);
28155+ local_read(&dev->open_count));
28156
28157 /* Release any auth tokens that might point to this file_priv,
28158 (do that under the drm_global_mutex) */
28159@@ -571,8 +571,8 @@ int drm_release(struct inode *inode, struct file *filp)
28160 * End inline drm_release
28161 */
28162
28163- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
28164- if (!--dev->open_count) {
28165+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
28166+ if (local_dec_and_test(&dev->open_count)) {
28167 if (atomic_read(&dev->ioctl_count)) {
28168 DRM_ERROR("Device busy: %d\n",
28169 atomic_read(&dev->ioctl_count));
28170diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
28171index c87dc96..326055d 100644
28172--- a/drivers/gpu/drm/drm_global.c
28173+++ b/drivers/gpu/drm/drm_global.c
28174@@ -36,7 +36,7 @@
28175 struct drm_global_item {
28176 struct mutex mutex;
28177 void *object;
28178- int refcount;
28179+ atomic_t refcount;
28180 };
28181
28182 static struct drm_global_item glob[DRM_GLOBAL_NUM];
28183@@ -49,7 +49,7 @@ void drm_global_init(void)
28184 struct drm_global_item *item = &glob[i];
28185 mutex_init(&item->mutex);
28186 item->object = NULL;
28187- item->refcount = 0;
28188+ atomic_set(&item->refcount, 0);
28189 }
28190 }
28191
28192@@ -59,7 +59,7 @@ void drm_global_release(void)
28193 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
28194 struct drm_global_item *item = &glob[i];
28195 BUG_ON(item->object != NULL);
28196- BUG_ON(item->refcount != 0);
28197+ BUG_ON(atomic_read(&item->refcount) != 0);
28198 }
28199 }
28200
28201@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
28202 void *object;
28203
28204 mutex_lock(&item->mutex);
28205- if (item->refcount == 0) {
28206+ if (atomic_read(&item->refcount) == 0) {
28207 item->object = kzalloc(ref->size, GFP_KERNEL);
28208 if (unlikely(item->object == NULL)) {
28209 ret = -ENOMEM;
28210@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
28211 goto out_err;
28212
28213 }
28214- ++item->refcount;
28215+ atomic_inc(&item->refcount);
28216 ref->object = item->object;
28217 object = item->object;
28218 mutex_unlock(&item->mutex);
28219@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
28220 struct drm_global_item *item = &glob[ref->global_type];
28221
28222 mutex_lock(&item->mutex);
28223- BUG_ON(item->refcount == 0);
28224+ BUG_ON(atomic_read(&item->refcount) == 0);
28225 BUG_ON(ref->object != item->object);
28226- if (--item->refcount == 0) {
28227+ if (atomic_dec_and_test(&item->refcount)) {
28228 ref->release(ref);
28229 item->object = NULL;
28230 }
28231diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
28232index ab1162d..42587b2 100644
28233--- a/drivers/gpu/drm/drm_info.c
28234+++ b/drivers/gpu/drm/drm_info.c
28235@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
28236 struct drm_local_map *map;
28237 struct drm_map_list *r_list;
28238
28239- /* Hardcoded from _DRM_FRAME_BUFFER,
28240- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
28241- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
28242- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
28243+ static const char * const types[] = {
28244+ [_DRM_FRAME_BUFFER] = "FB",
28245+ [_DRM_REGISTERS] = "REG",
28246+ [_DRM_SHM] = "SHM",
28247+ [_DRM_AGP] = "AGP",
28248+ [_DRM_SCATTER_GATHER] = "SG",
28249+ [_DRM_CONSISTENT] = "PCI",
28250+ [_DRM_GEM] = "GEM" };
28251 const char *type;
28252 int i;
28253
28254@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
28255 map = r_list->map;
28256 if (!map)
28257 continue;
28258- if (map->type < 0 || map->type > 5)
28259+ if (map->type >= ARRAY_SIZE(types))
28260 type = "??";
28261 else
28262 type = types[map->type];
28263@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
28264 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
28265 vma->vm_flags & VM_LOCKED ? 'l' : '-',
28266 vma->vm_flags & VM_IO ? 'i' : '-',
28267+#ifdef CONFIG_GRKERNSEC_HIDESYM
28268+ 0);
28269+#else
28270 vma->vm_pgoff);
28271+#endif
28272
28273 #if defined(__i386__)
28274 pgprot = pgprot_val(vma->vm_page_prot);
28275diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
28276index ddd70db..40321e6 100644
28277--- a/drivers/gpu/drm/drm_ioc32.c
28278+++ b/drivers/gpu/drm/drm_ioc32.c
28279@@ -456,7 +456,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
28280 request = compat_alloc_user_space(nbytes);
28281 if (!access_ok(VERIFY_WRITE, request, nbytes))
28282 return -EFAULT;
28283- list = (struct drm_buf_desc *) (request + 1);
28284+ list = (struct drm_buf_desc __user *) (request + 1);
28285
28286 if (__put_user(count, &request->count)
28287 || __put_user(list, &request->list))
28288@@ -517,7 +517,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
28289 request = compat_alloc_user_space(nbytes);
28290 if (!access_ok(VERIFY_WRITE, request, nbytes))
28291 return -EFAULT;
28292- list = (struct drm_buf_pub *) (request + 1);
28293+ list = (struct drm_buf_pub __user *) (request + 1);
28294
28295 if (__put_user(count, &request->count)
28296 || __put_user(list, &request->list))
28297diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
28298index 904d7e9..ab88581 100644
28299--- a/drivers/gpu/drm/drm_ioctl.c
28300+++ b/drivers/gpu/drm/drm_ioctl.c
28301@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev, void *data,
28302 stats->data[i].value =
28303 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
28304 else
28305- stats->data[i].value = atomic_read(&dev->counts[i]);
28306+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
28307 stats->data[i].type = dev->types[i];
28308 }
28309
28310diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
28311index 632ae24..244cf4a 100644
28312--- a/drivers/gpu/drm/drm_lock.c
28313+++ b/drivers/gpu/drm/drm_lock.c
28314@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28315 if (drm_lock_take(&master->lock, lock->context)) {
28316 master->lock.file_priv = file_priv;
28317 master->lock.lock_time = jiffies;
28318- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
28319+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
28320 break; /* Got lock */
28321 }
28322
28323@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28324 return -EINVAL;
28325 }
28326
28327- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
28328+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
28329
28330 if (drm_lock_free(&master->lock, lock->context)) {
28331 /* FIXME: Should really bail out here. */
28332diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
28333index 8f371e8..9f85d52 100644
28334--- a/drivers/gpu/drm/i810/i810_dma.c
28335+++ b/drivers/gpu/drm/i810/i810_dma.c
28336@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
28337 dma->buflist[vertex->idx],
28338 vertex->discard, vertex->used);
28339
28340- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28341- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28342+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28343+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28344 sarea_priv->last_enqueue = dev_priv->counter - 1;
28345 sarea_priv->last_dispatch = (int)hw_status[5];
28346
28347@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
28348 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
28349 mc->last_render);
28350
28351- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28352- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28353+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28354+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28355 sarea_priv->last_enqueue = dev_priv->counter - 1;
28356 sarea_priv->last_dispatch = (int)hw_status[5];
28357
28358diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
28359index c9339f4..f5e1b9d 100644
28360--- a/drivers/gpu/drm/i810/i810_drv.h
28361+++ b/drivers/gpu/drm/i810/i810_drv.h
28362@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
28363 int page_flipping;
28364
28365 wait_queue_head_t irq_queue;
28366- atomic_t irq_received;
28367- atomic_t irq_emitted;
28368+ atomic_unchecked_t irq_received;
28369+ atomic_unchecked_t irq_emitted;
28370
28371 int front_offset;
28372 } drm_i810_private_t;
28373diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
28374index b2e3c97..58cf079 100644
28375--- a/drivers/gpu/drm/i915/i915_debugfs.c
28376+++ b/drivers/gpu/drm/i915/i915_debugfs.c
28377@@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
28378 I915_READ(GTIMR));
28379 }
28380 seq_printf(m, "Interrupts received: %d\n",
28381- atomic_read(&dev_priv->irq_received));
28382+ atomic_read_unchecked(&dev_priv->irq_received));
28383 for (i = 0; i < I915_NUM_RINGS; i++) {
28384 if (IS_GEN6(dev) || IS_GEN7(dev)) {
28385 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
28386@@ -1232,7 +1232,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
28387 return ret;
28388
28389 if (opregion->header)
28390- seq_write(m, opregion->header, OPREGION_SIZE);
28391+ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
28392
28393 mutex_unlock(&dev->struct_mutex);
28394
28395diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
28396index c4da951..3c59c5c 100644
28397--- a/drivers/gpu/drm/i915/i915_dma.c
28398+++ b/drivers/gpu/drm/i915/i915_dma.c
28399@@ -1172,7 +1172,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
28400 bool can_switch;
28401
28402 spin_lock(&dev->count_lock);
28403- can_switch = (dev->open_count == 0);
28404+ can_switch = (local_read(&dev->open_count) == 0);
28405 spin_unlock(&dev->count_lock);
28406 return can_switch;
28407 }
28408diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
28409index ae294a0..1755461 100644
28410--- a/drivers/gpu/drm/i915/i915_drv.h
28411+++ b/drivers/gpu/drm/i915/i915_drv.h
28412@@ -229,7 +229,7 @@ struct drm_i915_display_funcs {
28413 /* render clock increase/decrease */
28414 /* display clock increase/decrease */
28415 /* pll clock increase/decrease */
28416-};
28417+} __no_const;
28418
28419 struct intel_device_info {
28420 u8 gen;
28421@@ -318,7 +318,7 @@ typedef struct drm_i915_private {
28422 int current_page;
28423 int page_flipping;
28424
28425- atomic_t irq_received;
28426+ atomic_unchecked_t irq_received;
28427
28428 /* protects the irq masks */
28429 spinlock_t irq_lock;
28430@@ -893,7 +893,7 @@ struct drm_i915_gem_object {
28431 * will be page flipped away on the next vblank. When it
28432 * reaches 0, dev_priv->pending_flip_queue will be woken up.
28433 */
28434- atomic_t pending_flip;
28435+ atomic_unchecked_t pending_flip;
28436 };
28437
28438 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
28439@@ -1273,7 +1273,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
28440 extern void intel_teardown_gmbus(struct drm_device *dev);
28441 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
28442 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
28443-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28444+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28445 {
28446 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
28447 }
28448diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28449index b9da890..cad1d98 100644
28450--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28451+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28452@@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
28453 i915_gem_clflush_object(obj);
28454
28455 if (obj->base.pending_write_domain)
28456- cd->flips |= atomic_read(&obj->pending_flip);
28457+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
28458
28459 /* The actual obj->write_domain will be updated with
28460 * pending_write_domain after we emit the accumulated flush for all
28461@@ -882,9 +882,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
28462
28463 static int
28464 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
28465- int count)
28466+ unsigned int count)
28467 {
28468- int i;
28469+ unsigned int i;
28470
28471 for (i = 0; i < count; i++) {
28472 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
28473diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
28474index d47a53b..61154c2 100644
28475--- a/drivers/gpu/drm/i915/i915_irq.c
28476+++ b/drivers/gpu/drm/i915/i915_irq.c
28477@@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
28478 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
28479 struct drm_i915_master_private *master_priv;
28480
28481- atomic_inc(&dev_priv->irq_received);
28482+ atomic_inc_unchecked(&dev_priv->irq_received);
28483
28484 /* disable master interrupt before clearing iir */
28485 de_ier = I915_READ(DEIER);
28486@@ -566,7 +566,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
28487 struct drm_i915_master_private *master_priv;
28488 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
28489
28490- atomic_inc(&dev_priv->irq_received);
28491+ atomic_inc_unchecked(&dev_priv->irq_received);
28492
28493 if (IS_GEN6(dev))
28494 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
28495@@ -1231,7 +1231,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
28496 int ret = IRQ_NONE, pipe;
28497 bool blc_event = false;
28498
28499- atomic_inc(&dev_priv->irq_received);
28500+ atomic_inc_unchecked(&dev_priv->irq_received);
28501
28502 iir = I915_READ(IIR);
28503
28504@@ -1750,7 +1750,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
28505 {
28506 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28507
28508- atomic_set(&dev_priv->irq_received, 0);
28509+ atomic_set_unchecked(&dev_priv->irq_received, 0);
28510
28511 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28512 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28513@@ -1938,7 +1938,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
28514 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28515 int pipe;
28516
28517- atomic_set(&dev_priv->irq_received, 0);
28518+ atomic_set_unchecked(&dev_priv->irq_received, 0);
28519
28520 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28521 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28522diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
28523index daa5743..c0757a9 100644
28524--- a/drivers/gpu/drm/i915/intel_display.c
28525+++ b/drivers/gpu/drm/i915/intel_display.c
28526@@ -2230,7 +2230,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
28527
28528 wait_event(dev_priv->pending_flip_queue,
28529 atomic_read(&dev_priv->mm.wedged) ||
28530- atomic_read(&obj->pending_flip) == 0);
28531+ atomic_read_unchecked(&obj->pending_flip) == 0);
28532
28533 /* Big Hammer, we also need to ensure that any pending
28534 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
28535@@ -2851,7 +2851,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
28536 obj = to_intel_framebuffer(crtc->fb)->obj;
28537 dev_priv = crtc->dev->dev_private;
28538 wait_event(dev_priv->pending_flip_queue,
28539- atomic_read(&obj->pending_flip) == 0);
28540+ atomic_read_unchecked(&obj->pending_flip) == 0);
28541 }
28542
28543 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
28544@@ -6952,7 +6952,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
28545
28546 atomic_clear_mask(1 << intel_crtc->plane,
28547 &obj->pending_flip.counter);
28548- if (atomic_read(&obj->pending_flip) == 0)
28549+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
28550 wake_up(&dev_priv->pending_flip_queue);
28551
28552 schedule_work(&work->work);
28553@@ -7242,7 +7242,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28554 /* Block clients from rendering to the new back buffer until
28555 * the flip occurs and the object is no longer visible.
28556 */
28557- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28558+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28559
28560 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
28561 if (ret)
28562@@ -7256,7 +7256,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28563 return 0;
28564
28565 cleanup_pending:
28566- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28567+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28568 drm_gem_object_unreference(&work->old_fb_obj->base);
28569 drm_gem_object_unreference(&obj->base);
28570 mutex_unlock(&dev->struct_mutex);
28571diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
28572index 54558a0..2d97005 100644
28573--- a/drivers/gpu/drm/mga/mga_drv.h
28574+++ b/drivers/gpu/drm/mga/mga_drv.h
28575@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
28576 u32 clear_cmd;
28577 u32 maccess;
28578
28579- atomic_t vbl_received; /**< Number of vblanks received. */
28580+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
28581 wait_queue_head_t fence_queue;
28582- atomic_t last_fence_retired;
28583+ atomic_unchecked_t last_fence_retired;
28584 u32 next_fence_to_post;
28585
28586 unsigned int fb_cpp;
28587diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
28588index 2581202..f230a8d9 100644
28589--- a/drivers/gpu/drm/mga/mga_irq.c
28590+++ b/drivers/gpu/drm/mga/mga_irq.c
28591@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
28592 if (crtc != 0)
28593 return 0;
28594
28595- return atomic_read(&dev_priv->vbl_received);
28596+ return atomic_read_unchecked(&dev_priv->vbl_received);
28597 }
28598
28599
28600@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28601 /* VBLANK interrupt */
28602 if (status & MGA_VLINEPEN) {
28603 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
28604- atomic_inc(&dev_priv->vbl_received);
28605+ atomic_inc_unchecked(&dev_priv->vbl_received);
28606 drm_handle_vblank(dev, 0);
28607 handled = 1;
28608 }
28609@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28610 if ((prim_start & ~0x03) != (prim_end & ~0x03))
28611 MGA_WRITE(MGA_PRIMEND, prim_end);
28612
28613- atomic_inc(&dev_priv->last_fence_retired);
28614+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
28615 DRM_WAKEUP(&dev_priv->fence_queue);
28616 handled = 1;
28617 }
28618@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
28619 * using fences.
28620 */
28621 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
28622- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
28623+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
28624 - *sequence) <= (1 << 23)));
28625
28626 *sequence = cur_fence;
28627diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
28628index 5fc201b..7b032b9 100644
28629--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
28630+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
28631@@ -201,7 +201,7 @@ struct methods {
28632 const char desc[8];
28633 void (*loadbios)(struct drm_device *, uint8_t *);
28634 const bool rw;
28635-};
28636+} __do_const;
28637
28638 static struct methods shadow_methods[] = {
28639 { "PRAMIN", load_vbios_pramin, true },
28640@@ -5474,7 +5474,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
28641 struct bit_table {
28642 const char id;
28643 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
28644-};
28645+} __no_const;
28646
28647 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
28648
28649diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
28650index 4c0be3a..5757582 100644
28651--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
28652+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
28653@@ -238,7 +238,7 @@ struct nouveau_channel {
28654 struct list_head pending;
28655 uint32_t sequence;
28656 uint32_t sequence_ack;
28657- atomic_t last_sequence_irq;
28658+ atomic_unchecked_t last_sequence_irq;
28659 struct nouveau_vma vma;
28660 } fence;
28661
28662@@ -319,7 +319,7 @@ struct nouveau_exec_engine {
28663 u32 handle, u16 class);
28664 void (*set_tile_region)(struct drm_device *dev, int i);
28665 void (*tlb_flush)(struct drm_device *, int engine);
28666-};
28667+} __no_const;
28668
28669 struct nouveau_instmem_engine {
28670 void *priv;
28671@@ -341,13 +341,13 @@ struct nouveau_instmem_engine {
28672 struct nouveau_mc_engine {
28673 int (*init)(struct drm_device *dev);
28674 void (*takedown)(struct drm_device *dev);
28675-};
28676+} __no_const;
28677
28678 struct nouveau_timer_engine {
28679 int (*init)(struct drm_device *dev);
28680 void (*takedown)(struct drm_device *dev);
28681 uint64_t (*read)(struct drm_device *dev);
28682-};
28683+} __no_const;
28684
28685 struct nouveau_fb_engine {
28686 int num_tiles;
28687@@ -558,7 +558,7 @@ struct nouveau_vram_engine {
28688 void (*put)(struct drm_device *, struct nouveau_mem **);
28689
28690 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
28691-};
28692+} __no_const;
28693
28694 struct nouveau_engine {
28695 struct nouveau_instmem_engine instmem;
28696@@ -706,7 +706,7 @@ struct drm_nouveau_private {
28697 struct drm_global_reference mem_global_ref;
28698 struct ttm_bo_global_ref bo_global_ref;
28699 struct ttm_bo_device bdev;
28700- atomic_t validate_sequence;
28701+ atomic_unchecked_t validate_sequence;
28702 } ttm;
28703
28704 struct {
28705diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
28706index 2f6daae..c9d7b9e 100644
28707--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
28708+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
28709@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
28710 if (USE_REFCNT(dev))
28711 sequence = nvchan_rd32(chan, 0x48);
28712 else
28713- sequence = atomic_read(&chan->fence.last_sequence_irq);
28714+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
28715
28716 if (chan->fence.sequence_ack == sequence)
28717 goto out;
28718@@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
28719 return ret;
28720 }
28721
28722- atomic_set(&chan->fence.last_sequence_irq, 0);
28723+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
28724 return 0;
28725 }
28726
28727diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
28728index 7ce3fde..cb3ea04 100644
28729--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
28730+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
28731@@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
28732 int trycnt = 0;
28733 int ret, i;
28734
28735- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
28736+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
28737 retry:
28738 if (++trycnt > 100000) {
28739 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
28740diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
28741index d8831ab..0ba8356 100644
28742--- a/drivers/gpu/drm/nouveau/nouveau_state.c
28743+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
28744@@ -542,7 +542,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
28745 bool can_switch;
28746
28747 spin_lock(&dev->count_lock);
28748- can_switch = (dev->open_count == 0);
28749+ can_switch = (local_read(&dev->open_count) == 0);
28750 spin_unlock(&dev->count_lock);
28751 return can_switch;
28752 }
28753diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
28754index dbdea8e..cd6eeeb 100644
28755--- a/drivers/gpu/drm/nouveau/nv04_graph.c
28756+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
28757@@ -554,7 +554,7 @@ static int
28758 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
28759 u32 class, u32 mthd, u32 data)
28760 {
28761- atomic_set(&chan->fence.last_sequence_irq, data);
28762+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
28763 return 0;
28764 }
28765
28766diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
28767index bcac90b..53bfc76 100644
28768--- a/drivers/gpu/drm/r128/r128_cce.c
28769+++ b/drivers/gpu/drm/r128/r128_cce.c
28770@@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
28771
28772 /* GH: Simple idle check.
28773 */
28774- atomic_set(&dev_priv->idle_count, 0);
28775+ atomic_set_unchecked(&dev_priv->idle_count, 0);
28776
28777 /* We don't support anything other than bus-mastering ring mode,
28778 * but the ring can be in either AGP or PCI space for the ring
28779diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
28780index 930c71b..499aded 100644
28781--- a/drivers/gpu/drm/r128/r128_drv.h
28782+++ b/drivers/gpu/drm/r128/r128_drv.h
28783@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
28784 int is_pci;
28785 unsigned long cce_buffers_offset;
28786
28787- atomic_t idle_count;
28788+ atomic_unchecked_t idle_count;
28789
28790 int page_flipping;
28791 int current_page;
28792 u32 crtc_offset;
28793 u32 crtc_offset_cntl;
28794
28795- atomic_t vbl_received;
28796+ atomic_unchecked_t vbl_received;
28797
28798 u32 color_fmt;
28799 unsigned int front_offset;
28800diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
28801index 429d5a0..7e899ed 100644
28802--- a/drivers/gpu/drm/r128/r128_irq.c
28803+++ b/drivers/gpu/drm/r128/r128_irq.c
28804@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
28805 if (crtc != 0)
28806 return 0;
28807
28808- return atomic_read(&dev_priv->vbl_received);
28809+ return atomic_read_unchecked(&dev_priv->vbl_received);
28810 }
28811
28812 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28813@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28814 /* VBLANK interrupt */
28815 if (status & R128_CRTC_VBLANK_INT) {
28816 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
28817- atomic_inc(&dev_priv->vbl_received);
28818+ atomic_inc_unchecked(&dev_priv->vbl_received);
28819 drm_handle_vblank(dev, 0);
28820 return IRQ_HANDLED;
28821 }
28822diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
28823index a9e33ce..09edd4b 100644
28824--- a/drivers/gpu/drm/r128/r128_state.c
28825+++ b/drivers/gpu/drm/r128/r128_state.c
28826@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
28827
28828 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
28829 {
28830- if (atomic_read(&dev_priv->idle_count) == 0)
28831+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
28832 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
28833 else
28834- atomic_set(&dev_priv->idle_count, 0);
28835+ atomic_set_unchecked(&dev_priv->idle_count, 0);
28836 }
28837
28838 #endif
28839diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
28840index 5a82b6b..9e69c73 100644
28841--- a/drivers/gpu/drm/radeon/mkregtable.c
28842+++ b/drivers/gpu/drm/radeon/mkregtable.c
28843@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
28844 regex_t mask_rex;
28845 regmatch_t match[4];
28846 char buf[1024];
28847- size_t end;
28848+ long end;
28849 int len;
28850 int done = 0;
28851 int r;
28852 unsigned o;
28853 struct offset *offset;
28854 char last_reg_s[10];
28855- int last_reg;
28856+ unsigned long last_reg;
28857
28858 if (regcomp
28859 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
28860diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
28861index 8227e76..ce0b195 100644
28862--- a/drivers/gpu/drm/radeon/radeon.h
28863+++ b/drivers/gpu/drm/radeon/radeon.h
28864@@ -192,7 +192,7 @@ extern int sumo_get_temp(struct radeon_device *rdev);
28865 */
28866 struct radeon_fence_driver {
28867 uint32_t scratch_reg;
28868- atomic_t seq;
28869+ atomic_unchecked_t seq;
28870 uint32_t last_seq;
28871 unsigned long last_jiffies;
28872 unsigned long last_timeout;
28873@@ -530,7 +530,7 @@ struct r600_blit_cp_primitives {
28874 int x2, int y2);
28875 void (*draw_auto)(struct radeon_device *rdev);
28876 void (*set_default_state)(struct radeon_device *rdev);
28877-};
28878+} __no_const;
28879
28880 struct r600_blit {
28881 struct mutex mutex;
28882@@ -954,7 +954,7 @@ struct radeon_asic {
28883 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
28884 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
28885 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
28886-};
28887+} __no_const;
28888
28889 /*
28890 * Asic structures
28891diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
28892index 9231564..78b00fd 100644
28893--- a/drivers/gpu/drm/radeon/radeon_device.c
28894+++ b/drivers/gpu/drm/radeon/radeon_device.c
28895@@ -687,7 +687,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
28896 bool can_switch;
28897
28898 spin_lock(&dev->count_lock);
28899- can_switch = (dev->open_count == 0);
28900+ can_switch = (local_read(&dev->open_count) == 0);
28901 spin_unlock(&dev->count_lock);
28902 return can_switch;
28903 }
28904diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
28905index a1b59ca..86f2d44 100644
28906--- a/drivers/gpu/drm/radeon/radeon_drv.h
28907+++ b/drivers/gpu/drm/radeon/radeon_drv.h
28908@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
28909
28910 /* SW interrupt */
28911 wait_queue_head_t swi_queue;
28912- atomic_t swi_emitted;
28913+ atomic_unchecked_t swi_emitted;
28914 int vblank_crtc;
28915 uint32_t irq_enable_reg;
28916 uint32_t r500_disp_irq_reg;
28917diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
28918index 76ec0e9..6feb1a3 100644
28919--- a/drivers/gpu/drm/radeon/radeon_fence.c
28920+++ b/drivers/gpu/drm/radeon/radeon_fence.c
28921@@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
28922 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
28923 return 0;
28924 }
28925- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
28926+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
28927 if (!rdev->cp.ready)
28928 /* FIXME: cp is not running assume everythings is done right
28929 * away
28930@@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
28931 return r;
28932 }
28933 radeon_fence_write(rdev, 0);
28934- atomic_set(&rdev->fence_drv.seq, 0);
28935+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
28936 INIT_LIST_HEAD(&rdev->fence_drv.created);
28937 INIT_LIST_HEAD(&rdev->fence_drv.emited);
28938 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
28939diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
28940index 48b7cea..342236f 100644
28941--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
28942+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
28943@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
28944 request = compat_alloc_user_space(sizeof(*request));
28945 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
28946 || __put_user(req32.param, &request->param)
28947- || __put_user((void __user *)(unsigned long)req32.value,
28948+ || __put_user((unsigned long)req32.value,
28949 &request->value))
28950 return -EFAULT;
28951
28952diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
28953index 00da384..32f972d 100644
28954--- a/drivers/gpu/drm/radeon/radeon_irq.c
28955+++ b/drivers/gpu/drm/radeon/radeon_irq.c
28956@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
28957 unsigned int ret;
28958 RING_LOCALS;
28959
28960- atomic_inc(&dev_priv->swi_emitted);
28961- ret = atomic_read(&dev_priv->swi_emitted);
28962+ atomic_inc_unchecked(&dev_priv->swi_emitted);
28963+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
28964
28965 BEGIN_RING(4);
28966 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
28967@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
28968 drm_radeon_private_t *dev_priv =
28969 (drm_radeon_private_t *) dev->dev_private;
28970
28971- atomic_set(&dev_priv->swi_emitted, 0);
28972+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
28973 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
28974
28975 dev->max_vblank_count = 0x001fffff;
28976diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
28977index e8422ae..d22d4a8 100644
28978--- a/drivers/gpu/drm/radeon/radeon_state.c
28979+++ b/drivers/gpu/drm/radeon/radeon_state.c
28980@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
28981 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
28982 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
28983
28984- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28985+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28986 sarea_priv->nbox * sizeof(depth_boxes[0])))
28987 return -EFAULT;
28988
28989@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
28990 {
28991 drm_radeon_private_t *dev_priv = dev->dev_private;
28992 drm_radeon_getparam_t *param = data;
28993- int value;
28994+ int value = 0;
28995
28996 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
28997
28998diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
28999index 0b5468b..9c4b308 100644
29000--- a/drivers/gpu/drm/radeon/radeon_ttm.c
29001+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
29002@@ -672,8 +672,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
29003 }
29004 if (unlikely(ttm_vm_ops == NULL)) {
29005 ttm_vm_ops = vma->vm_ops;
29006- radeon_ttm_vm_ops = *ttm_vm_ops;
29007- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
29008+ pax_open_kernel();
29009+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
29010+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
29011+ pax_close_kernel();
29012 }
29013 vma->vm_ops = &radeon_ttm_vm_ops;
29014 return 0;
29015diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
29016index a9049ed..501f284 100644
29017--- a/drivers/gpu/drm/radeon/rs690.c
29018+++ b/drivers/gpu/drm/radeon/rs690.c
29019@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
29020 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
29021 rdev->pm.sideport_bandwidth.full)
29022 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
29023- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
29024+ read_delay_latency.full = dfixed_const(800 * 1000);
29025 read_delay_latency.full = dfixed_div(read_delay_latency,
29026 rdev->pm.igp_sideport_mclk);
29027+ a.full = dfixed_const(370);
29028+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
29029 } else {
29030 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
29031 rdev->pm.k8_bandwidth.full)
29032diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
29033index 727e93d..1565650 100644
29034--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
29035+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
29036@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
29037 static int ttm_pool_mm_shrink(struct shrinker *shrink,
29038 struct shrink_control *sc)
29039 {
29040- static atomic_t start_pool = ATOMIC_INIT(0);
29041+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
29042 unsigned i;
29043- unsigned pool_offset = atomic_add_return(1, &start_pool);
29044+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
29045 struct ttm_page_pool *pool;
29046 int shrink_pages = sc->nr_to_scan;
29047
29048diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
29049index 9cf87d9..2000b7d 100644
29050--- a/drivers/gpu/drm/via/via_drv.h
29051+++ b/drivers/gpu/drm/via/via_drv.h
29052@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
29053 typedef uint32_t maskarray_t[5];
29054
29055 typedef struct drm_via_irq {
29056- atomic_t irq_received;
29057+ atomic_unchecked_t irq_received;
29058 uint32_t pending_mask;
29059 uint32_t enable_mask;
29060 wait_queue_head_t irq_queue;
29061@@ -75,7 +75,7 @@ typedef struct drm_via_private {
29062 struct timeval last_vblank;
29063 int last_vblank_valid;
29064 unsigned usec_per_vblank;
29065- atomic_t vbl_received;
29066+ atomic_unchecked_t vbl_received;
29067 drm_via_state_t hc_state;
29068 char pci_buf[VIA_PCI_BUF_SIZE];
29069 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
29070diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
29071index d391f48..10c8ca3 100644
29072--- a/drivers/gpu/drm/via/via_irq.c
29073+++ b/drivers/gpu/drm/via/via_irq.c
29074@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
29075 if (crtc != 0)
29076 return 0;
29077
29078- return atomic_read(&dev_priv->vbl_received);
29079+ return atomic_read_unchecked(&dev_priv->vbl_received);
29080 }
29081
29082 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29083@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29084
29085 status = VIA_READ(VIA_REG_INTERRUPT);
29086 if (status & VIA_IRQ_VBLANK_PENDING) {
29087- atomic_inc(&dev_priv->vbl_received);
29088- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
29089+ atomic_inc_unchecked(&dev_priv->vbl_received);
29090+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
29091 do_gettimeofday(&cur_vblank);
29092 if (dev_priv->last_vblank_valid) {
29093 dev_priv->usec_per_vblank =
29094@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29095 dev_priv->last_vblank = cur_vblank;
29096 dev_priv->last_vblank_valid = 1;
29097 }
29098- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
29099+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
29100 DRM_DEBUG("US per vblank is: %u\n",
29101 dev_priv->usec_per_vblank);
29102 }
29103@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29104
29105 for (i = 0; i < dev_priv->num_irqs; ++i) {
29106 if (status & cur_irq->pending_mask) {
29107- atomic_inc(&cur_irq->irq_received);
29108+ atomic_inc_unchecked(&cur_irq->irq_received);
29109 DRM_WAKEUP(&cur_irq->irq_queue);
29110 handled = 1;
29111 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
29112@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
29113 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
29114 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
29115 masks[irq][4]));
29116- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
29117+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
29118 } else {
29119 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
29120 (((cur_irq_sequence =
29121- atomic_read(&cur_irq->irq_received)) -
29122+ atomic_read_unchecked(&cur_irq->irq_received)) -
29123 *sequence) <= (1 << 23)));
29124 }
29125 *sequence = cur_irq_sequence;
29126@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
29127 }
29128
29129 for (i = 0; i < dev_priv->num_irqs; ++i) {
29130- atomic_set(&cur_irq->irq_received, 0);
29131+ atomic_set_unchecked(&cur_irq->irq_received, 0);
29132 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
29133 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
29134 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
29135@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
29136 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
29137 case VIA_IRQ_RELATIVE:
29138 irqwait->request.sequence +=
29139- atomic_read(&cur_irq->irq_received);
29140+ atomic_read_unchecked(&cur_irq->irq_received);
29141 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
29142 case VIA_IRQ_ABSOLUTE:
29143 break;
29144diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
29145index dc27970..f18b008 100644
29146--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
29147+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
29148@@ -260,7 +260,7 @@ struct vmw_private {
29149 * Fencing and IRQs.
29150 */
29151
29152- atomic_t marker_seq;
29153+ atomic_unchecked_t marker_seq;
29154 wait_queue_head_t fence_queue;
29155 wait_queue_head_t fifo_queue;
29156 int fence_queue_waiters; /* Protected by hw_mutex */
29157diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
29158index a0c2f12..68ae6cb 100644
29159--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
29160+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
29161@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
29162 (unsigned int) min,
29163 (unsigned int) fifo->capabilities);
29164
29165- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
29166+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
29167 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
29168 vmw_marker_queue_init(&fifo->marker_queue);
29169 return vmw_fifo_send_fence(dev_priv, &dummy);
29170@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
29171 if (reserveable)
29172 iowrite32(bytes, fifo_mem +
29173 SVGA_FIFO_RESERVED);
29174- return fifo_mem + (next_cmd >> 2);
29175+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
29176 } else {
29177 need_bounce = true;
29178 }
29179@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
29180
29181 fm = vmw_fifo_reserve(dev_priv, bytes);
29182 if (unlikely(fm == NULL)) {
29183- *seqno = atomic_read(&dev_priv->marker_seq);
29184+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
29185 ret = -ENOMEM;
29186 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
29187 false, 3*HZ);
29188@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
29189 }
29190
29191 do {
29192- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
29193+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
29194 } while (*seqno == 0);
29195
29196 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
29197diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29198index cabc95f..14b3d77 100644
29199--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29200+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29201@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
29202 * emitted. Then the fence is stale and signaled.
29203 */
29204
29205- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
29206+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
29207 > VMW_FENCE_WRAP);
29208
29209 return ret;
29210@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
29211
29212 if (fifo_idle)
29213 down_read(&fifo_state->rwsem);
29214- signal_seq = atomic_read(&dev_priv->marker_seq);
29215+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
29216 ret = 0;
29217
29218 for (;;) {
29219diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
29220index 8a8725c..afed796 100644
29221--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
29222+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
29223@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
29224 while (!vmw_lag_lt(queue, us)) {
29225 spin_lock(&queue->lock);
29226 if (list_empty(&queue->head))
29227- seqno = atomic_read(&dev_priv->marker_seq);
29228+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
29229 else {
29230 marker = list_first_entry(&queue->head,
29231 struct vmw_marker, head);
29232diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
29233index bb656d8..4169fca 100644
29234--- a/drivers/hid/hid-core.c
29235+++ b/drivers/hid/hid-core.c
29236@@ -2012,7 +2012,7 @@ static bool hid_ignore(struct hid_device *hdev)
29237
29238 int hid_add_device(struct hid_device *hdev)
29239 {
29240- static atomic_t id = ATOMIC_INIT(0);
29241+ static atomic_unchecked_t id = ATOMIC_INIT(0);
29242 int ret;
29243
29244 if (WARN_ON(hdev->status & HID_STAT_ADDED))
29245@@ -2027,7 +2027,7 @@ int hid_add_device(struct hid_device *hdev)
29246 /* XXX hack, any other cleaner solution after the driver core
29247 * is converted to allow more than 20 bytes as the device name? */
29248 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
29249- hdev->vendor, hdev->product, atomic_inc_return(&id));
29250+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
29251
29252 hid_debug_register(hdev, dev_name(&hdev->dev));
29253 ret = device_add(&hdev->dev);
29254diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
29255index 4ef02b2..8a96831 100644
29256--- a/drivers/hid/usbhid/hiddev.c
29257+++ b/drivers/hid/usbhid/hiddev.c
29258@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
29259 break;
29260
29261 case HIDIOCAPPLICATION:
29262- if (arg < 0 || arg >= hid->maxapplication)
29263+ if (arg >= hid->maxapplication)
29264 break;
29265
29266 for (i = 0; i < hid->maxcollection; i++)
29267diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
29268index 4065374..10ed7dc 100644
29269--- a/drivers/hv/channel.c
29270+++ b/drivers/hv/channel.c
29271@@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
29272 int ret = 0;
29273 int t;
29274
29275- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
29276- atomic_inc(&vmbus_connection.next_gpadl_handle);
29277+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
29278+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
29279
29280 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
29281 if (ret)
29282diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
29283index 0fb100e..baf87e5 100644
29284--- a/drivers/hv/hv.c
29285+++ b/drivers/hv/hv.c
29286@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
29287 u64 output_address = (output) ? virt_to_phys(output) : 0;
29288 u32 output_address_hi = output_address >> 32;
29289 u32 output_address_lo = output_address & 0xFFFFFFFF;
29290- void *hypercall_page = hv_context.hypercall_page;
29291+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
29292
29293 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
29294 "=a"(hv_status_lo) : "d" (control_hi),
29295diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
29296index 0aee112..b72d21f 100644
29297--- a/drivers/hv/hyperv_vmbus.h
29298+++ b/drivers/hv/hyperv_vmbus.h
29299@@ -556,7 +556,7 @@ enum vmbus_connect_state {
29300 struct vmbus_connection {
29301 enum vmbus_connect_state conn_state;
29302
29303- atomic_t next_gpadl_handle;
29304+ atomic_unchecked_t next_gpadl_handle;
29305
29306 /*
29307 * Represents channel interrupts. Each bit position represents a
29308diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
29309index d2d0a2a..90b8f4d 100644
29310--- a/drivers/hv/vmbus_drv.c
29311+++ b/drivers/hv/vmbus_drv.c
29312@@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
29313 {
29314 int ret = 0;
29315
29316- static atomic_t device_num = ATOMIC_INIT(0);
29317+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
29318
29319 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
29320- atomic_inc_return(&device_num));
29321+ atomic_inc_return_unchecked(&device_num));
29322
29323 child_device_obj->device.bus = &hv_bus;
29324 child_device_obj->device.parent = &hv_acpi_dev->dev;
29325diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
29326index 66f6729..2d6de0a 100644
29327--- a/drivers/hwmon/acpi_power_meter.c
29328+++ b/drivers/hwmon/acpi_power_meter.c
29329@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
29330 return res;
29331
29332 temp /= 1000;
29333- if (temp < 0)
29334- return -EINVAL;
29335
29336 mutex_lock(&resource->lock);
29337 resource->trip[attr->index - 7] = temp;
29338diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
29339index 5357925..6cf0418 100644
29340--- a/drivers/hwmon/sht15.c
29341+++ b/drivers/hwmon/sht15.c
29342@@ -166,7 +166,7 @@ struct sht15_data {
29343 int supply_uV;
29344 bool supply_uV_valid;
29345 struct work_struct update_supply_work;
29346- atomic_t interrupt_handled;
29347+ atomic_unchecked_t interrupt_handled;
29348 };
29349
29350 /**
29351@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
29352 return ret;
29353
29354 gpio_direction_input(data->pdata->gpio_data);
29355- atomic_set(&data->interrupt_handled, 0);
29356+ atomic_set_unchecked(&data->interrupt_handled, 0);
29357
29358 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29359 if (gpio_get_value(data->pdata->gpio_data) == 0) {
29360 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
29361 /* Only relevant if the interrupt hasn't occurred. */
29362- if (!atomic_read(&data->interrupt_handled))
29363+ if (!atomic_read_unchecked(&data->interrupt_handled))
29364 schedule_work(&data->read_work);
29365 }
29366 ret = wait_event_timeout(data->wait_queue,
29367@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
29368
29369 /* First disable the interrupt */
29370 disable_irq_nosync(irq);
29371- atomic_inc(&data->interrupt_handled);
29372+ atomic_inc_unchecked(&data->interrupt_handled);
29373 /* Then schedule a reading work struct */
29374 if (data->state != SHT15_READING_NOTHING)
29375 schedule_work(&data->read_work);
29376@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
29377 * If not, then start the interrupt again - care here as could
29378 * have gone low in meantime so verify it hasn't!
29379 */
29380- atomic_set(&data->interrupt_handled, 0);
29381+ atomic_set_unchecked(&data->interrupt_handled, 0);
29382 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29383 /* If still not occurred or another handler has been scheduled */
29384 if (gpio_get_value(data->pdata->gpio_data)
29385- || atomic_read(&data->interrupt_handled))
29386+ || atomic_read_unchecked(&data->interrupt_handled))
29387 return;
29388 }
29389
29390diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
29391index 378fcb5..5e91fa8 100644
29392--- a/drivers/i2c/busses/i2c-amd756-s4882.c
29393+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
29394@@ -43,7 +43,7 @@
29395 extern struct i2c_adapter amd756_smbus;
29396
29397 static struct i2c_adapter *s4882_adapter;
29398-static struct i2c_algorithm *s4882_algo;
29399+static i2c_algorithm_no_const *s4882_algo;
29400
29401 /* Wrapper access functions for multiplexed SMBus */
29402 static DEFINE_MUTEX(amd756_lock);
29403diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
29404index 29015eb..af2d8e9 100644
29405--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
29406+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
29407@@ -41,7 +41,7 @@
29408 extern struct i2c_adapter *nforce2_smbus;
29409
29410 static struct i2c_adapter *s4985_adapter;
29411-static struct i2c_algorithm *s4985_algo;
29412+static i2c_algorithm_no_const *s4985_algo;
29413
29414 /* Wrapper access functions for multiplexed SMBus */
29415 static DEFINE_MUTEX(nforce2_lock);
29416diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
29417index d7a4833..7fae376 100644
29418--- a/drivers/i2c/i2c-mux.c
29419+++ b/drivers/i2c/i2c-mux.c
29420@@ -28,7 +28,7 @@
29421 /* multiplexer per channel data */
29422 struct i2c_mux_priv {
29423 struct i2c_adapter adap;
29424- struct i2c_algorithm algo;
29425+ i2c_algorithm_no_const algo;
29426
29427 struct i2c_adapter *parent;
29428 void *mux_dev; /* the mux chip/device */
29429diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
29430index 57d00ca..0145194 100644
29431--- a/drivers/ide/aec62xx.c
29432+++ b/drivers/ide/aec62xx.c
29433@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
29434 .cable_detect = atp86x_cable_detect,
29435 };
29436
29437-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
29438+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
29439 { /* 0: AEC6210 */
29440 .name = DRV_NAME,
29441 .init_chipset = init_chipset_aec62xx,
29442diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
29443index 2c8016a..911a27c 100644
29444--- a/drivers/ide/alim15x3.c
29445+++ b/drivers/ide/alim15x3.c
29446@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
29447 .dma_sff_read_status = ide_dma_sff_read_status,
29448 };
29449
29450-static const struct ide_port_info ali15x3_chipset __devinitdata = {
29451+static const struct ide_port_info ali15x3_chipset __devinitconst = {
29452 .name = DRV_NAME,
29453 .init_chipset = init_chipset_ali15x3,
29454 .init_hwif = init_hwif_ali15x3,
29455diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
29456index 3747b25..56fc995 100644
29457--- a/drivers/ide/amd74xx.c
29458+++ b/drivers/ide/amd74xx.c
29459@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
29460 .udma_mask = udma, \
29461 }
29462
29463-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
29464+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
29465 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
29466 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
29467 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
29468diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
29469index 15f0ead..cb43480 100644
29470--- a/drivers/ide/atiixp.c
29471+++ b/drivers/ide/atiixp.c
29472@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
29473 .cable_detect = atiixp_cable_detect,
29474 };
29475
29476-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
29477+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
29478 { /* 0: IXP200/300/400/700 */
29479 .name = DRV_NAME,
29480 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
29481diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
29482index 5f80312..d1fc438 100644
29483--- a/drivers/ide/cmd64x.c
29484+++ b/drivers/ide/cmd64x.c
29485@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
29486 .dma_sff_read_status = ide_dma_sff_read_status,
29487 };
29488
29489-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
29490+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
29491 { /* 0: CMD643 */
29492 .name = DRV_NAME,
29493 .init_chipset = init_chipset_cmd64x,
29494diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
29495index 2c1e5f7..1444762 100644
29496--- a/drivers/ide/cs5520.c
29497+++ b/drivers/ide/cs5520.c
29498@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
29499 .set_dma_mode = cs5520_set_dma_mode,
29500 };
29501
29502-static const struct ide_port_info cyrix_chipset __devinitdata = {
29503+static const struct ide_port_info cyrix_chipset __devinitconst = {
29504 .name = DRV_NAME,
29505 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
29506 .port_ops = &cs5520_port_ops,
29507diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
29508index 4dc4eb9..49b40ad 100644
29509--- a/drivers/ide/cs5530.c
29510+++ b/drivers/ide/cs5530.c
29511@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
29512 .udma_filter = cs5530_udma_filter,
29513 };
29514
29515-static const struct ide_port_info cs5530_chipset __devinitdata = {
29516+static const struct ide_port_info cs5530_chipset __devinitconst = {
29517 .name = DRV_NAME,
29518 .init_chipset = init_chipset_cs5530,
29519 .init_hwif = init_hwif_cs5530,
29520diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
29521index 5059faf..18d4c85 100644
29522--- a/drivers/ide/cs5535.c
29523+++ b/drivers/ide/cs5535.c
29524@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
29525 .cable_detect = cs5535_cable_detect,
29526 };
29527
29528-static const struct ide_port_info cs5535_chipset __devinitdata = {
29529+static const struct ide_port_info cs5535_chipset __devinitconst = {
29530 .name = DRV_NAME,
29531 .port_ops = &cs5535_port_ops,
29532 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
29533diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
29534index 847553f..3ffb49d 100644
29535--- a/drivers/ide/cy82c693.c
29536+++ b/drivers/ide/cy82c693.c
29537@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
29538 .set_dma_mode = cy82c693_set_dma_mode,
29539 };
29540
29541-static const struct ide_port_info cy82c693_chipset __devinitdata = {
29542+static const struct ide_port_info cy82c693_chipset __devinitconst = {
29543 .name = DRV_NAME,
29544 .init_iops = init_iops_cy82c693,
29545 .port_ops = &cy82c693_port_ops,
29546diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
29547index 58c51cd..4aec3b8 100644
29548--- a/drivers/ide/hpt366.c
29549+++ b/drivers/ide/hpt366.c
29550@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
29551 }
29552 };
29553
29554-static const struct hpt_info hpt36x __devinitdata = {
29555+static const struct hpt_info hpt36x __devinitconst = {
29556 .chip_name = "HPT36x",
29557 .chip_type = HPT36x,
29558 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
29559@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
29560 .timings = &hpt36x_timings
29561 };
29562
29563-static const struct hpt_info hpt370 __devinitdata = {
29564+static const struct hpt_info hpt370 __devinitconst = {
29565 .chip_name = "HPT370",
29566 .chip_type = HPT370,
29567 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29568@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
29569 .timings = &hpt37x_timings
29570 };
29571
29572-static const struct hpt_info hpt370a __devinitdata = {
29573+static const struct hpt_info hpt370a __devinitconst = {
29574 .chip_name = "HPT370A",
29575 .chip_type = HPT370A,
29576 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29577@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
29578 .timings = &hpt37x_timings
29579 };
29580
29581-static const struct hpt_info hpt374 __devinitdata = {
29582+static const struct hpt_info hpt374 __devinitconst = {
29583 .chip_name = "HPT374",
29584 .chip_type = HPT374,
29585 .udma_mask = ATA_UDMA5,
29586@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
29587 .timings = &hpt37x_timings
29588 };
29589
29590-static const struct hpt_info hpt372 __devinitdata = {
29591+static const struct hpt_info hpt372 __devinitconst = {
29592 .chip_name = "HPT372",
29593 .chip_type = HPT372,
29594 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29595@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
29596 .timings = &hpt37x_timings
29597 };
29598
29599-static const struct hpt_info hpt372a __devinitdata = {
29600+static const struct hpt_info hpt372a __devinitconst = {
29601 .chip_name = "HPT372A",
29602 .chip_type = HPT372A,
29603 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29604@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
29605 .timings = &hpt37x_timings
29606 };
29607
29608-static const struct hpt_info hpt302 __devinitdata = {
29609+static const struct hpt_info hpt302 __devinitconst = {
29610 .chip_name = "HPT302",
29611 .chip_type = HPT302,
29612 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29613@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
29614 .timings = &hpt37x_timings
29615 };
29616
29617-static const struct hpt_info hpt371 __devinitdata = {
29618+static const struct hpt_info hpt371 __devinitconst = {
29619 .chip_name = "HPT371",
29620 .chip_type = HPT371,
29621 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29622@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
29623 .timings = &hpt37x_timings
29624 };
29625
29626-static const struct hpt_info hpt372n __devinitdata = {
29627+static const struct hpt_info hpt372n __devinitconst = {
29628 .chip_name = "HPT372N",
29629 .chip_type = HPT372N,
29630 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29631@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
29632 .timings = &hpt37x_timings
29633 };
29634
29635-static const struct hpt_info hpt302n __devinitdata = {
29636+static const struct hpt_info hpt302n __devinitconst = {
29637 .chip_name = "HPT302N",
29638 .chip_type = HPT302N,
29639 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29640@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
29641 .timings = &hpt37x_timings
29642 };
29643
29644-static const struct hpt_info hpt371n __devinitdata = {
29645+static const struct hpt_info hpt371n __devinitconst = {
29646 .chip_name = "HPT371N",
29647 .chip_type = HPT371N,
29648 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29649@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
29650 .dma_sff_read_status = ide_dma_sff_read_status,
29651 };
29652
29653-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
29654+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
29655 { /* 0: HPT36x */
29656 .name = DRV_NAME,
29657 .init_chipset = init_chipset_hpt366,
29658diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
29659index 8126824..55a2798 100644
29660--- a/drivers/ide/ide-cd.c
29661+++ b/drivers/ide/ide-cd.c
29662@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
29663 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
29664 if ((unsigned long)buf & alignment
29665 || blk_rq_bytes(rq) & q->dma_pad_mask
29666- || object_is_on_stack(buf))
29667+ || object_starts_on_stack(buf))
29668 drive->dma = 0;
29669 }
29670 }
29671diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
29672index a743e68..1cfd674 100644
29673--- a/drivers/ide/ide-pci-generic.c
29674+++ b/drivers/ide/ide-pci-generic.c
29675@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
29676 .udma_mask = ATA_UDMA6, \
29677 }
29678
29679-static const struct ide_port_info generic_chipsets[] __devinitdata = {
29680+static const struct ide_port_info generic_chipsets[] __devinitconst = {
29681 /* 0: Unknown */
29682 DECLARE_GENERIC_PCI_DEV(0),
29683
29684diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
29685index 560e66d..d5dd180 100644
29686--- a/drivers/ide/it8172.c
29687+++ b/drivers/ide/it8172.c
29688@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
29689 .set_dma_mode = it8172_set_dma_mode,
29690 };
29691
29692-static const struct ide_port_info it8172_port_info __devinitdata = {
29693+static const struct ide_port_info it8172_port_info __devinitconst = {
29694 .name = DRV_NAME,
29695 .port_ops = &it8172_port_ops,
29696 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
29697diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
29698index 46816ba..1847aeb 100644
29699--- a/drivers/ide/it8213.c
29700+++ b/drivers/ide/it8213.c
29701@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
29702 .cable_detect = it8213_cable_detect,
29703 };
29704
29705-static const struct ide_port_info it8213_chipset __devinitdata = {
29706+static const struct ide_port_info it8213_chipset __devinitconst = {
29707 .name = DRV_NAME,
29708 .enablebits = { {0x41, 0x80, 0x80} },
29709 .port_ops = &it8213_port_ops,
29710diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
29711index 2e3169f..c5611db 100644
29712--- a/drivers/ide/it821x.c
29713+++ b/drivers/ide/it821x.c
29714@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
29715 .cable_detect = it821x_cable_detect,
29716 };
29717
29718-static const struct ide_port_info it821x_chipset __devinitdata = {
29719+static const struct ide_port_info it821x_chipset __devinitconst = {
29720 .name = DRV_NAME,
29721 .init_chipset = init_chipset_it821x,
29722 .init_hwif = init_hwif_it821x,
29723diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
29724index 74c2c4a..efddd7d 100644
29725--- a/drivers/ide/jmicron.c
29726+++ b/drivers/ide/jmicron.c
29727@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
29728 .cable_detect = jmicron_cable_detect,
29729 };
29730
29731-static const struct ide_port_info jmicron_chipset __devinitdata = {
29732+static const struct ide_port_info jmicron_chipset __devinitconst = {
29733 .name = DRV_NAME,
29734 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
29735 .port_ops = &jmicron_port_ops,
29736diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
29737index 95327a2..73f78d8 100644
29738--- a/drivers/ide/ns87415.c
29739+++ b/drivers/ide/ns87415.c
29740@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
29741 .dma_sff_read_status = superio_dma_sff_read_status,
29742 };
29743
29744-static const struct ide_port_info ns87415_chipset __devinitdata = {
29745+static const struct ide_port_info ns87415_chipset __devinitconst = {
29746 .name = DRV_NAME,
29747 .init_hwif = init_hwif_ns87415,
29748 .tp_ops = &ns87415_tp_ops,
29749diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
29750index 1a53a4c..39edc66 100644
29751--- a/drivers/ide/opti621.c
29752+++ b/drivers/ide/opti621.c
29753@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
29754 .set_pio_mode = opti621_set_pio_mode,
29755 };
29756
29757-static const struct ide_port_info opti621_chipset __devinitdata = {
29758+static const struct ide_port_info opti621_chipset __devinitconst = {
29759 .name = DRV_NAME,
29760 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
29761 .port_ops = &opti621_port_ops,
29762diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
29763index 9546fe2..2e5ceb6 100644
29764--- a/drivers/ide/pdc202xx_new.c
29765+++ b/drivers/ide/pdc202xx_new.c
29766@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
29767 .udma_mask = udma, \
29768 }
29769
29770-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
29771+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
29772 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
29773 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
29774 };
29775diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
29776index 3a35ec6..5634510 100644
29777--- a/drivers/ide/pdc202xx_old.c
29778+++ b/drivers/ide/pdc202xx_old.c
29779@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
29780 .max_sectors = sectors, \
29781 }
29782
29783-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
29784+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
29785 { /* 0: PDC20246 */
29786 .name = DRV_NAME,
29787 .init_chipset = init_chipset_pdc202xx,
29788diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
29789index 1892e81..fe0fd60 100644
29790--- a/drivers/ide/piix.c
29791+++ b/drivers/ide/piix.c
29792@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
29793 .udma_mask = udma, \
29794 }
29795
29796-static const struct ide_port_info piix_pci_info[] __devinitdata = {
29797+static const struct ide_port_info piix_pci_info[] __devinitconst = {
29798 /* 0: MPIIX */
29799 { /*
29800 * MPIIX actually has only a single IDE channel mapped to
29801diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
29802index a6414a8..c04173e 100644
29803--- a/drivers/ide/rz1000.c
29804+++ b/drivers/ide/rz1000.c
29805@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
29806 }
29807 }
29808
29809-static const struct ide_port_info rz1000_chipset __devinitdata = {
29810+static const struct ide_port_info rz1000_chipset __devinitconst = {
29811 .name = DRV_NAME,
29812 .host_flags = IDE_HFLAG_NO_DMA,
29813 };
29814diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
29815index 356b9b5..d4758eb 100644
29816--- a/drivers/ide/sc1200.c
29817+++ b/drivers/ide/sc1200.c
29818@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
29819 .dma_sff_read_status = ide_dma_sff_read_status,
29820 };
29821
29822-static const struct ide_port_info sc1200_chipset __devinitdata = {
29823+static const struct ide_port_info sc1200_chipset __devinitconst = {
29824 .name = DRV_NAME,
29825 .port_ops = &sc1200_port_ops,
29826 .dma_ops = &sc1200_dma_ops,
29827diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
29828index b7f5b0c..9701038 100644
29829--- a/drivers/ide/scc_pata.c
29830+++ b/drivers/ide/scc_pata.c
29831@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
29832 .dma_sff_read_status = scc_dma_sff_read_status,
29833 };
29834
29835-static const struct ide_port_info scc_chipset __devinitdata = {
29836+static const struct ide_port_info scc_chipset __devinitconst = {
29837 .name = "sccIDE",
29838 .init_iops = init_iops_scc,
29839 .init_dma = scc_init_dma,
29840diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
29841index 35fb8da..24d72ef 100644
29842--- a/drivers/ide/serverworks.c
29843+++ b/drivers/ide/serverworks.c
29844@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
29845 .cable_detect = svwks_cable_detect,
29846 };
29847
29848-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
29849+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
29850 { /* 0: OSB4 */
29851 .name = DRV_NAME,
29852 .init_chipset = init_chipset_svwks,
29853diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
29854index ddeda44..46f7e30 100644
29855--- a/drivers/ide/siimage.c
29856+++ b/drivers/ide/siimage.c
29857@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
29858 .udma_mask = ATA_UDMA6, \
29859 }
29860
29861-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
29862+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
29863 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
29864 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
29865 };
29866diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
29867index 4a00225..09e61b4 100644
29868--- a/drivers/ide/sis5513.c
29869+++ b/drivers/ide/sis5513.c
29870@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
29871 .cable_detect = sis_cable_detect,
29872 };
29873
29874-static const struct ide_port_info sis5513_chipset __devinitdata = {
29875+static const struct ide_port_info sis5513_chipset __devinitconst = {
29876 .name = DRV_NAME,
29877 .init_chipset = init_chipset_sis5513,
29878 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
29879diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
29880index f21dc2a..d051cd2 100644
29881--- a/drivers/ide/sl82c105.c
29882+++ b/drivers/ide/sl82c105.c
29883@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
29884 .dma_sff_read_status = ide_dma_sff_read_status,
29885 };
29886
29887-static const struct ide_port_info sl82c105_chipset __devinitdata = {
29888+static const struct ide_port_info sl82c105_chipset __devinitconst = {
29889 .name = DRV_NAME,
29890 .init_chipset = init_chipset_sl82c105,
29891 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
29892diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
29893index 864ffe0..863a5e9 100644
29894--- a/drivers/ide/slc90e66.c
29895+++ b/drivers/ide/slc90e66.c
29896@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
29897 .cable_detect = slc90e66_cable_detect,
29898 };
29899
29900-static const struct ide_port_info slc90e66_chipset __devinitdata = {
29901+static const struct ide_port_info slc90e66_chipset __devinitconst = {
29902 .name = DRV_NAME,
29903 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
29904 .port_ops = &slc90e66_port_ops,
29905diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
29906index 4799d5c..1794678 100644
29907--- a/drivers/ide/tc86c001.c
29908+++ b/drivers/ide/tc86c001.c
29909@@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
29910 .dma_sff_read_status = ide_dma_sff_read_status,
29911 };
29912
29913-static const struct ide_port_info tc86c001_chipset __devinitdata = {
29914+static const struct ide_port_info tc86c001_chipset __devinitconst = {
29915 .name = DRV_NAME,
29916 .init_hwif = init_hwif_tc86c001,
29917 .port_ops = &tc86c001_port_ops,
29918diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
29919index 281c914..55ce1b8 100644
29920--- a/drivers/ide/triflex.c
29921+++ b/drivers/ide/triflex.c
29922@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
29923 .set_dma_mode = triflex_set_mode,
29924 };
29925
29926-static const struct ide_port_info triflex_device __devinitdata = {
29927+static const struct ide_port_info triflex_device __devinitconst = {
29928 .name = DRV_NAME,
29929 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
29930 .port_ops = &triflex_port_ops,
29931diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
29932index 4b42ca0..e494a98 100644
29933--- a/drivers/ide/trm290.c
29934+++ b/drivers/ide/trm290.c
29935@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
29936 .dma_check = trm290_dma_check,
29937 };
29938
29939-static const struct ide_port_info trm290_chipset __devinitdata = {
29940+static const struct ide_port_info trm290_chipset __devinitconst = {
29941 .name = DRV_NAME,
29942 .init_hwif = init_hwif_trm290,
29943 .tp_ops = &trm290_tp_ops,
29944diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
29945index f46f49c..eb77678 100644
29946--- a/drivers/ide/via82cxxx.c
29947+++ b/drivers/ide/via82cxxx.c
29948@@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
29949 .cable_detect = via82cxxx_cable_detect,
29950 };
29951
29952-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
29953+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
29954 .name = DRV_NAME,
29955 .init_chipset = init_chipset_via82cxxx,
29956 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
29957diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
29958index eb0e2cc..14241c7 100644
29959--- a/drivers/ieee802154/fakehard.c
29960+++ b/drivers/ieee802154/fakehard.c
29961@@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
29962 phy->transmit_power = 0xbf;
29963
29964 dev->netdev_ops = &fake_ops;
29965- dev->ml_priv = &fake_mlme;
29966+ dev->ml_priv = (void *)&fake_mlme;
29967
29968 priv = netdev_priv(dev);
29969 priv->phy = phy;
29970diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
29971index 8b72f39..55df4c8 100644
29972--- a/drivers/infiniband/core/cm.c
29973+++ b/drivers/infiniband/core/cm.c
29974@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
29975
29976 struct cm_counter_group {
29977 struct kobject obj;
29978- atomic_long_t counter[CM_ATTR_COUNT];
29979+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
29980 };
29981
29982 struct cm_counter_attribute {
29983@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
29984 struct ib_mad_send_buf *msg = NULL;
29985 int ret;
29986
29987- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29988+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29989 counter[CM_REQ_COUNTER]);
29990
29991 /* Quick state check to discard duplicate REQs. */
29992@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
29993 if (!cm_id_priv)
29994 return;
29995
29996- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29997+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29998 counter[CM_REP_COUNTER]);
29999 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
30000 if (ret)
30001@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
30002 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
30003 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
30004 spin_unlock_irq(&cm_id_priv->lock);
30005- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30006+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30007 counter[CM_RTU_COUNTER]);
30008 goto out;
30009 }
30010@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
30011 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
30012 dreq_msg->local_comm_id);
30013 if (!cm_id_priv) {
30014- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30015+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30016 counter[CM_DREQ_COUNTER]);
30017 cm_issue_drep(work->port, work->mad_recv_wc);
30018 return -EINVAL;
30019@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
30020 case IB_CM_MRA_REP_RCVD:
30021 break;
30022 case IB_CM_TIMEWAIT:
30023- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30024+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30025 counter[CM_DREQ_COUNTER]);
30026 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30027 goto unlock;
30028@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
30029 cm_free_msg(msg);
30030 goto deref;
30031 case IB_CM_DREQ_RCVD:
30032- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30033+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30034 counter[CM_DREQ_COUNTER]);
30035 goto unlock;
30036 default:
30037@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
30038 ib_modify_mad(cm_id_priv->av.port->mad_agent,
30039 cm_id_priv->msg, timeout)) {
30040 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
30041- atomic_long_inc(&work->port->
30042+ atomic_long_inc_unchecked(&work->port->
30043 counter_group[CM_RECV_DUPLICATES].
30044 counter[CM_MRA_COUNTER]);
30045 goto out;
30046@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
30047 break;
30048 case IB_CM_MRA_REQ_RCVD:
30049 case IB_CM_MRA_REP_RCVD:
30050- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30051+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30052 counter[CM_MRA_COUNTER]);
30053 /* fall through */
30054 default:
30055@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
30056 case IB_CM_LAP_IDLE:
30057 break;
30058 case IB_CM_MRA_LAP_SENT:
30059- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30060+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30061 counter[CM_LAP_COUNTER]);
30062 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30063 goto unlock;
30064@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
30065 cm_free_msg(msg);
30066 goto deref;
30067 case IB_CM_LAP_RCVD:
30068- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30069+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30070 counter[CM_LAP_COUNTER]);
30071 goto unlock;
30072 default:
30073@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
30074 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
30075 if (cur_cm_id_priv) {
30076 spin_unlock_irq(&cm.lock);
30077- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30078+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30079 counter[CM_SIDR_REQ_COUNTER]);
30080 goto out; /* Duplicate message. */
30081 }
30082@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
30083 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
30084 msg->retries = 1;
30085
30086- atomic_long_add(1 + msg->retries,
30087+ atomic_long_add_unchecked(1 + msg->retries,
30088 &port->counter_group[CM_XMIT].counter[attr_index]);
30089 if (msg->retries)
30090- atomic_long_add(msg->retries,
30091+ atomic_long_add_unchecked(msg->retries,
30092 &port->counter_group[CM_XMIT_RETRIES].
30093 counter[attr_index]);
30094
30095@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
30096 }
30097
30098 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
30099- atomic_long_inc(&port->counter_group[CM_RECV].
30100+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
30101 counter[attr_id - CM_ATTR_ID_OFFSET]);
30102
30103 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
30104@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
30105 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
30106
30107 return sprintf(buf, "%ld\n",
30108- atomic_long_read(&group->counter[cm_attr->index]));
30109+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
30110 }
30111
30112 static const struct sysfs_ops cm_counter_ops = {
30113diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
30114index 176c8f9..2627b62 100644
30115--- a/drivers/infiniband/core/fmr_pool.c
30116+++ b/drivers/infiniband/core/fmr_pool.c
30117@@ -98,8 +98,8 @@ struct ib_fmr_pool {
30118
30119 struct task_struct *thread;
30120
30121- atomic_t req_ser;
30122- atomic_t flush_ser;
30123+ atomic_unchecked_t req_ser;
30124+ atomic_unchecked_t flush_ser;
30125
30126 wait_queue_head_t force_wait;
30127 };
30128@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
30129 struct ib_fmr_pool *pool = pool_ptr;
30130
30131 do {
30132- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
30133+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
30134 ib_fmr_batch_release(pool);
30135
30136- atomic_inc(&pool->flush_ser);
30137+ atomic_inc_unchecked(&pool->flush_ser);
30138 wake_up_interruptible(&pool->force_wait);
30139
30140 if (pool->flush_function)
30141@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
30142 }
30143
30144 set_current_state(TASK_INTERRUPTIBLE);
30145- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
30146+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
30147 !kthread_should_stop())
30148 schedule();
30149 __set_current_state(TASK_RUNNING);
30150@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
30151 pool->dirty_watermark = params->dirty_watermark;
30152 pool->dirty_len = 0;
30153 spin_lock_init(&pool->pool_lock);
30154- atomic_set(&pool->req_ser, 0);
30155- atomic_set(&pool->flush_ser, 0);
30156+ atomic_set_unchecked(&pool->req_ser, 0);
30157+ atomic_set_unchecked(&pool->flush_ser, 0);
30158 init_waitqueue_head(&pool->force_wait);
30159
30160 pool->thread = kthread_run(ib_fmr_cleanup_thread,
30161@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
30162 }
30163 spin_unlock_irq(&pool->pool_lock);
30164
30165- serial = atomic_inc_return(&pool->req_ser);
30166+ serial = atomic_inc_return_unchecked(&pool->req_ser);
30167 wake_up_process(pool->thread);
30168
30169 if (wait_event_interruptible(pool->force_wait,
30170- atomic_read(&pool->flush_ser) - serial >= 0))
30171+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
30172 return -EINTR;
30173
30174 return 0;
30175@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
30176 } else {
30177 list_add_tail(&fmr->list, &pool->dirty_list);
30178 if (++pool->dirty_len >= pool->dirty_watermark) {
30179- atomic_inc(&pool->req_ser);
30180+ atomic_inc_unchecked(&pool->req_ser);
30181 wake_up_process(pool->thread);
30182 }
30183 }
30184diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
30185index 40c8353..946b0e4 100644
30186--- a/drivers/infiniband/hw/cxgb4/mem.c
30187+++ b/drivers/infiniband/hw/cxgb4/mem.c
30188@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
30189 int err;
30190 struct fw_ri_tpte tpt;
30191 u32 stag_idx;
30192- static atomic_t key;
30193+ static atomic_unchecked_t key;
30194
30195 if (c4iw_fatal_error(rdev))
30196 return -EIO;
30197@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
30198 &rdev->resource.tpt_fifo_lock);
30199 if (!stag_idx)
30200 return -ENOMEM;
30201- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
30202+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
30203 }
30204 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
30205 __func__, stag_state, type, pdid, stag_idx);
30206diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
30207index 79b3dbc..96e5fcc 100644
30208--- a/drivers/infiniband/hw/ipath/ipath_rc.c
30209+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
30210@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
30211 struct ib_atomic_eth *ateth;
30212 struct ipath_ack_entry *e;
30213 u64 vaddr;
30214- atomic64_t *maddr;
30215+ atomic64_unchecked_t *maddr;
30216 u64 sdata;
30217 u32 rkey;
30218 u8 next;
30219@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
30220 IB_ACCESS_REMOTE_ATOMIC)))
30221 goto nack_acc_unlck;
30222 /* Perform atomic OP and save result. */
30223- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
30224+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
30225 sdata = be64_to_cpu(ateth->swap_data);
30226 e = &qp->s_ack_queue[qp->r_head_ack_queue];
30227 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
30228- (u64) atomic64_add_return(sdata, maddr) - sdata :
30229+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
30230 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
30231 be64_to_cpu(ateth->compare_data),
30232 sdata);
30233diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
30234index 1f95bba..9530f87 100644
30235--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
30236+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
30237@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
30238 unsigned long flags;
30239 struct ib_wc wc;
30240 u64 sdata;
30241- atomic64_t *maddr;
30242+ atomic64_unchecked_t *maddr;
30243 enum ib_wc_status send_status;
30244
30245 /*
30246@@ -382,11 +382,11 @@ again:
30247 IB_ACCESS_REMOTE_ATOMIC)))
30248 goto acc_err;
30249 /* Perform atomic OP and save result. */
30250- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
30251+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
30252 sdata = wqe->wr.wr.atomic.compare_add;
30253 *(u64 *) sqp->s_sge.sge.vaddr =
30254 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
30255- (u64) atomic64_add_return(sdata, maddr) - sdata :
30256+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
30257 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
30258 sdata, wqe->wr.wr.atomic.swap);
30259 goto send_comp;
30260diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
30261index 5965b3d..16817fb 100644
30262--- a/drivers/infiniband/hw/nes/nes.c
30263+++ b/drivers/infiniband/hw/nes/nes.c
30264@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
30265 LIST_HEAD(nes_adapter_list);
30266 static LIST_HEAD(nes_dev_list);
30267
30268-atomic_t qps_destroyed;
30269+atomic_unchecked_t qps_destroyed;
30270
30271 static unsigned int ee_flsh_adapter;
30272 static unsigned int sysfs_nonidx_addr;
30273@@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
30274 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
30275 struct nes_adapter *nesadapter = nesdev->nesadapter;
30276
30277- atomic_inc(&qps_destroyed);
30278+ atomic_inc_unchecked(&qps_destroyed);
30279
30280 /* Free the control structures */
30281
30282diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
30283index 568b4f1..5ea3eff 100644
30284--- a/drivers/infiniband/hw/nes/nes.h
30285+++ b/drivers/infiniband/hw/nes/nes.h
30286@@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
30287 extern unsigned int wqm_quanta;
30288 extern struct list_head nes_adapter_list;
30289
30290-extern atomic_t cm_connects;
30291-extern atomic_t cm_accepts;
30292-extern atomic_t cm_disconnects;
30293-extern atomic_t cm_closes;
30294-extern atomic_t cm_connecteds;
30295-extern atomic_t cm_connect_reqs;
30296-extern atomic_t cm_rejects;
30297-extern atomic_t mod_qp_timouts;
30298-extern atomic_t qps_created;
30299-extern atomic_t qps_destroyed;
30300-extern atomic_t sw_qps_destroyed;
30301+extern atomic_unchecked_t cm_connects;
30302+extern atomic_unchecked_t cm_accepts;
30303+extern atomic_unchecked_t cm_disconnects;
30304+extern atomic_unchecked_t cm_closes;
30305+extern atomic_unchecked_t cm_connecteds;
30306+extern atomic_unchecked_t cm_connect_reqs;
30307+extern atomic_unchecked_t cm_rejects;
30308+extern atomic_unchecked_t mod_qp_timouts;
30309+extern atomic_unchecked_t qps_created;
30310+extern atomic_unchecked_t qps_destroyed;
30311+extern atomic_unchecked_t sw_qps_destroyed;
30312 extern u32 mh_detected;
30313 extern u32 mh_pauses_sent;
30314 extern u32 cm_packets_sent;
30315@@ -197,16 +197,16 @@ extern u32 cm_packets_created;
30316 extern u32 cm_packets_received;
30317 extern u32 cm_packets_dropped;
30318 extern u32 cm_packets_retrans;
30319-extern atomic_t cm_listens_created;
30320-extern atomic_t cm_listens_destroyed;
30321+extern atomic_unchecked_t cm_listens_created;
30322+extern atomic_unchecked_t cm_listens_destroyed;
30323 extern u32 cm_backlog_drops;
30324-extern atomic_t cm_loopbacks;
30325-extern atomic_t cm_nodes_created;
30326-extern atomic_t cm_nodes_destroyed;
30327-extern atomic_t cm_accel_dropped_pkts;
30328-extern atomic_t cm_resets_recvd;
30329-extern atomic_t pau_qps_created;
30330-extern atomic_t pau_qps_destroyed;
30331+extern atomic_unchecked_t cm_loopbacks;
30332+extern atomic_unchecked_t cm_nodes_created;
30333+extern atomic_unchecked_t cm_nodes_destroyed;
30334+extern atomic_unchecked_t cm_accel_dropped_pkts;
30335+extern atomic_unchecked_t cm_resets_recvd;
30336+extern atomic_unchecked_t pau_qps_created;
30337+extern atomic_unchecked_t pau_qps_destroyed;
30338
30339 extern u32 int_mod_timer_init;
30340 extern u32 int_mod_cq_depth_256;
30341diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
30342index 0a52d72..0642f36 100644
30343--- a/drivers/infiniband/hw/nes/nes_cm.c
30344+++ b/drivers/infiniband/hw/nes/nes_cm.c
30345@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
30346 u32 cm_packets_retrans;
30347 u32 cm_packets_created;
30348 u32 cm_packets_received;
30349-atomic_t cm_listens_created;
30350-atomic_t cm_listens_destroyed;
30351+atomic_unchecked_t cm_listens_created;
30352+atomic_unchecked_t cm_listens_destroyed;
30353 u32 cm_backlog_drops;
30354-atomic_t cm_loopbacks;
30355-atomic_t cm_nodes_created;
30356-atomic_t cm_nodes_destroyed;
30357-atomic_t cm_accel_dropped_pkts;
30358-atomic_t cm_resets_recvd;
30359+atomic_unchecked_t cm_loopbacks;
30360+atomic_unchecked_t cm_nodes_created;
30361+atomic_unchecked_t cm_nodes_destroyed;
30362+atomic_unchecked_t cm_accel_dropped_pkts;
30363+atomic_unchecked_t cm_resets_recvd;
30364
30365 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
30366 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
30367@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
30368
30369 static struct nes_cm_core *g_cm_core;
30370
30371-atomic_t cm_connects;
30372-atomic_t cm_accepts;
30373-atomic_t cm_disconnects;
30374-atomic_t cm_closes;
30375-atomic_t cm_connecteds;
30376-atomic_t cm_connect_reqs;
30377-atomic_t cm_rejects;
30378+atomic_unchecked_t cm_connects;
30379+atomic_unchecked_t cm_accepts;
30380+atomic_unchecked_t cm_disconnects;
30381+atomic_unchecked_t cm_closes;
30382+atomic_unchecked_t cm_connecteds;
30383+atomic_unchecked_t cm_connect_reqs;
30384+atomic_unchecked_t cm_rejects;
30385
30386 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
30387 {
30388@@ -1271,7 +1271,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
30389 kfree(listener);
30390 listener = NULL;
30391 ret = 0;
30392- atomic_inc(&cm_listens_destroyed);
30393+ atomic_inc_unchecked(&cm_listens_destroyed);
30394 } else {
30395 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
30396 }
30397@@ -1473,7 +1473,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
30398 cm_node->rem_mac);
30399
30400 add_hte_node(cm_core, cm_node);
30401- atomic_inc(&cm_nodes_created);
30402+ atomic_inc_unchecked(&cm_nodes_created);
30403
30404 return cm_node;
30405 }
30406@@ -1531,7 +1531,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
30407 }
30408
30409 atomic_dec(&cm_core->node_cnt);
30410- atomic_inc(&cm_nodes_destroyed);
30411+ atomic_inc_unchecked(&cm_nodes_destroyed);
30412 nesqp = cm_node->nesqp;
30413 if (nesqp) {
30414 nesqp->cm_node = NULL;
30415@@ -1595,7 +1595,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
30416
30417 static void drop_packet(struct sk_buff *skb)
30418 {
30419- atomic_inc(&cm_accel_dropped_pkts);
30420+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30421 dev_kfree_skb_any(skb);
30422 }
30423
30424@@ -1658,7 +1658,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
30425 {
30426
30427 int reset = 0; /* whether to send reset in case of err.. */
30428- atomic_inc(&cm_resets_recvd);
30429+ atomic_inc_unchecked(&cm_resets_recvd);
30430 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30431 " refcnt=%d\n", cm_node, cm_node->state,
30432 atomic_read(&cm_node->ref_count));
30433@@ -2299,7 +2299,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
30434 rem_ref_cm_node(cm_node->cm_core, cm_node);
30435 return NULL;
30436 }
30437- atomic_inc(&cm_loopbacks);
30438+ atomic_inc_unchecked(&cm_loopbacks);
30439 loopbackremotenode->loopbackpartner = cm_node;
30440 loopbackremotenode->tcp_cntxt.rcv_wscale =
30441 NES_CM_DEFAULT_RCV_WND_SCALE;
30442@@ -2574,7 +2574,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
30443 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
30444 else {
30445 rem_ref_cm_node(cm_core, cm_node);
30446- atomic_inc(&cm_accel_dropped_pkts);
30447+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30448 dev_kfree_skb_any(skb);
30449 }
30450 break;
30451@@ -2880,7 +2880,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30452
30453 if ((cm_id) && (cm_id->event_handler)) {
30454 if (issue_disconn) {
30455- atomic_inc(&cm_disconnects);
30456+ atomic_inc_unchecked(&cm_disconnects);
30457 cm_event.event = IW_CM_EVENT_DISCONNECT;
30458 cm_event.status = disconn_status;
30459 cm_event.local_addr = cm_id->local_addr;
30460@@ -2902,7 +2902,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30461 }
30462
30463 if (issue_close) {
30464- atomic_inc(&cm_closes);
30465+ atomic_inc_unchecked(&cm_closes);
30466 nes_disconnect(nesqp, 1);
30467
30468 cm_id->provider_data = nesqp;
30469@@ -3038,7 +3038,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30470
30471 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30472 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30473- atomic_inc(&cm_accepts);
30474+ atomic_inc_unchecked(&cm_accepts);
30475
30476 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30477 netdev_refcnt_read(nesvnic->netdev));
30478@@ -3240,7 +3240,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
30479 struct nes_cm_core *cm_core;
30480 u8 *start_buff;
30481
30482- atomic_inc(&cm_rejects);
30483+ atomic_inc_unchecked(&cm_rejects);
30484 cm_node = (struct nes_cm_node *)cm_id->provider_data;
30485 loopback = cm_node->loopbackpartner;
30486 cm_core = cm_node->cm_core;
30487@@ -3300,7 +3300,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30488 ntohl(cm_id->local_addr.sin_addr.s_addr),
30489 ntohs(cm_id->local_addr.sin_port));
30490
30491- atomic_inc(&cm_connects);
30492+ atomic_inc_unchecked(&cm_connects);
30493 nesqp->active_conn = 1;
30494
30495 /* cache the cm_id in the qp */
30496@@ -3406,7 +3406,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
30497 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
30498 return err;
30499 }
30500- atomic_inc(&cm_listens_created);
30501+ atomic_inc_unchecked(&cm_listens_created);
30502 }
30503
30504 cm_id->add_ref(cm_id);
30505@@ -3507,7 +3507,7 @@ static void cm_event_connected(struct nes_cm_event *event)
30506
30507 if (nesqp->destroyed)
30508 return;
30509- atomic_inc(&cm_connecteds);
30510+ atomic_inc_unchecked(&cm_connecteds);
30511 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
30512 " local port 0x%04X. jiffies = %lu.\n",
30513 nesqp->hwqp.qp_id,
30514@@ -3694,7 +3694,7 @@ static void cm_event_reset(struct nes_cm_event *event)
30515
30516 cm_id->add_ref(cm_id);
30517 ret = cm_id->event_handler(cm_id, &cm_event);
30518- atomic_inc(&cm_closes);
30519+ atomic_inc_unchecked(&cm_closes);
30520 cm_event.event = IW_CM_EVENT_CLOSE;
30521 cm_event.status = 0;
30522 cm_event.provider_data = cm_id->provider_data;
30523@@ -3730,7 +3730,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
30524 return;
30525 cm_id = cm_node->cm_id;
30526
30527- atomic_inc(&cm_connect_reqs);
30528+ atomic_inc_unchecked(&cm_connect_reqs);
30529 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30530 cm_node, cm_id, jiffies);
30531
30532@@ -3770,7 +3770,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
30533 return;
30534 cm_id = cm_node->cm_id;
30535
30536- atomic_inc(&cm_connect_reqs);
30537+ atomic_inc_unchecked(&cm_connect_reqs);
30538 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30539 cm_node, cm_id, jiffies);
30540
30541diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
30542index b3b2a24..7bfaf1e 100644
30543--- a/drivers/infiniband/hw/nes/nes_mgt.c
30544+++ b/drivers/infiniband/hw/nes/nes_mgt.c
30545@@ -40,8 +40,8 @@
30546 #include "nes.h"
30547 #include "nes_mgt.h"
30548
30549-atomic_t pau_qps_created;
30550-atomic_t pau_qps_destroyed;
30551+atomic_unchecked_t pau_qps_created;
30552+atomic_unchecked_t pau_qps_destroyed;
30553
30554 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
30555 {
30556@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
30557 {
30558 struct sk_buff *skb;
30559 unsigned long flags;
30560- atomic_inc(&pau_qps_destroyed);
30561+ atomic_inc_unchecked(&pau_qps_destroyed);
30562
30563 /* Free packets that have not yet been forwarded */
30564 /* Lock is acquired by skb_dequeue when removing the skb */
30565@@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
30566 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
30567 skb_queue_head_init(&nesqp->pau_list);
30568 spin_lock_init(&nesqp->pau_lock);
30569- atomic_inc(&pau_qps_created);
30570+ atomic_inc_unchecked(&pau_qps_created);
30571 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
30572 }
30573
30574diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
30575index c00d2f3..8834298 100644
30576--- a/drivers/infiniband/hw/nes/nes_nic.c
30577+++ b/drivers/infiniband/hw/nes/nes_nic.c
30578@@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
30579 target_stat_values[++index] = mh_detected;
30580 target_stat_values[++index] = mh_pauses_sent;
30581 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
30582- target_stat_values[++index] = atomic_read(&cm_connects);
30583- target_stat_values[++index] = atomic_read(&cm_accepts);
30584- target_stat_values[++index] = atomic_read(&cm_disconnects);
30585- target_stat_values[++index] = atomic_read(&cm_connecteds);
30586- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
30587- target_stat_values[++index] = atomic_read(&cm_rejects);
30588- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
30589- target_stat_values[++index] = atomic_read(&qps_created);
30590- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
30591- target_stat_values[++index] = atomic_read(&qps_destroyed);
30592- target_stat_values[++index] = atomic_read(&cm_closes);
30593+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
30594+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
30595+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
30596+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
30597+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
30598+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
30599+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
30600+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
30601+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
30602+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
30603+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
30604 target_stat_values[++index] = cm_packets_sent;
30605 target_stat_values[++index] = cm_packets_bounced;
30606 target_stat_values[++index] = cm_packets_created;
30607 target_stat_values[++index] = cm_packets_received;
30608 target_stat_values[++index] = cm_packets_dropped;
30609 target_stat_values[++index] = cm_packets_retrans;
30610- target_stat_values[++index] = atomic_read(&cm_listens_created);
30611- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
30612+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
30613+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
30614 target_stat_values[++index] = cm_backlog_drops;
30615- target_stat_values[++index] = atomic_read(&cm_loopbacks);
30616- target_stat_values[++index] = atomic_read(&cm_nodes_created);
30617- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
30618- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
30619- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
30620+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
30621+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
30622+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
30623+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
30624+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
30625 target_stat_values[++index] = nesadapter->free_4kpbl;
30626 target_stat_values[++index] = nesadapter->free_256pbl;
30627 target_stat_values[++index] = int_mod_timer_init;
30628 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
30629 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
30630 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
30631- target_stat_values[++index] = atomic_read(&pau_qps_created);
30632- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
30633+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
30634+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
30635 }
30636
30637 /**
30638diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
30639index 5095bc4..41e8fff 100644
30640--- a/drivers/infiniband/hw/nes/nes_verbs.c
30641+++ b/drivers/infiniband/hw/nes/nes_verbs.c
30642@@ -46,9 +46,9 @@
30643
30644 #include <rdma/ib_umem.h>
30645
30646-atomic_t mod_qp_timouts;
30647-atomic_t qps_created;
30648-atomic_t sw_qps_destroyed;
30649+atomic_unchecked_t mod_qp_timouts;
30650+atomic_unchecked_t qps_created;
30651+atomic_unchecked_t sw_qps_destroyed;
30652
30653 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
30654
30655@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
30656 if (init_attr->create_flags)
30657 return ERR_PTR(-EINVAL);
30658
30659- atomic_inc(&qps_created);
30660+ atomic_inc_unchecked(&qps_created);
30661 switch (init_attr->qp_type) {
30662 case IB_QPT_RC:
30663 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
30664@@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
30665 struct iw_cm_event cm_event;
30666 int ret = 0;
30667
30668- atomic_inc(&sw_qps_destroyed);
30669+ atomic_inc_unchecked(&sw_qps_destroyed);
30670 nesqp->destroyed = 1;
30671
30672 /* Blow away the connection if it exists. */
30673diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
30674index b881bdc..c2e360c 100644
30675--- a/drivers/infiniband/hw/qib/qib.h
30676+++ b/drivers/infiniband/hw/qib/qib.h
30677@@ -51,6 +51,7 @@
30678 #include <linux/completion.h>
30679 #include <linux/kref.h>
30680 #include <linux/sched.h>
30681+#include <linux/slab.h>
30682
30683 #include "qib_common.h"
30684 #include "qib_verbs.h"
30685diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
30686index c351aa4..e6967c2 100644
30687--- a/drivers/input/gameport/gameport.c
30688+++ b/drivers/input/gameport/gameport.c
30689@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
30690 */
30691 static void gameport_init_port(struct gameport *gameport)
30692 {
30693- static atomic_t gameport_no = ATOMIC_INIT(0);
30694+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
30695
30696 __module_get(THIS_MODULE);
30697
30698 mutex_init(&gameport->drv_mutex);
30699 device_initialize(&gameport->dev);
30700 dev_set_name(&gameport->dev, "gameport%lu",
30701- (unsigned long)atomic_inc_return(&gameport_no) - 1);
30702+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
30703 gameport->dev.bus = &gameport_bus;
30704 gameport->dev.release = gameport_release_port;
30705 if (gameport->parent)
30706diff --git a/drivers/input/input.c b/drivers/input/input.c
30707index da38d97..2aa0b79 100644
30708--- a/drivers/input/input.c
30709+++ b/drivers/input/input.c
30710@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
30711 */
30712 int input_register_device(struct input_dev *dev)
30713 {
30714- static atomic_t input_no = ATOMIC_INIT(0);
30715+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
30716 struct input_handler *handler;
30717 const char *path;
30718 int error;
30719@@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
30720 dev->setkeycode = input_default_setkeycode;
30721
30722 dev_set_name(&dev->dev, "input%ld",
30723- (unsigned long) atomic_inc_return(&input_no) - 1);
30724+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
30725
30726 error = device_add(&dev->dev);
30727 if (error)
30728diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
30729index b8d8611..7a4a04b 100644
30730--- a/drivers/input/joystick/sidewinder.c
30731+++ b/drivers/input/joystick/sidewinder.c
30732@@ -30,6 +30,7 @@
30733 #include <linux/kernel.h>
30734 #include <linux/module.h>
30735 #include <linux/slab.h>
30736+#include <linux/sched.h>
30737 #include <linux/init.h>
30738 #include <linux/input.h>
30739 #include <linux/gameport.h>
30740diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
30741index d728875..844c89b 100644
30742--- a/drivers/input/joystick/xpad.c
30743+++ b/drivers/input/joystick/xpad.c
30744@@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
30745
30746 static int xpad_led_probe(struct usb_xpad *xpad)
30747 {
30748- static atomic_t led_seq = ATOMIC_INIT(0);
30749+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
30750 long led_no;
30751 struct xpad_led *led;
30752 struct led_classdev *led_cdev;
30753@@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
30754 if (!led)
30755 return -ENOMEM;
30756
30757- led_no = (long)atomic_inc_return(&led_seq) - 1;
30758+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
30759
30760 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
30761 led->xpad = xpad;
30762diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
30763index 0110b5a..d3ad144 100644
30764--- a/drivers/input/mousedev.c
30765+++ b/drivers/input/mousedev.c
30766@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
30767
30768 spin_unlock_irq(&client->packet_lock);
30769
30770- if (copy_to_user(buffer, data, count))
30771+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
30772 return -EFAULT;
30773
30774 return count;
30775diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
30776index ba70058..571d25d 100644
30777--- a/drivers/input/serio/serio.c
30778+++ b/drivers/input/serio/serio.c
30779@@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
30780 */
30781 static void serio_init_port(struct serio *serio)
30782 {
30783- static atomic_t serio_no = ATOMIC_INIT(0);
30784+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
30785
30786 __module_get(THIS_MODULE);
30787
30788@@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
30789 mutex_init(&serio->drv_mutex);
30790 device_initialize(&serio->dev);
30791 dev_set_name(&serio->dev, "serio%ld",
30792- (long)atomic_inc_return(&serio_no) - 1);
30793+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
30794 serio->dev.bus = &serio_bus;
30795 serio->dev.release = serio_release_port;
30796 serio->dev.groups = serio_device_attr_groups;
30797diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
30798index e44933d..9ba484a 100644
30799--- a/drivers/isdn/capi/capi.c
30800+++ b/drivers/isdn/capi/capi.c
30801@@ -83,8 +83,8 @@ struct capiminor {
30802
30803 struct capi20_appl *ap;
30804 u32 ncci;
30805- atomic_t datahandle;
30806- atomic_t msgid;
30807+ atomic_unchecked_t datahandle;
30808+ atomic_unchecked_t msgid;
30809
30810 struct tty_port port;
30811 int ttyinstop;
30812@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
30813 capimsg_setu16(s, 2, mp->ap->applid);
30814 capimsg_setu8 (s, 4, CAPI_DATA_B3);
30815 capimsg_setu8 (s, 5, CAPI_RESP);
30816- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
30817+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
30818 capimsg_setu32(s, 8, mp->ncci);
30819 capimsg_setu16(s, 12, datahandle);
30820 }
30821@@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
30822 mp->outbytes -= len;
30823 spin_unlock_bh(&mp->outlock);
30824
30825- datahandle = atomic_inc_return(&mp->datahandle);
30826+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
30827 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
30828 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30829 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30830 capimsg_setu16(skb->data, 2, mp->ap->applid);
30831 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
30832 capimsg_setu8 (skb->data, 5, CAPI_REQ);
30833- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
30834+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
30835 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
30836 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
30837 capimsg_setu16(skb->data, 16, len); /* Data length */
30838diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
30839index db621db..825ea1a 100644
30840--- a/drivers/isdn/gigaset/common.c
30841+++ b/drivers/isdn/gigaset/common.c
30842@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
30843 cs->commands_pending = 0;
30844 cs->cur_at_seq = 0;
30845 cs->gotfwver = -1;
30846- cs->open_count = 0;
30847+ local_set(&cs->open_count, 0);
30848 cs->dev = NULL;
30849 cs->tty = NULL;
30850 cs->tty_dev = NULL;
30851diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
30852index 212efaf..f187c6b 100644
30853--- a/drivers/isdn/gigaset/gigaset.h
30854+++ b/drivers/isdn/gigaset/gigaset.h
30855@@ -35,6 +35,7 @@
30856 #include <linux/tty_driver.h>
30857 #include <linux/list.h>
30858 #include <linux/atomic.h>
30859+#include <asm/local.h>
30860
30861 #define GIG_VERSION {0, 5, 0, 0}
30862 #define GIG_COMPAT {0, 4, 0, 0}
30863@@ -433,7 +434,7 @@ struct cardstate {
30864 spinlock_t cmdlock;
30865 unsigned curlen, cmdbytes;
30866
30867- unsigned open_count;
30868+ local_t open_count;
30869 struct tty_struct *tty;
30870 struct tasklet_struct if_wake_tasklet;
30871 unsigned control_state;
30872diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
30873index ee0a549..a7c9798 100644
30874--- a/drivers/isdn/gigaset/interface.c
30875+++ b/drivers/isdn/gigaset/interface.c
30876@@ -163,9 +163,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
30877 }
30878 tty->driver_data = cs;
30879
30880- ++cs->open_count;
30881-
30882- if (cs->open_count == 1) {
30883+ if (local_inc_return(&cs->open_count) == 1) {
30884 spin_lock_irqsave(&cs->lock, flags);
30885 cs->tty = tty;
30886 spin_unlock_irqrestore(&cs->lock, flags);
30887@@ -193,10 +191,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
30888
30889 if (!cs->connected)
30890 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30891- else if (!cs->open_count)
30892+ else if (!local_read(&cs->open_count))
30893 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30894 else {
30895- if (!--cs->open_count) {
30896+ if (!local_dec_return(&cs->open_count)) {
30897 spin_lock_irqsave(&cs->lock, flags);
30898 cs->tty = NULL;
30899 spin_unlock_irqrestore(&cs->lock, flags);
30900@@ -231,7 +229,7 @@ static int if_ioctl(struct tty_struct *tty,
30901 if (!cs->connected) {
30902 gig_dbg(DEBUG_IF, "not connected");
30903 retval = -ENODEV;
30904- } else if (!cs->open_count)
30905+ } else if (!local_read(&cs->open_count))
30906 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30907 else {
30908 retval = 0;
30909@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
30910 retval = -ENODEV;
30911 goto done;
30912 }
30913- if (!cs->open_count) {
30914+ if (!local_read(&cs->open_count)) {
30915 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30916 retval = -ENODEV;
30917 goto done;
30918@@ -414,7 +412,7 @@ static int if_write_room(struct tty_struct *tty)
30919 if (!cs->connected) {
30920 gig_dbg(DEBUG_IF, "not connected");
30921 retval = -ENODEV;
30922- } else if (!cs->open_count)
30923+ } else if (!local_read(&cs->open_count))
30924 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30925 else if (cs->mstate != MS_LOCKED) {
30926 dev_warn(cs->dev, "can't write to unlocked device\n");
30927@@ -444,7 +442,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
30928
30929 if (!cs->connected)
30930 gig_dbg(DEBUG_IF, "not connected");
30931- else if (!cs->open_count)
30932+ else if (!local_read(&cs->open_count))
30933 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30934 else if (cs->mstate != MS_LOCKED)
30935 dev_warn(cs->dev, "can't write to unlocked device\n");
30936@@ -472,7 +470,7 @@ static void if_throttle(struct tty_struct *tty)
30937
30938 if (!cs->connected)
30939 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30940- else if (!cs->open_count)
30941+ else if (!local_read(&cs->open_count))
30942 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30943 else
30944 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30945@@ -496,7 +494,7 @@ static void if_unthrottle(struct tty_struct *tty)
30946
30947 if (!cs->connected)
30948 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30949- else if (!cs->open_count)
30950+ else if (!local_read(&cs->open_count))
30951 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30952 else
30953 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30954@@ -527,7 +525,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
30955 goto out;
30956 }
30957
30958- if (!cs->open_count) {
30959+ if (!local_read(&cs->open_count)) {
30960 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30961 goto out;
30962 }
30963diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
30964index 2a57da59..e7a12ed 100644
30965--- a/drivers/isdn/hardware/avm/b1.c
30966+++ b/drivers/isdn/hardware/avm/b1.c
30967@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
30968 }
30969 if (left) {
30970 if (t4file->user) {
30971- if (copy_from_user(buf, dp, left))
30972+ if (left > sizeof buf || copy_from_user(buf, dp, left))
30973 return -EFAULT;
30974 } else {
30975 memcpy(buf, dp, left);
30976@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
30977 }
30978 if (left) {
30979 if (config->user) {
30980- if (copy_from_user(buf, dp, left))
30981+ if (left > sizeof buf || copy_from_user(buf, dp, left))
30982 return -EFAULT;
30983 } else {
30984 memcpy(buf, dp, left);
30985diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
30986index 85784a7..a19ca98 100644
30987--- a/drivers/isdn/hardware/eicon/divasync.h
30988+++ b/drivers/isdn/hardware/eicon/divasync.h
30989@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
30990 } diva_didd_add_adapter_t;
30991 typedef struct _diva_didd_remove_adapter {
30992 IDI_CALL p_request;
30993-} diva_didd_remove_adapter_t;
30994+} __no_const diva_didd_remove_adapter_t;
30995 typedef struct _diva_didd_read_adapter_array {
30996 void * buffer;
30997 dword length;
30998diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
30999index a3bd163..8956575 100644
31000--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
31001+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
31002@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
31003 typedef struct _diva_os_idi_adapter_interface {
31004 diva_init_card_proc_t cleanup_adapter_proc;
31005 diva_cmd_card_proc_t cmd_proc;
31006-} diva_os_idi_adapter_interface_t;
31007+} __no_const diva_os_idi_adapter_interface_t;
31008
31009 typedef struct _diva_os_xdi_adapter {
31010 struct list_head link;
31011diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
31012index 2339d73..802ab87 100644
31013--- a/drivers/isdn/i4l/isdn_net.c
31014+++ b/drivers/isdn/i4l/isdn_net.c
31015@@ -1901,7 +1901,7 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
31016 {
31017 isdn_net_local *lp = netdev_priv(dev);
31018 unsigned char *p;
31019- ushort len = 0;
31020+ int len = 0;
31021
31022 switch (lp->p_encap) {
31023 case ISDN_NET_ENCAP_ETHER:
31024diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
31025index 1f355bb..43f1fea 100644
31026--- a/drivers/isdn/icn/icn.c
31027+++ b/drivers/isdn/icn/icn.c
31028@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
31029 if (count > len)
31030 count = len;
31031 if (user) {
31032- if (copy_from_user(msg, buf, count))
31033+ if (count > sizeof msg || copy_from_user(msg, buf, count))
31034 return -EFAULT;
31035 } else
31036 memcpy(msg, buf, count);
31037diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
31038index b5fdcb7..5b6c59f 100644
31039--- a/drivers/lguest/core.c
31040+++ b/drivers/lguest/core.c
31041@@ -92,9 +92,17 @@ static __init int map_switcher(void)
31042 * it's worked so far. The end address needs +1 because __get_vm_area
31043 * allocates an extra guard page, so we need space for that.
31044 */
31045+
31046+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31047+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31048+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
31049+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31050+#else
31051 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31052 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
31053 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31054+#endif
31055+
31056 if (!switcher_vma) {
31057 err = -ENOMEM;
31058 printk("lguest: could not map switcher pages high\n");
31059@@ -119,7 +127,7 @@ static __init int map_switcher(void)
31060 * Now the Switcher is mapped at the right address, we can't fail!
31061 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
31062 */
31063- memcpy(switcher_vma->addr, start_switcher_text,
31064+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
31065 end_switcher_text - start_switcher_text);
31066
31067 printk(KERN_INFO "lguest: mapped switcher at %p\n",
31068diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
31069index 65af42f..530c87a 100644
31070--- a/drivers/lguest/x86/core.c
31071+++ b/drivers/lguest/x86/core.c
31072@@ -59,7 +59,7 @@ static struct {
31073 /* Offset from where switcher.S was compiled to where we've copied it */
31074 static unsigned long switcher_offset(void)
31075 {
31076- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
31077+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
31078 }
31079
31080 /* This cpu's struct lguest_pages. */
31081@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
31082 * These copies are pretty cheap, so we do them unconditionally: */
31083 /* Save the current Host top-level page directory.
31084 */
31085+
31086+#ifdef CONFIG_PAX_PER_CPU_PGD
31087+ pages->state.host_cr3 = read_cr3();
31088+#else
31089 pages->state.host_cr3 = __pa(current->mm->pgd);
31090+#endif
31091+
31092 /*
31093 * Set up the Guest's page tables to see this CPU's pages (and no
31094 * other CPU's pages).
31095@@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
31096 * compiled-in switcher code and the high-mapped copy we just made.
31097 */
31098 for (i = 0; i < IDT_ENTRIES; i++)
31099- default_idt_entries[i] += switcher_offset();
31100+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
31101
31102 /*
31103 * Set up the Switcher's per-cpu areas.
31104@@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
31105 * it will be undisturbed when we switch. To change %cs and jump we
31106 * need this structure to feed to Intel's "lcall" instruction.
31107 */
31108- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
31109+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
31110 lguest_entry.segment = LGUEST_CS;
31111
31112 /*
31113diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
31114index 40634b0..4f5855e 100644
31115--- a/drivers/lguest/x86/switcher_32.S
31116+++ b/drivers/lguest/x86/switcher_32.S
31117@@ -87,6 +87,7 @@
31118 #include <asm/page.h>
31119 #include <asm/segment.h>
31120 #include <asm/lguest.h>
31121+#include <asm/processor-flags.h>
31122
31123 // We mark the start of the code to copy
31124 // It's placed in .text tho it's never run here
31125@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
31126 // Changes type when we load it: damn Intel!
31127 // For after we switch over our page tables
31128 // That entry will be read-only: we'd crash.
31129+
31130+#ifdef CONFIG_PAX_KERNEXEC
31131+ mov %cr0, %edx
31132+ xor $X86_CR0_WP, %edx
31133+ mov %edx, %cr0
31134+#endif
31135+
31136 movl $(GDT_ENTRY_TSS*8), %edx
31137 ltr %dx
31138
31139@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
31140 // Let's clear it again for our return.
31141 // The GDT descriptor of the Host
31142 // Points to the table after two "size" bytes
31143- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
31144+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
31145 // Clear "used" from type field (byte 5, bit 2)
31146- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
31147+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
31148+
31149+#ifdef CONFIG_PAX_KERNEXEC
31150+ mov %cr0, %eax
31151+ xor $X86_CR0_WP, %eax
31152+ mov %eax, %cr0
31153+#endif
31154
31155 // Once our page table's switched, the Guest is live!
31156 // The Host fades as we run this final step.
31157@@ -295,13 +309,12 @@ deliver_to_host:
31158 // I consulted gcc, and it gave
31159 // These instructions, which I gladly credit:
31160 leal (%edx,%ebx,8), %eax
31161- movzwl (%eax),%edx
31162- movl 4(%eax), %eax
31163- xorw %ax, %ax
31164- orl %eax, %edx
31165+ movl 4(%eax), %edx
31166+ movw (%eax), %dx
31167 // Now the address of the handler's in %edx
31168 // We call it now: its "iret" drops us home.
31169- jmp *%edx
31170+ ljmp $__KERNEL_CS, $1f
31171+1: jmp *%edx
31172
31173 // Every interrupt can come to us here
31174 // But we must truly tell each apart.
31175diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
31176index 4daf9e5..b8d1d0f 100644
31177--- a/drivers/macintosh/macio_asic.c
31178+++ b/drivers/macintosh/macio_asic.c
31179@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
31180 * MacIO is matched against any Apple ID, it's probe() function
31181 * will then decide wether it applies or not
31182 */
31183-static const struct pci_device_id __devinitdata pci_ids [] = { {
31184+static const struct pci_device_id __devinitconst pci_ids [] = { {
31185 .vendor = PCI_VENDOR_ID_APPLE,
31186 .device = PCI_ANY_ID,
31187 .subvendor = PCI_ANY_ID,
31188diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
31189index 31c2dc2..a2de7a6 100644
31190--- a/drivers/md/dm-ioctl.c
31191+++ b/drivers/md/dm-ioctl.c
31192@@ -1589,7 +1589,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
31193 cmd == DM_LIST_VERSIONS_CMD)
31194 return 0;
31195
31196- if ((cmd == DM_DEV_CREATE_CMD)) {
31197+ if (cmd == DM_DEV_CREATE_CMD) {
31198 if (!*param->name) {
31199 DMWARN("name not supplied when creating device");
31200 return -EINVAL;
31201diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
31202index 9bfd057..01180bc 100644
31203--- a/drivers/md/dm-raid1.c
31204+++ b/drivers/md/dm-raid1.c
31205@@ -40,7 +40,7 @@ enum dm_raid1_error {
31206
31207 struct mirror {
31208 struct mirror_set *ms;
31209- atomic_t error_count;
31210+ atomic_unchecked_t error_count;
31211 unsigned long error_type;
31212 struct dm_dev *dev;
31213 sector_t offset;
31214@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
31215 struct mirror *m;
31216
31217 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
31218- if (!atomic_read(&m->error_count))
31219+ if (!atomic_read_unchecked(&m->error_count))
31220 return m;
31221
31222 return NULL;
31223@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
31224 * simple way to tell if a device has encountered
31225 * errors.
31226 */
31227- atomic_inc(&m->error_count);
31228+ atomic_inc_unchecked(&m->error_count);
31229
31230 if (test_and_set_bit(error_type, &m->error_type))
31231 return;
31232@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
31233 struct mirror *m = get_default_mirror(ms);
31234
31235 do {
31236- if (likely(!atomic_read(&m->error_count)))
31237+ if (likely(!atomic_read_unchecked(&m->error_count)))
31238 return m;
31239
31240 if (m-- == ms->mirror)
31241@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
31242 {
31243 struct mirror *default_mirror = get_default_mirror(m->ms);
31244
31245- return !atomic_read(&default_mirror->error_count);
31246+ return !atomic_read_unchecked(&default_mirror->error_count);
31247 }
31248
31249 static int mirror_available(struct mirror_set *ms, struct bio *bio)
31250@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
31251 */
31252 if (likely(region_in_sync(ms, region, 1)))
31253 m = choose_mirror(ms, bio->bi_sector);
31254- else if (m && atomic_read(&m->error_count))
31255+ else if (m && atomic_read_unchecked(&m->error_count))
31256 m = NULL;
31257
31258 if (likely(m))
31259@@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
31260 }
31261
31262 ms->mirror[mirror].ms = ms;
31263- atomic_set(&(ms->mirror[mirror].error_count), 0);
31264+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
31265 ms->mirror[mirror].error_type = 0;
31266 ms->mirror[mirror].offset = offset;
31267
31268@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_target *ti)
31269 */
31270 static char device_status_char(struct mirror *m)
31271 {
31272- if (!atomic_read(&(m->error_count)))
31273+ if (!atomic_read_unchecked(&(m->error_count)))
31274 return 'A';
31275
31276 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
31277diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
31278index 3d80cf0..b77cc47 100644
31279--- a/drivers/md/dm-stripe.c
31280+++ b/drivers/md/dm-stripe.c
31281@@ -20,7 +20,7 @@ struct stripe {
31282 struct dm_dev *dev;
31283 sector_t physical_start;
31284
31285- atomic_t error_count;
31286+ atomic_unchecked_t error_count;
31287 };
31288
31289 struct stripe_c {
31290@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
31291 kfree(sc);
31292 return r;
31293 }
31294- atomic_set(&(sc->stripe[i].error_count), 0);
31295+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31296 }
31297
31298 ti->private = sc;
31299@@ -314,7 +314,7 @@ static int stripe_status(struct dm_target *ti,
31300 DMEMIT("%d ", sc->stripes);
31301 for (i = 0; i < sc->stripes; i++) {
31302 DMEMIT("%s ", sc->stripe[i].dev->name);
31303- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31304+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31305 'D' : 'A';
31306 }
31307 buffer[i] = '\0';
31308@@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
31309 */
31310 for (i = 0; i < sc->stripes; i++)
31311 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31312- atomic_inc(&(sc->stripe[i].error_count));
31313- if (atomic_read(&(sc->stripe[i].error_count)) <
31314+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
31315+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31316 DM_IO_ERROR_THRESHOLD)
31317 schedule_work(&sc->trigger_event);
31318 }
31319diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
31320index 8e91321..fd17aef 100644
31321--- a/drivers/md/dm-table.c
31322+++ b/drivers/md/dm-table.c
31323@@ -391,7 +391,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
31324 if (!dev_size)
31325 return 0;
31326
31327- if ((start >= dev_size) || (start + len > dev_size)) {
31328+ if ((start >= dev_size) || (len > dev_size - start)) {
31329 DMWARN("%s: %s too small for target: "
31330 "start=%llu, len=%llu, dev_size=%llu",
31331 dm_device_name(ti->table->md), bdevname(bdev, b),
31332diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
31333index 59c4f04..4c7b661 100644
31334--- a/drivers/md/dm-thin-metadata.c
31335+++ b/drivers/md/dm-thin-metadata.c
31336@@ -431,7 +431,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
31337
31338 pmd->info.tm = tm;
31339 pmd->info.levels = 2;
31340- pmd->info.value_type.context = pmd->data_sm;
31341+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
31342 pmd->info.value_type.size = sizeof(__le64);
31343 pmd->info.value_type.inc = data_block_inc;
31344 pmd->info.value_type.dec = data_block_dec;
31345@@ -450,7 +450,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
31346
31347 pmd->bl_info.tm = tm;
31348 pmd->bl_info.levels = 1;
31349- pmd->bl_info.value_type.context = pmd->data_sm;
31350+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
31351 pmd->bl_info.value_type.size = sizeof(__le64);
31352 pmd->bl_info.value_type.inc = data_block_inc;
31353 pmd->bl_info.value_type.dec = data_block_dec;
31354diff --git a/drivers/md/dm.c b/drivers/md/dm.c
31355index 4720f68..78d1df7 100644
31356--- a/drivers/md/dm.c
31357+++ b/drivers/md/dm.c
31358@@ -177,9 +177,9 @@ struct mapped_device {
31359 /*
31360 * Event handling.
31361 */
31362- atomic_t event_nr;
31363+ atomic_unchecked_t event_nr;
31364 wait_queue_head_t eventq;
31365- atomic_t uevent_seq;
31366+ atomic_unchecked_t uevent_seq;
31367 struct list_head uevent_list;
31368 spinlock_t uevent_lock; /* Protect access to uevent_list */
31369
31370@@ -1845,8 +1845,8 @@ static struct mapped_device *alloc_dev(int minor)
31371 rwlock_init(&md->map_lock);
31372 atomic_set(&md->holders, 1);
31373 atomic_set(&md->open_count, 0);
31374- atomic_set(&md->event_nr, 0);
31375- atomic_set(&md->uevent_seq, 0);
31376+ atomic_set_unchecked(&md->event_nr, 0);
31377+ atomic_set_unchecked(&md->uevent_seq, 0);
31378 INIT_LIST_HEAD(&md->uevent_list);
31379 spin_lock_init(&md->uevent_lock);
31380
31381@@ -1980,7 +1980,7 @@ static void event_callback(void *context)
31382
31383 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31384
31385- atomic_inc(&md->event_nr);
31386+ atomic_inc_unchecked(&md->event_nr);
31387 wake_up(&md->eventq);
31388 }
31389
31390@@ -2622,18 +2622,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
31391
31392 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31393 {
31394- return atomic_add_return(1, &md->uevent_seq);
31395+ return atomic_add_return_unchecked(1, &md->uevent_seq);
31396 }
31397
31398 uint32_t dm_get_event_nr(struct mapped_device *md)
31399 {
31400- return atomic_read(&md->event_nr);
31401+ return atomic_read_unchecked(&md->event_nr);
31402 }
31403
31404 int dm_wait_event(struct mapped_device *md, int event_nr)
31405 {
31406 return wait_event_interruptible(md->eventq,
31407- (event_nr != atomic_read(&md->event_nr)));
31408+ (event_nr != atomic_read_unchecked(&md->event_nr)));
31409 }
31410
31411 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31412diff --git a/drivers/md/md.c b/drivers/md/md.c
31413index f47f1f8..b7f559e 100644
31414--- a/drivers/md/md.c
31415+++ b/drivers/md/md.c
31416@@ -278,10 +278,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
31417 * start build, activate spare
31418 */
31419 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
31420-static atomic_t md_event_count;
31421+static atomic_unchecked_t md_event_count;
31422 void md_new_event(struct mddev *mddev)
31423 {
31424- atomic_inc(&md_event_count);
31425+ atomic_inc_unchecked(&md_event_count);
31426 wake_up(&md_event_waiters);
31427 }
31428 EXPORT_SYMBOL_GPL(md_new_event);
31429@@ -291,7 +291,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
31430 */
31431 static void md_new_event_inintr(struct mddev *mddev)
31432 {
31433- atomic_inc(&md_event_count);
31434+ atomic_inc_unchecked(&md_event_count);
31435 wake_up(&md_event_waiters);
31436 }
31437
31438@@ -1525,7 +1525,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
31439
31440 rdev->preferred_minor = 0xffff;
31441 rdev->data_offset = le64_to_cpu(sb->data_offset);
31442- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31443+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31444
31445 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
31446 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
31447@@ -1742,7 +1742,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
31448 else
31449 sb->resync_offset = cpu_to_le64(0);
31450
31451- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
31452+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
31453
31454 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
31455 sb->size = cpu_to_le64(mddev->dev_sectors);
31456@@ -2639,7 +2639,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
31457 static ssize_t
31458 errors_show(struct md_rdev *rdev, char *page)
31459 {
31460- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
31461+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
31462 }
31463
31464 static ssize_t
31465@@ -2648,7 +2648,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
31466 char *e;
31467 unsigned long n = simple_strtoul(buf, &e, 10);
31468 if (*buf && (*e == 0 || *e == '\n')) {
31469- atomic_set(&rdev->corrected_errors, n);
31470+ atomic_set_unchecked(&rdev->corrected_errors, n);
31471 return len;
31472 }
31473 return -EINVAL;
31474@@ -3039,8 +3039,8 @@ int md_rdev_init(struct md_rdev *rdev)
31475 rdev->sb_loaded = 0;
31476 rdev->bb_page = NULL;
31477 atomic_set(&rdev->nr_pending, 0);
31478- atomic_set(&rdev->read_errors, 0);
31479- atomic_set(&rdev->corrected_errors, 0);
31480+ atomic_set_unchecked(&rdev->read_errors, 0);
31481+ atomic_set_unchecked(&rdev->corrected_errors, 0);
31482
31483 INIT_LIST_HEAD(&rdev->same_set);
31484 init_waitqueue_head(&rdev->blocked_wait);
31485@@ -6683,7 +6683,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31486
31487 spin_unlock(&pers_lock);
31488 seq_printf(seq, "\n");
31489- seq->poll_event = atomic_read(&md_event_count);
31490+ seq->poll_event = atomic_read_unchecked(&md_event_count);
31491 return 0;
31492 }
31493 if (v == (void*)2) {
31494@@ -6772,7 +6772,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31495 chunk_kb ? "KB" : "B");
31496 if (bitmap->file) {
31497 seq_printf(seq, ", file: ");
31498- seq_path(seq, &bitmap->file->f_path, " \t\n");
31499+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
31500 }
31501
31502 seq_printf(seq, "\n");
31503@@ -6803,7 +6803,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
31504 return error;
31505
31506 seq = file->private_data;
31507- seq->poll_event = atomic_read(&md_event_count);
31508+ seq->poll_event = atomic_read_unchecked(&md_event_count);
31509 return error;
31510 }
31511
31512@@ -6817,7 +6817,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
31513 /* always allow read */
31514 mask = POLLIN | POLLRDNORM;
31515
31516- if (seq->poll_event != atomic_read(&md_event_count))
31517+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
31518 mask |= POLLERR | POLLPRI;
31519 return mask;
31520 }
31521@@ -6861,7 +6861,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
31522 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
31523 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
31524 (int)part_stat_read(&disk->part0, sectors[1]) -
31525- atomic_read(&disk->sync_io);
31526+ atomic_read_unchecked(&disk->sync_io);
31527 /* sync IO will cause sync_io to increase before the disk_stats
31528 * as sync_io is counted when a request starts, and
31529 * disk_stats is counted when it completes.
31530diff --git a/drivers/md/md.h b/drivers/md/md.h
31531index cf742d9..7c7c745 100644
31532--- a/drivers/md/md.h
31533+++ b/drivers/md/md.h
31534@@ -120,13 +120,13 @@ struct md_rdev {
31535 * only maintained for arrays that
31536 * support hot removal
31537 */
31538- atomic_t read_errors; /* number of consecutive read errors that
31539+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
31540 * we have tried to ignore.
31541 */
31542 struct timespec last_read_error; /* monotonic time since our
31543 * last read error
31544 */
31545- atomic_t corrected_errors; /* number of corrected read errors,
31546+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
31547 * for reporting to userspace and storing
31548 * in superblock.
31549 */
31550@@ -410,7 +410,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
31551
31552 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
31553 {
31554- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31555+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31556 }
31557
31558 struct md_personality
31559diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
31560index 50ed53b..4f29d7d 100644
31561--- a/drivers/md/persistent-data/dm-space-map-checker.c
31562+++ b/drivers/md/persistent-data/dm-space-map-checker.c
31563@@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca)
31564 /*----------------------------------------------------------------*/
31565
31566 struct sm_checker {
31567- struct dm_space_map sm;
31568+ dm_space_map_no_const sm;
31569
31570 struct count_array old_counts;
31571 struct count_array counts;
31572diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
31573index fc469ba..2d91555 100644
31574--- a/drivers/md/persistent-data/dm-space-map-disk.c
31575+++ b/drivers/md/persistent-data/dm-space-map-disk.c
31576@@ -23,7 +23,7 @@
31577 * Space map interface.
31578 */
31579 struct sm_disk {
31580- struct dm_space_map sm;
31581+ dm_space_map_no_const sm;
31582
31583 struct ll_disk ll;
31584 struct ll_disk old_ll;
31585diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
31586index e89ae5e..062e4c2 100644
31587--- a/drivers/md/persistent-data/dm-space-map-metadata.c
31588+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
31589@@ -43,7 +43,7 @@ struct block_op {
31590 };
31591
31592 struct sm_metadata {
31593- struct dm_space_map sm;
31594+ dm_space_map_no_const sm;
31595
31596 struct ll_disk ll;
31597 struct ll_disk old_ll;
31598diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
31599index 1cbfc6b..56e1dbb 100644
31600--- a/drivers/md/persistent-data/dm-space-map.h
31601+++ b/drivers/md/persistent-data/dm-space-map.h
31602@@ -60,6 +60,7 @@ struct dm_space_map {
31603 int (*root_size)(struct dm_space_map *sm, size_t *result);
31604 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
31605 };
31606+typedef struct dm_space_map __no_const dm_space_map_no_const;
31607
31608 /*----------------------------------------------------------------*/
31609
31610diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
31611index 7d9e071..015b1d5 100644
31612--- a/drivers/md/raid1.c
31613+++ b/drivers/md/raid1.c
31614@@ -1568,7 +1568,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
31615 if (r1_sync_page_io(rdev, sect, s,
31616 bio->bi_io_vec[idx].bv_page,
31617 READ) != 0)
31618- atomic_add(s, &rdev->corrected_errors);
31619+ atomic_add_unchecked(s, &rdev->corrected_errors);
31620 }
31621 sectors -= s;
31622 sect += s;
31623@@ -1781,7 +1781,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
31624 test_bit(In_sync, &rdev->flags)) {
31625 if (r1_sync_page_io(rdev, sect, s,
31626 conf->tmppage, READ)) {
31627- atomic_add(s, &rdev->corrected_errors);
31628+ atomic_add_unchecked(s, &rdev->corrected_errors);
31629 printk(KERN_INFO
31630 "md/raid1:%s: read error corrected "
31631 "(%d sectors at %llu on %s)\n",
31632diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
31633index 685ddf3..955b087 100644
31634--- a/drivers/md/raid10.c
31635+++ b/drivers/md/raid10.c
31636@@ -1440,7 +1440,7 @@ static void end_sync_read(struct bio *bio, int error)
31637 /* The write handler will notice the lack of
31638 * R10BIO_Uptodate and record any errors etc
31639 */
31640- atomic_add(r10_bio->sectors,
31641+ atomic_add_unchecked(r10_bio->sectors,
31642 &conf->mirrors[d].rdev->corrected_errors);
31643
31644 /* for reconstruct, we always reschedule after a read.
31645@@ -1740,7 +1740,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
31646 {
31647 struct timespec cur_time_mon;
31648 unsigned long hours_since_last;
31649- unsigned int read_errors = atomic_read(&rdev->read_errors);
31650+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
31651
31652 ktime_get_ts(&cur_time_mon);
31653
31654@@ -1762,9 +1762,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
31655 * overflowing the shift of read_errors by hours_since_last.
31656 */
31657 if (hours_since_last >= 8 * sizeof(read_errors))
31658- atomic_set(&rdev->read_errors, 0);
31659+ atomic_set_unchecked(&rdev->read_errors, 0);
31660 else
31661- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
31662+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
31663 }
31664
31665 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
31666@@ -1814,8 +1814,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31667 return;
31668
31669 check_decay_read_errors(mddev, rdev);
31670- atomic_inc(&rdev->read_errors);
31671- if (atomic_read(&rdev->read_errors) > max_read_errors) {
31672+ atomic_inc_unchecked(&rdev->read_errors);
31673+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
31674 char b[BDEVNAME_SIZE];
31675 bdevname(rdev->bdev, b);
31676
31677@@ -1823,7 +1823,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31678 "md/raid10:%s: %s: Raid device exceeded "
31679 "read_error threshold [cur %d:max %d]\n",
31680 mdname(mddev), b,
31681- atomic_read(&rdev->read_errors), max_read_errors);
31682+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
31683 printk(KERN_NOTICE
31684 "md/raid10:%s: %s: Failing raid device\n",
31685 mdname(mddev), b);
31686@@ -1968,7 +1968,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31687 (unsigned long long)(
31688 sect + rdev->data_offset),
31689 bdevname(rdev->bdev, b));
31690- atomic_add(s, &rdev->corrected_errors);
31691+ atomic_add_unchecked(s, &rdev->corrected_errors);
31692 }
31693
31694 rdev_dec_pending(rdev, mddev);
31695diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
31696index 858fdbb..b2dac95 100644
31697--- a/drivers/md/raid5.c
31698+++ b/drivers/md/raid5.c
31699@@ -1610,19 +1610,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
31700 (unsigned long long)(sh->sector
31701 + rdev->data_offset),
31702 bdevname(rdev->bdev, b));
31703- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
31704+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
31705 clear_bit(R5_ReadError, &sh->dev[i].flags);
31706 clear_bit(R5_ReWrite, &sh->dev[i].flags);
31707 }
31708- if (atomic_read(&conf->disks[i].rdev->read_errors))
31709- atomic_set(&conf->disks[i].rdev->read_errors, 0);
31710+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
31711+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
31712 } else {
31713 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
31714 int retry = 0;
31715 rdev = conf->disks[i].rdev;
31716
31717 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
31718- atomic_inc(&rdev->read_errors);
31719+ atomic_inc_unchecked(&rdev->read_errors);
31720 if (conf->mddev->degraded >= conf->max_degraded)
31721 printk_ratelimited(
31722 KERN_WARNING
31723@@ -1642,7 +1642,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
31724 (unsigned long long)(sh->sector
31725 + rdev->data_offset),
31726 bdn);
31727- else if (atomic_read(&rdev->read_errors)
31728+ else if (atomic_read_unchecked(&rdev->read_errors)
31729 > conf->max_nr_stripes)
31730 printk(KERN_WARNING
31731 "md/raid:%s: Too many read errors, failing device %s.\n",
31732diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
31733index ba9a643..e474ab5 100644
31734--- a/drivers/media/dvb/ddbridge/ddbridge-core.c
31735+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
31736@@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
31737 .subvendor = _subvend, .subdevice = _subdev, \
31738 .driver_data = (unsigned long)&_driverdata }
31739
31740-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
31741+static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
31742 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
31743 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
31744 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
31745diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
31746index a7d876f..8c21b61 100644
31747--- a/drivers/media/dvb/dvb-core/dvb_demux.h
31748+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
31749@@ -73,7 +73,7 @@ struct dvb_demux_feed {
31750 union {
31751 dmx_ts_cb ts;
31752 dmx_section_cb sec;
31753- } cb;
31754+ } __no_const cb;
31755
31756 struct dvb_demux *demux;
31757 void *priv;
31758diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
31759index f732877..d38c35a 100644
31760--- a/drivers/media/dvb/dvb-core/dvbdev.c
31761+++ b/drivers/media/dvb/dvb-core/dvbdev.c
31762@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
31763 const struct dvb_device *template, void *priv, int type)
31764 {
31765 struct dvb_device *dvbdev;
31766- struct file_operations *dvbdevfops;
31767+ file_operations_no_const *dvbdevfops;
31768 struct device *clsdev;
31769 int minor;
31770 int id;
31771diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
31772index 9f2a02c..5920f88 100644
31773--- a/drivers/media/dvb/dvb-usb/cxusb.c
31774+++ b/drivers/media/dvb/dvb-usb/cxusb.c
31775@@ -1069,7 +1069,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
31776 struct dib0700_adapter_state {
31777 int (*set_param_save) (struct dvb_frontend *,
31778 struct dvb_frontend_parameters *);
31779-};
31780+} __no_const;
31781
31782 static int dib7070_set_param_override(struct dvb_frontend *fe,
31783 struct dvb_frontend_parameters *fep)
31784diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
31785index f103ec1..5e8968b 100644
31786--- a/drivers/media/dvb/dvb-usb/dw2102.c
31787+++ b/drivers/media/dvb/dvb-usb/dw2102.c
31788@@ -95,7 +95,7 @@ struct su3000_state {
31789
31790 struct s6x0_state {
31791 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
31792-};
31793+} __no_const;
31794
31795 /* debug */
31796 static int dvb_usb_dw2102_debug;
31797diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
31798index 404f63a..4796533 100644
31799--- a/drivers/media/dvb/frontends/dib3000.h
31800+++ b/drivers/media/dvb/frontends/dib3000.h
31801@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
31802 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
31803 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
31804 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
31805-};
31806+} __no_const;
31807
31808 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
31809 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
31810diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c
31811index 90bf573..e8463da 100644
31812--- a/drivers/media/dvb/frontends/ds3000.c
31813+++ b/drivers/media/dvb/frontends/ds3000.c
31814@@ -1210,7 +1210,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe,
31815
31816 for (i = 0; i < 30 ; i++) {
31817 ds3000_read_status(fe, &status);
31818- if (status && FE_HAS_LOCK)
31819+ if (status & FE_HAS_LOCK)
31820 break;
31821
31822 msleep(10);
31823diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
31824index 0564192..75b16f5 100644
31825--- a/drivers/media/dvb/ngene/ngene-cards.c
31826+++ b/drivers/media/dvb/ngene/ngene-cards.c
31827@@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
31828
31829 /****************************************************************************/
31830
31831-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
31832+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
31833 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
31834 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
31835 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
31836diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
31837index 16a089f..ab1667d 100644
31838--- a/drivers/media/radio/radio-cadet.c
31839+++ b/drivers/media/radio/radio-cadet.c
31840@@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
31841 unsigned char readbuf[RDS_BUFFER];
31842 int i = 0;
31843
31844+ if (count > RDS_BUFFER)
31845+ return -EFAULT;
31846 mutex_lock(&dev->lock);
31847 if (dev->rdsstat == 0) {
31848 dev->rdsstat = 1;
31849diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
31850index 61287fc..8b08712 100644
31851--- a/drivers/media/rc/redrat3.c
31852+++ b/drivers/media/rc/redrat3.c
31853@@ -905,7 +905,7 @@ static int redrat3_set_tx_carrier(struct rc_dev *dev, u32 carrier)
31854 return carrier;
31855 }
31856
31857-static int redrat3_transmit_ir(struct rc_dev *rcdev, int *txbuf, u32 n)
31858+static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf, u32 n)
31859 {
31860 struct redrat3_dev *rr3 = rcdev->priv;
31861 struct device *dev = rr3->dev;
31862diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
31863index 9cde353..8c6a1c3 100644
31864--- a/drivers/media/video/au0828/au0828.h
31865+++ b/drivers/media/video/au0828/au0828.h
31866@@ -191,7 +191,7 @@ struct au0828_dev {
31867
31868 /* I2C */
31869 struct i2c_adapter i2c_adap;
31870- struct i2c_algorithm i2c_algo;
31871+ i2c_algorithm_no_const i2c_algo;
31872 struct i2c_client i2c_client;
31873 u32 i2c_rc;
31874
31875diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
31876index 68d1240..46b32eb 100644
31877--- a/drivers/media/video/cx88/cx88-alsa.c
31878+++ b/drivers/media/video/cx88/cx88-alsa.c
31879@@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
31880 * Only boards with eeprom and byte 1 at eeprom=1 have it
31881 */
31882
31883-static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
31884+static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
31885 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31886 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31887 {0, }
31888diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31889index 305e6aa..0143317 100644
31890--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31891+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31892@@ -196,7 +196,7 @@ struct pvr2_hdw {
31893
31894 /* I2C stuff */
31895 struct i2c_adapter i2c_adap;
31896- struct i2c_algorithm i2c_algo;
31897+ i2c_algorithm_no_const i2c_algo;
31898 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
31899 int i2c_cx25840_hack_state;
31900 int i2c_linked;
31901diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
31902index a0895bf..b7ebb1b 100644
31903--- a/drivers/media/video/timblogiw.c
31904+++ b/drivers/media/video/timblogiw.c
31905@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
31906
31907 /* Platform device functions */
31908
31909-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31910+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
31911 .vidioc_querycap = timblogiw_querycap,
31912 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
31913 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
31914@@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31915 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
31916 };
31917
31918-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
31919+static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
31920 .owner = THIS_MODULE,
31921 .open = timblogiw_open,
31922 .release = timblogiw_close,
31923diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
31924index e9c6a60..daf6a33 100644
31925--- a/drivers/message/fusion/mptbase.c
31926+++ b/drivers/message/fusion/mptbase.c
31927@@ -6753,8 +6753,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
31928 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
31929 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
31930
31931+#ifdef CONFIG_GRKERNSEC_HIDESYM
31932+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
31933+#else
31934 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31935 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
31936+#endif
31937+
31938 /*
31939 * Rounding UP to nearest 4-kB boundary here...
31940 */
31941diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
31942index 9d95042..b808101 100644
31943--- a/drivers/message/fusion/mptsas.c
31944+++ b/drivers/message/fusion/mptsas.c
31945@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
31946 return 0;
31947 }
31948
31949+static inline void
31950+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31951+{
31952+ if (phy_info->port_details) {
31953+ phy_info->port_details->rphy = rphy;
31954+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31955+ ioc->name, rphy));
31956+ }
31957+
31958+ if (rphy) {
31959+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31960+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31961+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31962+ ioc->name, rphy, rphy->dev.release));
31963+ }
31964+}
31965+
31966 /* no mutex */
31967 static void
31968 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
31969@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
31970 return NULL;
31971 }
31972
31973-static inline void
31974-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31975-{
31976- if (phy_info->port_details) {
31977- phy_info->port_details->rphy = rphy;
31978- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31979- ioc->name, rphy));
31980- }
31981-
31982- if (rphy) {
31983- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31984- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31985- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31986- ioc->name, rphy, rphy->dev.release));
31987- }
31988-}
31989-
31990 static inline struct sas_port *
31991 mptsas_get_port(struct mptsas_phyinfo *phy_info)
31992 {
31993diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
31994index 0c3ced7..1fe34ec 100644
31995--- a/drivers/message/fusion/mptscsih.c
31996+++ b/drivers/message/fusion/mptscsih.c
31997@@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
31998
31999 h = shost_priv(SChost);
32000
32001- if (h) {
32002- if (h->info_kbuf == NULL)
32003- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32004- return h->info_kbuf;
32005- h->info_kbuf[0] = '\0';
32006+ if (!h)
32007+ return NULL;
32008
32009- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32010- h->info_kbuf[size-1] = '\0';
32011- }
32012+ if (h->info_kbuf == NULL)
32013+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32014+ return h->info_kbuf;
32015+ h->info_kbuf[0] = '\0';
32016+
32017+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32018+ h->info_kbuf[size-1] = '\0';
32019
32020 return h->info_kbuf;
32021 }
32022diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
32023index 07dbeaf..5533142 100644
32024--- a/drivers/message/i2o/i2o_proc.c
32025+++ b/drivers/message/i2o/i2o_proc.c
32026@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
32027 "Array Controller Device"
32028 };
32029
32030-static char *chtostr(u8 * chars, int n)
32031-{
32032- char tmp[256];
32033- tmp[0] = 0;
32034- return strncat(tmp, (char *)chars, n);
32035-}
32036-
32037 static int i2o_report_query_status(struct seq_file *seq, int block_status,
32038 char *group)
32039 {
32040@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
32041
32042 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
32043 seq_printf(seq, "%-#8x", ddm_table.module_id);
32044- seq_printf(seq, "%-29s",
32045- chtostr(ddm_table.module_name_version, 28));
32046+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
32047 seq_printf(seq, "%9d ", ddm_table.data_size);
32048 seq_printf(seq, "%8d", ddm_table.code_size);
32049
32050@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
32051
32052 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
32053 seq_printf(seq, "%-#8x", dst->module_id);
32054- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
32055- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
32056+ seq_printf(seq, "%-.28s", dst->module_name_version);
32057+ seq_printf(seq, "%-.8s", dst->date);
32058 seq_printf(seq, "%8d ", dst->module_size);
32059 seq_printf(seq, "%8d ", dst->mpb_size);
32060 seq_printf(seq, "0x%04x", dst->module_flags);
32061@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
32062 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
32063 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
32064 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
32065- seq_printf(seq, "Vendor info : %s\n",
32066- chtostr((u8 *) (work32 + 2), 16));
32067- seq_printf(seq, "Product info : %s\n",
32068- chtostr((u8 *) (work32 + 6), 16));
32069- seq_printf(seq, "Description : %s\n",
32070- chtostr((u8 *) (work32 + 10), 16));
32071- seq_printf(seq, "Product rev. : %s\n",
32072- chtostr((u8 *) (work32 + 14), 8));
32073+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
32074+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
32075+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
32076+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
32077
32078 seq_printf(seq, "Serial number : ");
32079 print_serial_number(seq, (u8 *) (work32 + 16),
32080@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
32081 }
32082
32083 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
32084- seq_printf(seq, "Module name : %s\n",
32085- chtostr(result.module_name, 24));
32086- seq_printf(seq, "Module revision : %s\n",
32087- chtostr(result.module_rev, 8));
32088+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
32089+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
32090
32091 seq_printf(seq, "Serial number : ");
32092 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
32093@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
32094 return 0;
32095 }
32096
32097- seq_printf(seq, "Device name : %s\n",
32098- chtostr(result.device_name, 64));
32099- seq_printf(seq, "Service name : %s\n",
32100- chtostr(result.service_name, 64));
32101- seq_printf(seq, "Physical name : %s\n",
32102- chtostr(result.physical_location, 64));
32103- seq_printf(seq, "Instance number : %s\n",
32104- chtostr(result.instance_number, 4));
32105+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
32106+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
32107+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
32108+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
32109
32110 return 0;
32111 }
32112diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
32113index a8c08f3..155fe3d 100644
32114--- a/drivers/message/i2o/iop.c
32115+++ b/drivers/message/i2o/iop.c
32116@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
32117
32118 spin_lock_irqsave(&c->context_list_lock, flags);
32119
32120- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
32121- atomic_inc(&c->context_list_counter);
32122+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
32123+ atomic_inc_unchecked(&c->context_list_counter);
32124
32125- entry->context = atomic_read(&c->context_list_counter);
32126+ entry->context = atomic_read_unchecked(&c->context_list_counter);
32127
32128 list_add(&entry->list, &c->context_list);
32129
32130@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
32131
32132 #if BITS_PER_LONG == 64
32133 spin_lock_init(&c->context_list_lock);
32134- atomic_set(&c->context_list_counter, 0);
32135+ atomic_set_unchecked(&c->context_list_counter, 0);
32136 INIT_LIST_HEAD(&c->context_list);
32137 #endif
32138
32139diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
32140index 7ce65f4..e66e9bc 100644
32141--- a/drivers/mfd/abx500-core.c
32142+++ b/drivers/mfd/abx500-core.c
32143@@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
32144
32145 struct abx500_device_entry {
32146 struct list_head list;
32147- struct abx500_ops ops;
32148+ abx500_ops_no_const ops;
32149 struct device *dev;
32150 };
32151
32152diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
32153index 5c2a06a..8fa077c 100644
32154--- a/drivers/mfd/janz-cmodio.c
32155+++ b/drivers/mfd/janz-cmodio.c
32156@@ -13,6 +13,7 @@
32157
32158 #include <linux/kernel.h>
32159 #include <linux/module.h>
32160+#include <linux/slab.h>
32161 #include <linux/init.h>
32162 #include <linux/pci.h>
32163 #include <linux/interrupt.h>
32164diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
32165index 29d12a7..f900ba4 100644
32166--- a/drivers/misc/lis3lv02d/lis3lv02d.c
32167+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
32168@@ -464,7 +464,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
32169 * the lid is closed. This leads to interrupts as soon as a little move
32170 * is done.
32171 */
32172- atomic_inc(&lis3->count);
32173+ atomic_inc_unchecked(&lis3->count);
32174
32175 wake_up_interruptible(&lis3->misc_wait);
32176 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
32177@@ -550,7 +550,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
32178 if (lis3->pm_dev)
32179 pm_runtime_get_sync(lis3->pm_dev);
32180
32181- atomic_set(&lis3->count, 0);
32182+ atomic_set_unchecked(&lis3->count, 0);
32183 return 0;
32184 }
32185
32186@@ -583,7 +583,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
32187 add_wait_queue(&lis3->misc_wait, &wait);
32188 while (true) {
32189 set_current_state(TASK_INTERRUPTIBLE);
32190- data = atomic_xchg(&lis3->count, 0);
32191+ data = atomic_xchg_unchecked(&lis3->count, 0);
32192 if (data)
32193 break;
32194
32195@@ -624,7 +624,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
32196 struct lis3lv02d, miscdev);
32197
32198 poll_wait(file, &lis3->misc_wait, wait);
32199- if (atomic_read(&lis3->count))
32200+ if (atomic_read_unchecked(&lis3->count))
32201 return POLLIN | POLLRDNORM;
32202 return 0;
32203 }
32204diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
32205index 2b1482a..5d33616 100644
32206--- a/drivers/misc/lis3lv02d/lis3lv02d.h
32207+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
32208@@ -266,7 +266,7 @@ struct lis3lv02d {
32209 struct input_polled_dev *idev; /* input device */
32210 struct platform_device *pdev; /* platform device */
32211 struct regulator_bulk_data regulators[2];
32212- atomic_t count; /* interrupt count after last read */
32213+ atomic_unchecked_t count; /* interrupt count after last read */
32214 union axis_conversion ac; /* hw -> logical axis */
32215 int mapped_btns[3];
32216
32217diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
32218index 2f30bad..c4c13d0 100644
32219--- a/drivers/misc/sgi-gru/gruhandles.c
32220+++ b/drivers/misc/sgi-gru/gruhandles.c
32221@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
32222 unsigned long nsec;
32223
32224 nsec = CLKS2NSEC(clks);
32225- atomic_long_inc(&mcs_op_statistics[op].count);
32226- atomic_long_add(nsec, &mcs_op_statistics[op].total);
32227+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
32228+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
32229 if (mcs_op_statistics[op].max < nsec)
32230 mcs_op_statistics[op].max = nsec;
32231 }
32232diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
32233index 7768b87..f8aac38 100644
32234--- a/drivers/misc/sgi-gru/gruprocfs.c
32235+++ b/drivers/misc/sgi-gru/gruprocfs.c
32236@@ -32,9 +32,9 @@
32237
32238 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
32239
32240-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
32241+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
32242 {
32243- unsigned long val = atomic_long_read(v);
32244+ unsigned long val = atomic_long_read_unchecked(v);
32245
32246 seq_printf(s, "%16lu %s\n", val, id);
32247 }
32248@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
32249
32250 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
32251 for (op = 0; op < mcsop_last; op++) {
32252- count = atomic_long_read(&mcs_op_statistics[op].count);
32253- total = atomic_long_read(&mcs_op_statistics[op].total);
32254+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
32255+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
32256 max = mcs_op_statistics[op].max;
32257 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
32258 count ? total / count : 0, max);
32259diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
32260index 5c3ce24..4915ccb 100644
32261--- a/drivers/misc/sgi-gru/grutables.h
32262+++ b/drivers/misc/sgi-gru/grutables.h
32263@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
32264 * GRU statistics.
32265 */
32266 struct gru_stats_s {
32267- atomic_long_t vdata_alloc;
32268- atomic_long_t vdata_free;
32269- atomic_long_t gts_alloc;
32270- atomic_long_t gts_free;
32271- atomic_long_t gms_alloc;
32272- atomic_long_t gms_free;
32273- atomic_long_t gts_double_allocate;
32274- atomic_long_t assign_context;
32275- atomic_long_t assign_context_failed;
32276- atomic_long_t free_context;
32277- atomic_long_t load_user_context;
32278- atomic_long_t load_kernel_context;
32279- atomic_long_t lock_kernel_context;
32280- atomic_long_t unlock_kernel_context;
32281- atomic_long_t steal_user_context;
32282- atomic_long_t steal_kernel_context;
32283- atomic_long_t steal_context_failed;
32284- atomic_long_t nopfn;
32285- atomic_long_t asid_new;
32286- atomic_long_t asid_next;
32287- atomic_long_t asid_wrap;
32288- atomic_long_t asid_reuse;
32289- atomic_long_t intr;
32290- atomic_long_t intr_cbr;
32291- atomic_long_t intr_tfh;
32292- atomic_long_t intr_spurious;
32293- atomic_long_t intr_mm_lock_failed;
32294- atomic_long_t call_os;
32295- atomic_long_t call_os_wait_queue;
32296- atomic_long_t user_flush_tlb;
32297- atomic_long_t user_unload_context;
32298- atomic_long_t user_exception;
32299- atomic_long_t set_context_option;
32300- atomic_long_t check_context_retarget_intr;
32301- atomic_long_t check_context_unload;
32302- atomic_long_t tlb_dropin;
32303- atomic_long_t tlb_preload_page;
32304- atomic_long_t tlb_dropin_fail_no_asid;
32305- atomic_long_t tlb_dropin_fail_upm;
32306- atomic_long_t tlb_dropin_fail_invalid;
32307- atomic_long_t tlb_dropin_fail_range_active;
32308- atomic_long_t tlb_dropin_fail_idle;
32309- atomic_long_t tlb_dropin_fail_fmm;
32310- atomic_long_t tlb_dropin_fail_no_exception;
32311- atomic_long_t tfh_stale_on_fault;
32312- atomic_long_t mmu_invalidate_range;
32313- atomic_long_t mmu_invalidate_page;
32314- atomic_long_t flush_tlb;
32315- atomic_long_t flush_tlb_gru;
32316- atomic_long_t flush_tlb_gru_tgh;
32317- atomic_long_t flush_tlb_gru_zero_asid;
32318+ atomic_long_unchecked_t vdata_alloc;
32319+ atomic_long_unchecked_t vdata_free;
32320+ atomic_long_unchecked_t gts_alloc;
32321+ atomic_long_unchecked_t gts_free;
32322+ atomic_long_unchecked_t gms_alloc;
32323+ atomic_long_unchecked_t gms_free;
32324+ atomic_long_unchecked_t gts_double_allocate;
32325+ atomic_long_unchecked_t assign_context;
32326+ atomic_long_unchecked_t assign_context_failed;
32327+ atomic_long_unchecked_t free_context;
32328+ atomic_long_unchecked_t load_user_context;
32329+ atomic_long_unchecked_t load_kernel_context;
32330+ atomic_long_unchecked_t lock_kernel_context;
32331+ atomic_long_unchecked_t unlock_kernel_context;
32332+ atomic_long_unchecked_t steal_user_context;
32333+ atomic_long_unchecked_t steal_kernel_context;
32334+ atomic_long_unchecked_t steal_context_failed;
32335+ atomic_long_unchecked_t nopfn;
32336+ atomic_long_unchecked_t asid_new;
32337+ atomic_long_unchecked_t asid_next;
32338+ atomic_long_unchecked_t asid_wrap;
32339+ atomic_long_unchecked_t asid_reuse;
32340+ atomic_long_unchecked_t intr;
32341+ atomic_long_unchecked_t intr_cbr;
32342+ atomic_long_unchecked_t intr_tfh;
32343+ atomic_long_unchecked_t intr_spurious;
32344+ atomic_long_unchecked_t intr_mm_lock_failed;
32345+ atomic_long_unchecked_t call_os;
32346+ atomic_long_unchecked_t call_os_wait_queue;
32347+ atomic_long_unchecked_t user_flush_tlb;
32348+ atomic_long_unchecked_t user_unload_context;
32349+ atomic_long_unchecked_t user_exception;
32350+ atomic_long_unchecked_t set_context_option;
32351+ atomic_long_unchecked_t check_context_retarget_intr;
32352+ atomic_long_unchecked_t check_context_unload;
32353+ atomic_long_unchecked_t tlb_dropin;
32354+ atomic_long_unchecked_t tlb_preload_page;
32355+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
32356+ atomic_long_unchecked_t tlb_dropin_fail_upm;
32357+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
32358+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
32359+ atomic_long_unchecked_t tlb_dropin_fail_idle;
32360+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
32361+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
32362+ atomic_long_unchecked_t tfh_stale_on_fault;
32363+ atomic_long_unchecked_t mmu_invalidate_range;
32364+ atomic_long_unchecked_t mmu_invalidate_page;
32365+ atomic_long_unchecked_t flush_tlb;
32366+ atomic_long_unchecked_t flush_tlb_gru;
32367+ atomic_long_unchecked_t flush_tlb_gru_tgh;
32368+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
32369
32370- atomic_long_t copy_gpa;
32371- atomic_long_t read_gpa;
32372+ atomic_long_unchecked_t copy_gpa;
32373+ atomic_long_unchecked_t read_gpa;
32374
32375- atomic_long_t mesq_receive;
32376- atomic_long_t mesq_receive_none;
32377- atomic_long_t mesq_send;
32378- atomic_long_t mesq_send_failed;
32379- atomic_long_t mesq_noop;
32380- atomic_long_t mesq_send_unexpected_error;
32381- atomic_long_t mesq_send_lb_overflow;
32382- atomic_long_t mesq_send_qlimit_reached;
32383- atomic_long_t mesq_send_amo_nacked;
32384- atomic_long_t mesq_send_put_nacked;
32385- atomic_long_t mesq_page_overflow;
32386- atomic_long_t mesq_qf_locked;
32387- atomic_long_t mesq_qf_noop_not_full;
32388- atomic_long_t mesq_qf_switch_head_failed;
32389- atomic_long_t mesq_qf_unexpected_error;
32390- atomic_long_t mesq_noop_unexpected_error;
32391- atomic_long_t mesq_noop_lb_overflow;
32392- atomic_long_t mesq_noop_qlimit_reached;
32393- atomic_long_t mesq_noop_amo_nacked;
32394- atomic_long_t mesq_noop_put_nacked;
32395- atomic_long_t mesq_noop_page_overflow;
32396+ atomic_long_unchecked_t mesq_receive;
32397+ atomic_long_unchecked_t mesq_receive_none;
32398+ atomic_long_unchecked_t mesq_send;
32399+ atomic_long_unchecked_t mesq_send_failed;
32400+ atomic_long_unchecked_t mesq_noop;
32401+ atomic_long_unchecked_t mesq_send_unexpected_error;
32402+ atomic_long_unchecked_t mesq_send_lb_overflow;
32403+ atomic_long_unchecked_t mesq_send_qlimit_reached;
32404+ atomic_long_unchecked_t mesq_send_amo_nacked;
32405+ atomic_long_unchecked_t mesq_send_put_nacked;
32406+ atomic_long_unchecked_t mesq_page_overflow;
32407+ atomic_long_unchecked_t mesq_qf_locked;
32408+ atomic_long_unchecked_t mesq_qf_noop_not_full;
32409+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
32410+ atomic_long_unchecked_t mesq_qf_unexpected_error;
32411+ atomic_long_unchecked_t mesq_noop_unexpected_error;
32412+ atomic_long_unchecked_t mesq_noop_lb_overflow;
32413+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
32414+ atomic_long_unchecked_t mesq_noop_amo_nacked;
32415+ atomic_long_unchecked_t mesq_noop_put_nacked;
32416+ atomic_long_unchecked_t mesq_noop_page_overflow;
32417
32418 };
32419
32420@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
32421 tghop_invalidate, mcsop_last};
32422
32423 struct mcs_op_statistic {
32424- atomic_long_t count;
32425- atomic_long_t total;
32426+ atomic_long_unchecked_t count;
32427+ atomic_long_unchecked_t total;
32428 unsigned long max;
32429 };
32430
32431@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
32432
32433 #define STAT(id) do { \
32434 if (gru_options & OPT_STATS) \
32435- atomic_long_inc(&gru_stats.id); \
32436+ atomic_long_inc_unchecked(&gru_stats.id); \
32437 } while (0)
32438
32439 #ifdef CONFIG_SGI_GRU_DEBUG
32440diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
32441index 851b2f2..a4ec097 100644
32442--- a/drivers/misc/sgi-xp/xp.h
32443+++ b/drivers/misc/sgi-xp/xp.h
32444@@ -289,7 +289,7 @@ struct xpc_interface {
32445 xpc_notify_func, void *);
32446 void (*received) (short, int, void *);
32447 enum xp_retval (*partid_to_nasids) (short, void *);
32448-};
32449+} __no_const;
32450
32451 extern struct xpc_interface xpc_interface;
32452
32453diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
32454index b94d5f7..7f494c5 100644
32455--- a/drivers/misc/sgi-xp/xpc.h
32456+++ b/drivers/misc/sgi-xp/xpc.h
32457@@ -835,6 +835,7 @@ struct xpc_arch_operations {
32458 void (*received_payload) (struct xpc_channel *, void *);
32459 void (*notify_senders_of_disconnect) (struct xpc_channel *);
32460 };
32461+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
32462
32463 /* struct xpc_partition act_state values (for XPC HB) */
32464
32465@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
32466 /* found in xpc_main.c */
32467 extern struct device *xpc_part;
32468 extern struct device *xpc_chan;
32469-extern struct xpc_arch_operations xpc_arch_ops;
32470+extern xpc_arch_operations_no_const xpc_arch_ops;
32471 extern int xpc_disengage_timelimit;
32472 extern int xpc_disengage_timedout;
32473 extern int xpc_activate_IRQ_rcvd;
32474diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
32475index 8d082b4..aa749ae 100644
32476--- a/drivers/misc/sgi-xp/xpc_main.c
32477+++ b/drivers/misc/sgi-xp/xpc_main.c
32478@@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
32479 .notifier_call = xpc_system_die,
32480 };
32481
32482-struct xpc_arch_operations xpc_arch_ops;
32483+xpc_arch_operations_no_const xpc_arch_ops;
32484
32485 /*
32486 * Timer function to enforce the timelimit on the partition disengage.
32487diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
32488index 6878a94..fe5c5f1 100644
32489--- a/drivers/mmc/host/sdhci-pci.c
32490+++ b/drivers/mmc/host/sdhci-pci.c
32491@@ -673,7 +673,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
32492 .probe = via_probe,
32493 };
32494
32495-static const struct pci_device_id pci_ids[] __devinitdata = {
32496+static const struct pci_device_id pci_ids[] __devinitconst = {
32497 {
32498 .vendor = PCI_VENDOR_ID_RICOH,
32499 .device = PCI_DEVICE_ID_RICOH_R5C822,
32500diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
32501index e9fad91..0a7a16a 100644
32502--- a/drivers/mtd/devices/doc2000.c
32503+++ b/drivers/mtd/devices/doc2000.c
32504@@ -773,7 +773,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
32505
32506 /* The ECC will not be calculated correctly if less than 512 is written */
32507 /* DBB-
32508- if (len != 0x200 && eccbuf)
32509+ if (len != 0x200)
32510 printk(KERN_WARNING
32511 "ECC needs a full sector write (adr: %lx size %lx)\n",
32512 (long) to, (long) len);
32513diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
32514index a3f7a27..234016e 100644
32515--- a/drivers/mtd/devices/doc2001.c
32516+++ b/drivers/mtd/devices/doc2001.c
32517@@ -392,7 +392,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
32518 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
32519
32520 /* Don't allow read past end of device */
32521- if (from >= this->totlen)
32522+ if (from >= this->totlen || !len)
32523 return -EINVAL;
32524
32525 /* Don't allow a single read to cross a 512-byte block boundary */
32526diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
32527index 3984d48..28aa897 100644
32528--- a/drivers/mtd/nand/denali.c
32529+++ b/drivers/mtd/nand/denali.c
32530@@ -26,6 +26,7 @@
32531 #include <linux/pci.h>
32532 #include <linux/mtd/mtd.h>
32533 #include <linux/module.h>
32534+#include <linux/slab.h>
32535
32536 #include "denali.h"
32537
32538diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
32539index ac40925..483b753 100644
32540--- a/drivers/mtd/nftlmount.c
32541+++ b/drivers/mtd/nftlmount.c
32542@@ -24,6 +24,7 @@
32543 #include <asm/errno.h>
32544 #include <linux/delay.h>
32545 #include <linux/slab.h>
32546+#include <linux/sched.h>
32547 #include <linux/mtd/mtd.h>
32548 #include <linux/mtd/nand.h>
32549 #include <linux/mtd/nftl.h>
32550diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
32551index 6c3fb5a..c542a81 100644
32552--- a/drivers/mtd/ubi/build.c
32553+++ b/drivers/mtd/ubi/build.c
32554@@ -1311,7 +1311,7 @@ module_exit(ubi_exit);
32555 static int __init bytes_str_to_int(const char *str)
32556 {
32557 char *endp;
32558- unsigned long result;
32559+ unsigned long result, scale = 1;
32560
32561 result = simple_strtoul(str, &endp, 0);
32562 if (str == endp || result >= INT_MAX) {
32563@@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const char *str)
32564
32565 switch (*endp) {
32566 case 'G':
32567- result *= 1024;
32568+ scale *= 1024;
32569 case 'M':
32570- result *= 1024;
32571+ scale *= 1024;
32572 case 'K':
32573- result *= 1024;
32574+ scale *= 1024;
32575 if (endp[1] == 'i' && endp[2] == 'B')
32576 endp += 2;
32577 case '\0':
32578@@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const char *str)
32579 return -EINVAL;
32580 }
32581
32582- return result;
32583+ if ((intoverflow_t)result*scale >= INT_MAX) {
32584+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
32585+ str);
32586+ return -EINVAL;
32587+ }
32588+
32589+ return result*scale;
32590 }
32591
32592 /**
32593diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
32594index 1feae59..c2a61d2 100644
32595--- a/drivers/net/ethernet/atheros/atlx/atl2.c
32596+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
32597@@ -2857,7 +2857,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
32598 */
32599
32600 #define ATL2_PARAM(X, desc) \
32601- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32602+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32603 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
32604 MODULE_PARM_DESC(X, desc);
32605 #else
32606diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32607index 9a517c2..a50cfcb 100644
32608--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32609+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32610@@ -449,7 +449,7 @@ struct bnx2x_rx_mode_obj {
32611
32612 int (*wait_comp)(struct bnx2x *bp,
32613 struct bnx2x_rx_mode_ramrod_params *p);
32614-};
32615+} __no_const;
32616
32617 /********************** Set multicast group ***********************************/
32618
32619diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
32620index 94b4bd0..73c02de 100644
32621--- a/drivers/net/ethernet/broadcom/tg3.h
32622+++ b/drivers/net/ethernet/broadcom/tg3.h
32623@@ -134,6 +134,7 @@
32624 #define CHIPREV_ID_5750_A0 0x4000
32625 #define CHIPREV_ID_5750_A1 0x4001
32626 #define CHIPREV_ID_5750_A3 0x4003
32627+#define CHIPREV_ID_5750_C1 0x4201
32628 #define CHIPREV_ID_5750_C2 0x4202
32629 #define CHIPREV_ID_5752_A0_HW 0x5000
32630 #define CHIPREV_ID_5752_A0 0x6000
32631diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32632index c5f5479..2e8c260 100644
32633--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32634+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32635@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
32636 */
32637 struct l2t_skb_cb {
32638 arp_failure_handler_func arp_failure_handler;
32639-};
32640+} __no_const;
32641
32642 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
32643
32644diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
32645index 871bcaa..4043505 100644
32646--- a/drivers/net/ethernet/dec/tulip/de4x5.c
32647+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
32648@@ -5397,7 +5397,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32649 for (i=0; i<ETH_ALEN; i++) {
32650 tmp.addr[i] = dev->dev_addr[i];
32651 }
32652- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32653+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32654 break;
32655
32656 case DE4X5_SET_HWADDR: /* Set the hardware address */
32657@@ -5437,7 +5437,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32658 spin_lock_irqsave(&lp->lock, flags);
32659 memcpy(&statbuf, &lp->pktStats, ioc->len);
32660 spin_unlock_irqrestore(&lp->lock, flags);
32661- if (copy_to_user(ioc->data, &statbuf, ioc->len))
32662+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32663 return -EFAULT;
32664 break;
32665 }
32666diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
32667index 14d5b61..1398636 100644
32668--- a/drivers/net/ethernet/dec/tulip/eeprom.c
32669+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
32670@@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
32671 {NULL}};
32672
32673
32674-static const char *block_name[] __devinitdata = {
32675+static const char *block_name[] __devinitconst = {
32676 "21140 non-MII",
32677 "21140 MII PHY",
32678 "21142 Serial PHY",
32679diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
32680index 4d01219..b58d26d 100644
32681--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
32682+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
32683@@ -236,7 +236,7 @@ struct pci_id_info {
32684 int drv_flags; /* Driver use, intended as capability flags. */
32685 };
32686
32687-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32688+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32689 { /* Sometime a Level-One switch card. */
32690 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
32691 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
32692diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
32693index dcd7f7a..ecb7fb3 100644
32694--- a/drivers/net/ethernet/dlink/sundance.c
32695+++ b/drivers/net/ethernet/dlink/sundance.c
32696@@ -218,7 +218,7 @@ enum {
32697 struct pci_id_info {
32698 const char *name;
32699 };
32700-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32701+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32702 {"D-Link DFE-550TX FAST Ethernet Adapter"},
32703 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
32704 {"D-Link DFE-580TX 4 port Server Adapter"},
32705diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
32706index bf266a0..e024af7 100644
32707--- a/drivers/net/ethernet/emulex/benet/be_main.c
32708+++ b/drivers/net/ethernet/emulex/benet/be_main.c
32709@@ -397,7 +397,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
32710
32711 if (wrapped)
32712 newacc += 65536;
32713- ACCESS_ONCE(*acc) = newacc;
32714+ ACCESS_ONCE_RW(*acc) = newacc;
32715 }
32716
32717 void be_parse_stats(struct be_adapter *adapter)
32718diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
32719index 61d2bdd..7f1154a 100644
32720--- a/drivers/net/ethernet/fealnx.c
32721+++ b/drivers/net/ethernet/fealnx.c
32722@@ -150,7 +150,7 @@ struct chip_info {
32723 int flags;
32724 };
32725
32726-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
32727+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
32728 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32729 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
32730 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32731diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32732index e1159e5..e18684d 100644
32733--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32734+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32735@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
32736 {
32737 struct e1000_hw *hw = &adapter->hw;
32738 struct e1000_mac_info *mac = &hw->mac;
32739- struct e1000_mac_operations *func = &mac->ops;
32740+ e1000_mac_operations_no_const *func = &mac->ops;
32741
32742 /* Set media type */
32743 switch (adapter->pdev->device) {
32744diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
32745index a3e65fd..f451444 100644
32746--- a/drivers/net/ethernet/intel/e1000e/82571.c
32747+++ b/drivers/net/ethernet/intel/e1000e/82571.c
32748@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
32749 {
32750 struct e1000_hw *hw = &adapter->hw;
32751 struct e1000_mac_info *mac = &hw->mac;
32752- struct e1000_mac_operations *func = &mac->ops;
32753+ e1000_mac_operations_no_const *func = &mac->ops;
32754 u32 swsm = 0;
32755 u32 swsm2 = 0;
32756 bool force_clear_smbi = false;
32757diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
32758index 2967039..ca8c40c 100644
32759--- a/drivers/net/ethernet/intel/e1000e/hw.h
32760+++ b/drivers/net/ethernet/intel/e1000e/hw.h
32761@@ -778,6 +778,7 @@ struct e1000_mac_operations {
32762 void (*write_vfta)(struct e1000_hw *, u32, u32);
32763 s32 (*read_mac_addr)(struct e1000_hw *);
32764 };
32765+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32766
32767 /*
32768 * When to use various PHY register access functions:
32769@@ -818,6 +819,7 @@ struct e1000_phy_operations {
32770 void (*power_up)(struct e1000_hw *);
32771 void (*power_down)(struct e1000_hw *);
32772 };
32773+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
32774
32775 /* Function pointers for the NVM. */
32776 struct e1000_nvm_operations {
32777@@ -829,9 +831,10 @@ struct e1000_nvm_operations {
32778 s32 (*validate)(struct e1000_hw *);
32779 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
32780 };
32781+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
32782
32783 struct e1000_mac_info {
32784- struct e1000_mac_operations ops;
32785+ e1000_mac_operations_no_const ops;
32786 u8 addr[ETH_ALEN];
32787 u8 perm_addr[ETH_ALEN];
32788
32789@@ -872,7 +875,7 @@ struct e1000_mac_info {
32790 };
32791
32792 struct e1000_phy_info {
32793- struct e1000_phy_operations ops;
32794+ e1000_phy_operations_no_const ops;
32795
32796 enum e1000_phy_type type;
32797
32798@@ -906,7 +909,7 @@ struct e1000_phy_info {
32799 };
32800
32801 struct e1000_nvm_info {
32802- struct e1000_nvm_operations ops;
32803+ e1000_nvm_operations_no_const ops;
32804
32805 enum e1000_nvm_type type;
32806 enum e1000_nvm_override override;
32807diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
32808index 4519a13..f97fcd0 100644
32809--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
32810+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
32811@@ -314,6 +314,7 @@ struct e1000_mac_operations {
32812 s32 (*read_mac_addr)(struct e1000_hw *);
32813 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
32814 };
32815+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32816
32817 struct e1000_phy_operations {
32818 s32 (*acquire)(struct e1000_hw *);
32819@@ -330,6 +331,7 @@ struct e1000_phy_operations {
32820 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
32821 s32 (*write_reg)(struct e1000_hw *, u32, u16);
32822 };
32823+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
32824
32825 struct e1000_nvm_operations {
32826 s32 (*acquire)(struct e1000_hw *);
32827@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
32828 s32 (*update)(struct e1000_hw *);
32829 s32 (*validate)(struct e1000_hw *);
32830 };
32831+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
32832
32833 struct e1000_info {
32834 s32 (*get_invariants)(struct e1000_hw *);
32835@@ -350,7 +353,7 @@ struct e1000_info {
32836 extern const struct e1000_info e1000_82575_info;
32837
32838 struct e1000_mac_info {
32839- struct e1000_mac_operations ops;
32840+ e1000_mac_operations_no_const ops;
32841
32842 u8 addr[6];
32843 u8 perm_addr[6];
32844@@ -388,7 +391,7 @@ struct e1000_mac_info {
32845 };
32846
32847 struct e1000_phy_info {
32848- struct e1000_phy_operations ops;
32849+ e1000_phy_operations_no_const ops;
32850
32851 enum e1000_phy_type type;
32852
32853@@ -423,7 +426,7 @@ struct e1000_phy_info {
32854 };
32855
32856 struct e1000_nvm_info {
32857- struct e1000_nvm_operations ops;
32858+ e1000_nvm_operations_no_const ops;
32859 enum e1000_nvm_type type;
32860 enum e1000_nvm_override override;
32861
32862@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
32863 s32 (*check_for_ack)(struct e1000_hw *, u16);
32864 s32 (*check_for_rst)(struct e1000_hw *, u16);
32865 };
32866+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
32867
32868 struct e1000_mbx_stats {
32869 u32 msgs_tx;
32870@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
32871 };
32872
32873 struct e1000_mbx_info {
32874- struct e1000_mbx_operations ops;
32875+ e1000_mbx_operations_no_const ops;
32876 struct e1000_mbx_stats stats;
32877 u32 timeout;
32878 u32 usec_delay;
32879diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
32880index d7ed58f..64cde36 100644
32881--- a/drivers/net/ethernet/intel/igbvf/vf.h
32882+++ b/drivers/net/ethernet/intel/igbvf/vf.h
32883@@ -189,9 +189,10 @@ struct e1000_mac_operations {
32884 s32 (*read_mac_addr)(struct e1000_hw *);
32885 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
32886 };
32887+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32888
32889 struct e1000_mac_info {
32890- struct e1000_mac_operations ops;
32891+ e1000_mac_operations_no_const ops;
32892 u8 addr[6];
32893 u8 perm_addr[6];
32894
32895@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
32896 s32 (*check_for_ack)(struct e1000_hw *);
32897 s32 (*check_for_rst)(struct e1000_hw *);
32898 };
32899+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
32900
32901 struct e1000_mbx_stats {
32902 u32 msgs_tx;
32903@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
32904 };
32905
32906 struct e1000_mbx_info {
32907- struct e1000_mbx_operations ops;
32908+ e1000_mbx_operations_no_const ops;
32909 struct e1000_mbx_stats stats;
32910 u32 timeout;
32911 u32 usec_delay;
32912diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32913index 6c5cca8..de8ef63 100644
32914--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32915+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32916@@ -2708,6 +2708,7 @@ struct ixgbe_eeprom_operations {
32917 s32 (*update_checksum)(struct ixgbe_hw *);
32918 u16 (*calc_checksum)(struct ixgbe_hw *);
32919 };
32920+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
32921
32922 struct ixgbe_mac_operations {
32923 s32 (*init_hw)(struct ixgbe_hw *);
32924@@ -2769,6 +2770,7 @@ struct ixgbe_mac_operations {
32925 /* Manageability interface */
32926 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
32927 };
32928+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
32929
32930 struct ixgbe_phy_operations {
32931 s32 (*identify)(struct ixgbe_hw *);
32932@@ -2788,9 +2790,10 @@ struct ixgbe_phy_operations {
32933 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
32934 s32 (*check_overtemp)(struct ixgbe_hw *);
32935 };
32936+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
32937
32938 struct ixgbe_eeprom_info {
32939- struct ixgbe_eeprom_operations ops;
32940+ ixgbe_eeprom_operations_no_const ops;
32941 enum ixgbe_eeprom_type type;
32942 u32 semaphore_delay;
32943 u16 word_size;
32944@@ -2800,7 +2803,7 @@ struct ixgbe_eeprom_info {
32945
32946 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
32947 struct ixgbe_mac_info {
32948- struct ixgbe_mac_operations ops;
32949+ ixgbe_mac_operations_no_const ops;
32950 enum ixgbe_mac_type type;
32951 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
32952 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
32953@@ -2828,7 +2831,7 @@ struct ixgbe_mac_info {
32954 };
32955
32956 struct ixgbe_phy_info {
32957- struct ixgbe_phy_operations ops;
32958+ ixgbe_phy_operations_no_const ops;
32959 struct mdio_if_info mdio;
32960 enum ixgbe_phy_type type;
32961 u32 id;
32962@@ -2856,6 +2859,7 @@ struct ixgbe_mbx_operations {
32963 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
32964 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
32965 };
32966+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
32967
32968 struct ixgbe_mbx_stats {
32969 u32 msgs_tx;
32970@@ -2867,7 +2871,7 @@ struct ixgbe_mbx_stats {
32971 };
32972
32973 struct ixgbe_mbx_info {
32974- struct ixgbe_mbx_operations ops;
32975+ ixgbe_mbx_operations_no_const ops;
32976 struct ixgbe_mbx_stats stats;
32977 u32 timeout;
32978 u32 usec_delay;
32979diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
32980index 10306b4..28df758 100644
32981--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
32982+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
32983@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
32984 s32 (*clear_vfta)(struct ixgbe_hw *);
32985 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
32986 };
32987+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
32988
32989 enum ixgbe_mac_type {
32990 ixgbe_mac_unknown = 0,
32991@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
32992 };
32993
32994 struct ixgbe_mac_info {
32995- struct ixgbe_mac_operations ops;
32996+ ixgbe_mac_operations_no_const ops;
32997 u8 addr[6];
32998 u8 perm_addr[6];
32999
33000@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
33001 s32 (*check_for_ack)(struct ixgbe_hw *);
33002 s32 (*check_for_rst)(struct ixgbe_hw *);
33003 };
33004+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
33005
33006 struct ixgbe_mbx_stats {
33007 u32 msgs_tx;
33008@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
33009 };
33010
33011 struct ixgbe_mbx_info {
33012- struct ixgbe_mbx_operations ops;
33013+ ixgbe_mbx_operations_no_const ops;
33014 struct ixgbe_mbx_stats stats;
33015 u32 timeout;
33016 u32 udelay;
33017diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
33018index 94bbc85..78c12e6 100644
33019--- a/drivers/net/ethernet/mellanox/mlx4/main.c
33020+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
33021@@ -40,6 +40,7 @@
33022 #include <linux/dma-mapping.h>
33023 #include <linux/slab.h>
33024 #include <linux/io-mapping.h>
33025+#include <linux/sched.h>
33026
33027 #include <linux/mlx4/device.h>
33028 #include <linux/mlx4/doorbell.h>
33029diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
33030index 5046a64..71ca936 100644
33031--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
33032+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
33033@@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
33034 void (*link_down)(struct __vxge_hw_device *devh);
33035 void (*crit_err)(struct __vxge_hw_device *devh,
33036 enum vxge_hw_event type, u64 ext_data);
33037-};
33038+} __no_const;
33039
33040 /*
33041 * struct __vxge_hw_blockpool_entry - Block private data structure
33042diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
33043index 4a518a3..936b334 100644
33044--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
33045+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
33046@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
33047 struct vxge_hw_mempool_dma *dma_object,
33048 u32 index,
33049 u32 is_last);
33050-};
33051+} __no_const;
33052
33053 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
33054 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
33055diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
33056index c8f47f1..5da9840 100644
33057--- a/drivers/net/ethernet/realtek/r8169.c
33058+++ b/drivers/net/ethernet/realtek/r8169.c
33059@@ -698,17 +698,17 @@ struct rtl8169_private {
33060 struct mdio_ops {
33061 void (*write)(void __iomem *, int, int);
33062 int (*read)(void __iomem *, int);
33063- } mdio_ops;
33064+ } __no_const mdio_ops;
33065
33066 struct pll_power_ops {
33067 void (*down)(struct rtl8169_private *);
33068 void (*up)(struct rtl8169_private *);
33069- } pll_power_ops;
33070+ } __no_const pll_power_ops;
33071
33072 struct jumbo_ops {
33073 void (*enable)(struct rtl8169_private *);
33074 void (*disable)(struct rtl8169_private *);
33075- } jumbo_ops;
33076+ } __no_const jumbo_ops;
33077
33078 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
33079 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
33080diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
33081index 1b4658c..a30dabb 100644
33082--- a/drivers/net/ethernet/sis/sis190.c
33083+++ b/drivers/net/ethernet/sis/sis190.c
33084@@ -1624,7 +1624,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
33085 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
33086 struct net_device *dev)
33087 {
33088- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
33089+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
33090 struct sis190_private *tp = netdev_priv(dev);
33091 struct pci_dev *isa_bridge;
33092 u8 reg, tmp8;
33093diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
33094index edfa15d..002bfa9 100644
33095--- a/drivers/net/ppp/ppp_generic.c
33096+++ b/drivers/net/ppp/ppp_generic.c
33097@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
33098 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
33099 struct ppp_stats stats;
33100 struct ppp_comp_stats cstats;
33101- char *vers;
33102
33103 switch (cmd) {
33104 case SIOCGPPPSTATS:
33105@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
33106 break;
33107
33108 case SIOCGPPPVER:
33109- vers = PPP_VERSION;
33110- if (copy_to_user(addr, vers, strlen(vers) + 1))
33111+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
33112 break;
33113 err = 0;
33114 break;
33115diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
33116index 515f122..41dd273 100644
33117--- a/drivers/net/tokenring/abyss.c
33118+++ b/drivers/net/tokenring/abyss.c
33119@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
33120
33121 static int __init abyss_init (void)
33122 {
33123- abyss_netdev_ops = tms380tr_netdev_ops;
33124+ pax_open_kernel();
33125+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
33126
33127- abyss_netdev_ops.ndo_open = abyss_open;
33128- abyss_netdev_ops.ndo_stop = abyss_close;
33129+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
33130+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
33131+ pax_close_kernel();
33132
33133 return pci_register_driver(&abyss_driver);
33134 }
33135diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
33136index 6153cfd..cf69c1c 100644
33137--- a/drivers/net/tokenring/madgemc.c
33138+++ b/drivers/net/tokenring/madgemc.c
33139@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
33140
33141 static int __init madgemc_init (void)
33142 {
33143- madgemc_netdev_ops = tms380tr_netdev_ops;
33144- madgemc_netdev_ops.ndo_open = madgemc_open;
33145- madgemc_netdev_ops.ndo_stop = madgemc_close;
33146+ pax_open_kernel();
33147+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
33148+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
33149+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
33150+ pax_close_kernel();
33151
33152 return mca_register_driver (&madgemc_driver);
33153 }
33154diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
33155index 8d362e6..f91cc52 100644
33156--- a/drivers/net/tokenring/proteon.c
33157+++ b/drivers/net/tokenring/proteon.c
33158@@ -353,9 +353,11 @@ static int __init proteon_init(void)
33159 struct platform_device *pdev;
33160 int i, num = 0, err = 0;
33161
33162- proteon_netdev_ops = tms380tr_netdev_ops;
33163- proteon_netdev_ops.ndo_open = proteon_open;
33164- proteon_netdev_ops.ndo_stop = tms380tr_close;
33165+ pax_open_kernel();
33166+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
33167+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
33168+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
33169+ pax_close_kernel();
33170
33171 err = platform_driver_register(&proteon_driver);
33172 if (err)
33173diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
33174index 46db5c5..37c1536 100644
33175--- a/drivers/net/tokenring/skisa.c
33176+++ b/drivers/net/tokenring/skisa.c
33177@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
33178 struct platform_device *pdev;
33179 int i, num = 0, err = 0;
33180
33181- sk_isa_netdev_ops = tms380tr_netdev_ops;
33182- sk_isa_netdev_ops.ndo_open = sk_isa_open;
33183- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
33184+ pax_open_kernel();
33185+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
33186+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
33187+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
33188+ pax_close_kernel();
33189
33190 err = platform_driver_register(&sk_isa_driver);
33191 if (err)
33192diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
33193index 304fe78..db112fa 100644
33194--- a/drivers/net/usb/hso.c
33195+++ b/drivers/net/usb/hso.c
33196@@ -71,7 +71,7 @@
33197 #include <asm/byteorder.h>
33198 #include <linux/serial_core.h>
33199 #include <linux/serial.h>
33200-
33201+#include <asm/local.h>
33202
33203 #define MOD_AUTHOR "Option Wireless"
33204 #define MOD_DESCRIPTION "USB High Speed Option driver"
33205@@ -257,7 +257,7 @@ struct hso_serial {
33206
33207 /* from usb_serial_port */
33208 struct tty_struct *tty;
33209- int open_count;
33210+ local_t open_count;
33211 spinlock_t serial_lock;
33212
33213 int (*write_data) (struct hso_serial *serial);
33214@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
33215 struct urb *urb;
33216
33217 urb = serial->rx_urb[0];
33218- if (serial->open_count > 0) {
33219+ if (local_read(&serial->open_count) > 0) {
33220 count = put_rxbuf_data(urb, serial);
33221 if (count == -1)
33222 return;
33223@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
33224 DUMP1(urb->transfer_buffer, urb->actual_length);
33225
33226 /* Anyone listening? */
33227- if (serial->open_count == 0)
33228+ if (local_read(&serial->open_count) == 0)
33229 return;
33230
33231 if (status == 0) {
33232@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
33233 spin_unlock_irq(&serial->serial_lock);
33234
33235 /* check for port already opened, if not set the termios */
33236- serial->open_count++;
33237- if (serial->open_count == 1) {
33238+ if (local_inc_return(&serial->open_count) == 1) {
33239 serial->rx_state = RX_IDLE;
33240 /* Force default termio settings */
33241 _hso_serial_set_termios(tty, NULL);
33242@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
33243 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
33244 if (result) {
33245 hso_stop_serial_device(serial->parent);
33246- serial->open_count--;
33247+ local_dec(&serial->open_count);
33248 kref_put(&serial->parent->ref, hso_serial_ref_free);
33249 }
33250 } else {
33251@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
33252
33253 /* reset the rts and dtr */
33254 /* do the actual close */
33255- serial->open_count--;
33256+ local_dec(&serial->open_count);
33257
33258- if (serial->open_count <= 0) {
33259- serial->open_count = 0;
33260+ if (local_read(&serial->open_count) <= 0) {
33261+ local_set(&serial->open_count, 0);
33262 spin_lock_irq(&serial->serial_lock);
33263 if (serial->tty == tty) {
33264 serial->tty->driver_data = NULL;
33265@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
33266
33267 /* the actual setup */
33268 spin_lock_irqsave(&serial->serial_lock, flags);
33269- if (serial->open_count)
33270+ if (local_read(&serial->open_count))
33271 _hso_serial_set_termios(tty, old);
33272 else
33273 tty->termios = old;
33274@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
33275 D1("Pending read interrupt on port %d\n", i);
33276 spin_lock(&serial->serial_lock);
33277 if (serial->rx_state == RX_IDLE &&
33278- serial->open_count > 0) {
33279+ local_read(&serial->open_count) > 0) {
33280 /* Setup and send a ctrl req read on
33281 * port i */
33282 if (!serial->rx_urb_filled[0]) {
33283@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
33284 /* Start all serial ports */
33285 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
33286 if (serial_table[i] && (serial_table[i]->interface == iface)) {
33287- if (dev2ser(serial_table[i])->open_count) {
33288+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
33289 result =
33290 hso_start_serial_device(serial_table[i], GFP_NOIO);
33291 hso_kick_transmit(dev2ser(serial_table[i]));
33292diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
33293index e662cbc..8d4a102 100644
33294--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
33295+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
33296@@ -601,8 +601,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
33297 * Return with error code if any of the queue indices
33298 * is out of range
33299 */
33300- if (p->ring_index[i] < 0 ||
33301- p->ring_index[i] >= adapter->num_rx_queues)
33302+ if (p->ring_index[i] >= adapter->num_rx_queues)
33303 return -EINVAL;
33304 }
33305
33306diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
33307index 0f9ee46..e2d6e65 100644
33308--- a/drivers/net/wireless/ath/ath.h
33309+++ b/drivers/net/wireless/ath/ath.h
33310@@ -119,6 +119,7 @@ struct ath_ops {
33311 void (*write_flush) (void *);
33312 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
33313 };
33314+typedef struct ath_ops __no_const ath_ops_no_const;
33315
33316 struct ath_common;
33317 struct ath_bus_ops;
33318diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33319index b592016..fe47870 100644
33320--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33321+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33322@@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33323 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
33324 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
33325
33326- ACCESS_ONCE(ads->ds_link) = i->link;
33327- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
33328+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
33329+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
33330
33331 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
33332 ctl6 = SM(i->keytype, AR_EncrType);
33333@@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33334
33335 if ((i->is_first || i->is_last) &&
33336 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
33337- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
33338+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
33339 | set11nTries(i->rates, 1)
33340 | set11nTries(i->rates, 2)
33341 | set11nTries(i->rates, 3)
33342 | (i->dur_update ? AR_DurUpdateEna : 0)
33343 | SM(0, AR_BurstDur);
33344
33345- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
33346+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
33347 | set11nRate(i->rates, 1)
33348 | set11nRate(i->rates, 2)
33349 | set11nRate(i->rates, 3);
33350 } else {
33351- ACCESS_ONCE(ads->ds_ctl2) = 0;
33352- ACCESS_ONCE(ads->ds_ctl3) = 0;
33353+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
33354+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
33355 }
33356
33357 if (!i->is_first) {
33358- ACCESS_ONCE(ads->ds_ctl0) = 0;
33359- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
33360- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
33361+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
33362+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
33363+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
33364 return;
33365 }
33366
33367@@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33368 break;
33369 }
33370
33371- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
33372+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
33373 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
33374 | SM(i->txpower, AR_XmitPower)
33375 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
33376@@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33377 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
33378 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
33379
33380- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
33381- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
33382+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
33383+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
33384
33385 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
33386 return;
33387
33388- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
33389+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
33390 | set11nPktDurRTSCTS(i->rates, 1);
33391
33392- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
33393+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
33394 | set11nPktDurRTSCTS(i->rates, 3);
33395
33396- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
33397+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
33398 | set11nRateFlags(i->rates, 1)
33399 | set11nRateFlags(i->rates, 2)
33400 | set11nRateFlags(i->rates, 3)
33401diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33402index f5ae3c6..7936af3 100644
33403--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33404+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33405@@ -35,47 +35,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33406 (i->qcu << AR_TxQcuNum_S) | 0x17;
33407
33408 checksum += val;
33409- ACCESS_ONCE(ads->info) = val;
33410+ ACCESS_ONCE_RW(ads->info) = val;
33411
33412 checksum += i->link;
33413- ACCESS_ONCE(ads->link) = i->link;
33414+ ACCESS_ONCE_RW(ads->link) = i->link;
33415
33416 checksum += i->buf_addr[0];
33417- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
33418+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
33419 checksum += i->buf_addr[1];
33420- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
33421+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
33422 checksum += i->buf_addr[2];
33423- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
33424+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
33425 checksum += i->buf_addr[3];
33426- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
33427+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
33428
33429 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
33430- ACCESS_ONCE(ads->ctl3) = val;
33431+ ACCESS_ONCE_RW(ads->ctl3) = val;
33432 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
33433- ACCESS_ONCE(ads->ctl5) = val;
33434+ ACCESS_ONCE_RW(ads->ctl5) = val;
33435 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
33436- ACCESS_ONCE(ads->ctl7) = val;
33437+ ACCESS_ONCE_RW(ads->ctl7) = val;
33438 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
33439- ACCESS_ONCE(ads->ctl9) = val;
33440+ ACCESS_ONCE_RW(ads->ctl9) = val;
33441
33442 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
33443- ACCESS_ONCE(ads->ctl10) = checksum;
33444+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
33445
33446 if (i->is_first || i->is_last) {
33447- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
33448+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
33449 | set11nTries(i->rates, 1)
33450 | set11nTries(i->rates, 2)
33451 | set11nTries(i->rates, 3)
33452 | (i->dur_update ? AR_DurUpdateEna : 0)
33453 | SM(0, AR_BurstDur);
33454
33455- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
33456+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
33457 | set11nRate(i->rates, 1)
33458 | set11nRate(i->rates, 2)
33459 | set11nRate(i->rates, 3);
33460 } else {
33461- ACCESS_ONCE(ads->ctl13) = 0;
33462- ACCESS_ONCE(ads->ctl14) = 0;
33463+ ACCESS_ONCE_RW(ads->ctl13) = 0;
33464+ ACCESS_ONCE_RW(ads->ctl14) = 0;
33465 }
33466
33467 ads->ctl20 = 0;
33468@@ -84,17 +84,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33469
33470 ctl17 = SM(i->keytype, AR_EncrType);
33471 if (!i->is_first) {
33472- ACCESS_ONCE(ads->ctl11) = 0;
33473- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
33474- ACCESS_ONCE(ads->ctl15) = 0;
33475- ACCESS_ONCE(ads->ctl16) = 0;
33476- ACCESS_ONCE(ads->ctl17) = ctl17;
33477- ACCESS_ONCE(ads->ctl18) = 0;
33478- ACCESS_ONCE(ads->ctl19) = 0;
33479+ ACCESS_ONCE_RW(ads->ctl11) = 0;
33480+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
33481+ ACCESS_ONCE_RW(ads->ctl15) = 0;
33482+ ACCESS_ONCE_RW(ads->ctl16) = 0;
33483+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
33484+ ACCESS_ONCE_RW(ads->ctl18) = 0;
33485+ ACCESS_ONCE_RW(ads->ctl19) = 0;
33486 return;
33487 }
33488
33489- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
33490+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
33491 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
33492 | SM(i->txpower, AR_XmitPower)
33493 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
33494@@ -130,22 +130,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33495 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
33496 ctl12 |= SM(val, AR_PAPRDChainMask);
33497
33498- ACCESS_ONCE(ads->ctl12) = ctl12;
33499- ACCESS_ONCE(ads->ctl17) = ctl17;
33500+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
33501+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
33502
33503- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
33504+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
33505 | set11nPktDurRTSCTS(i->rates, 1);
33506
33507- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
33508+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
33509 | set11nPktDurRTSCTS(i->rates, 3);
33510
33511- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
33512+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
33513 | set11nRateFlags(i->rates, 1)
33514 | set11nRateFlags(i->rates, 2)
33515 | set11nRateFlags(i->rates, 3)
33516 | SM(i->rtscts_rate, AR_RTSCTSRate);
33517
33518- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
33519+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
33520 }
33521
33522 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
33523diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
33524index f389b3c..7359e18 100644
33525--- a/drivers/net/wireless/ath/ath9k/hw.h
33526+++ b/drivers/net/wireless/ath/ath9k/hw.h
33527@@ -605,7 +605,7 @@ struct ath_hw_private_ops {
33528
33529 /* ANI */
33530 void (*ani_cache_ini_regs)(struct ath_hw *ah);
33531-};
33532+} __no_const;
33533
33534 /**
33535 * struct ath_hw_ops - callbacks used by hardware code and driver code
33536@@ -635,7 +635,7 @@ struct ath_hw_ops {
33537 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
33538 struct ath_hw_antcomb_conf *antconf);
33539
33540-};
33541+} __no_const;
33542
33543 struct ath_nf_limits {
33544 s16 max;
33545@@ -655,7 +655,7 @@ enum ath_cal_list {
33546 #define AH_FASTCC 0x4
33547
33548 struct ath_hw {
33549- struct ath_ops reg_ops;
33550+ ath_ops_no_const reg_ops;
33551
33552 struct ieee80211_hw *hw;
33553 struct ath_common common;
33554diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33555index bea8524..c677c06 100644
33556--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33557+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33558@@ -547,7 +547,7 @@ struct phy_func_ptr {
33559 void (*carrsuppr)(struct brcms_phy *);
33560 s32 (*rxsigpwr)(struct brcms_phy *, s32);
33561 void (*detach)(struct brcms_phy *);
33562-};
33563+} __no_const;
33564
33565 struct brcms_phy {
33566 struct brcms_phy_pub pubpi_ro;
33567diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
33568index 05f2ad1..ae00eea 100644
33569--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
33570+++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
33571@@ -3685,7 +3685,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
33572 */
33573 if (iwl3945_mod_params.disable_hw_scan) {
33574 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
33575- iwl3945_hw_ops.hw_scan = NULL;
33576+ pax_open_kernel();
33577+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
33578+ pax_close_kernel();
33579 }
33580
33581 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
33582diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
33583index 69a77e2..552b42c 100644
33584--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
33585+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
33586@@ -71,8 +71,8 @@ do { \
33587 } while (0)
33588
33589 #else
33590-#define IWL_DEBUG(m, level, fmt, args...)
33591-#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
33592+#define IWL_DEBUG(m, level, fmt, args...) do {} while (0)
33593+#define IWL_DEBUG_LIMIT(m, level, fmt, args...) do {} while (0)
33594 #define iwl_print_hex_dump(m, level, p, len)
33595 #endif /* CONFIG_IWLWIFI_DEBUG */
33596
33597diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
33598index 523ad55..f8c5dc5 100644
33599--- a/drivers/net/wireless/mac80211_hwsim.c
33600+++ b/drivers/net/wireless/mac80211_hwsim.c
33601@@ -1678,9 +1678,11 @@ static int __init init_mac80211_hwsim(void)
33602 return -EINVAL;
33603
33604 if (fake_hw_scan) {
33605- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33606- mac80211_hwsim_ops.sw_scan_start = NULL;
33607- mac80211_hwsim_ops.sw_scan_complete = NULL;
33608+ pax_open_kernel();
33609+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33610+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
33611+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
33612+ pax_close_kernel();
33613 }
33614
33615 spin_lock_init(&hwsim_radio_lock);
33616diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
33617index 30f138b..c904585 100644
33618--- a/drivers/net/wireless/mwifiex/main.h
33619+++ b/drivers/net/wireless/mwifiex/main.h
33620@@ -543,7 +543,7 @@ struct mwifiex_if_ops {
33621 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
33622 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
33623 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
33624-};
33625+} __no_const;
33626
33627 struct mwifiex_adapter {
33628 u8 iface_type;
33629diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
33630index 0c13840..a5c3ed6 100644
33631--- a/drivers/net/wireless/rndis_wlan.c
33632+++ b/drivers/net/wireless/rndis_wlan.c
33633@@ -1275,7 +1275,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
33634
33635 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
33636
33637- if (rts_threshold < 0 || rts_threshold > 2347)
33638+ if (rts_threshold > 2347)
33639 rts_threshold = 2347;
33640
33641 tmp = cpu_to_le32(rts_threshold);
33642diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
33643index a77f1bb..c608b2b 100644
33644--- a/drivers/net/wireless/wl1251/wl1251.h
33645+++ b/drivers/net/wireless/wl1251/wl1251.h
33646@@ -266,7 +266,7 @@ struct wl1251_if_operations {
33647 void (*reset)(struct wl1251 *wl);
33648 void (*enable_irq)(struct wl1251 *wl);
33649 void (*disable_irq)(struct wl1251 *wl);
33650-};
33651+} __no_const;
33652
33653 struct wl1251 {
33654 struct ieee80211_hw *hw;
33655diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
33656index f34b5b2..b5abb9f 100644
33657--- a/drivers/oprofile/buffer_sync.c
33658+++ b/drivers/oprofile/buffer_sync.c
33659@@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
33660 if (cookie == NO_COOKIE)
33661 offset = pc;
33662 if (cookie == INVALID_COOKIE) {
33663- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33664+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33665 offset = pc;
33666 }
33667 if (cookie != last_cookie) {
33668@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
33669 /* add userspace sample */
33670
33671 if (!mm) {
33672- atomic_inc(&oprofile_stats.sample_lost_no_mm);
33673+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
33674 return 0;
33675 }
33676
33677 cookie = lookup_dcookie(mm, s->eip, &offset);
33678
33679 if (cookie == INVALID_COOKIE) {
33680- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33681+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33682 return 0;
33683 }
33684
33685@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
33686 /* ignore backtraces if failed to add a sample */
33687 if (state == sb_bt_start) {
33688 state = sb_bt_ignore;
33689- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
33690+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
33691 }
33692 }
33693 release_mm(mm);
33694diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
33695index c0cc4e7..44d4e54 100644
33696--- a/drivers/oprofile/event_buffer.c
33697+++ b/drivers/oprofile/event_buffer.c
33698@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
33699 }
33700
33701 if (buffer_pos == buffer_size) {
33702- atomic_inc(&oprofile_stats.event_lost_overflow);
33703+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
33704 return;
33705 }
33706
33707diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
33708index f8c752e..28bf4fc 100644
33709--- a/drivers/oprofile/oprof.c
33710+++ b/drivers/oprofile/oprof.c
33711@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
33712 if (oprofile_ops.switch_events())
33713 return;
33714
33715- atomic_inc(&oprofile_stats.multiplex_counter);
33716+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
33717 start_switch_worker();
33718 }
33719
33720diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
33721index 917d28e..d62d981 100644
33722--- a/drivers/oprofile/oprofile_stats.c
33723+++ b/drivers/oprofile/oprofile_stats.c
33724@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
33725 cpu_buf->sample_invalid_eip = 0;
33726 }
33727
33728- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
33729- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
33730- atomic_set(&oprofile_stats.event_lost_overflow, 0);
33731- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
33732- atomic_set(&oprofile_stats.multiplex_counter, 0);
33733+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
33734+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
33735+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
33736+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
33737+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
33738 }
33739
33740
33741diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
33742index 38b6fc0..b5cbfce 100644
33743--- a/drivers/oprofile/oprofile_stats.h
33744+++ b/drivers/oprofile/oprofile_stats.h
33745@@ -13,11 +13,11 @@
33746 #include <linux/atomic.h>
33747
33748 struct oprofile_stat_struct {
33749- atomic_t sample_lost_no_mm;
33750- atomic_t sample_lost_no_mapping;
33751- atomic_t bt_lost_no_mapping;
33752- atomic_t event_lost_overflow;
33753- atomic_t multiplex_counter;
33754+ atomic_unchecked_t sample_lost_no_mm;
33755+ atomic_unchecked_t sample_lost_no_mapping;
33756+ atomic_unchecked_t bt_lost_no_mapping;
33757+ atomic_unchecked_t event_lost_overflow;
33758+ atomic_unchecked_t multiplex_counter;
33759 };
33760
33761 extern struct oprofile_stat_struct oprofile_stats;
33762diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
33763index 2f0aa0f..90fab02 100644
33764--- a/drivers/oprofile/oprofilefs.c
33765+++ b/drivers/oprofile/oprofilefs.c
33766@@ -193,7 +193,7 @@ static const struct file_operations atomic_ro_fops = {
33767
33768
33769 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
33770- char const *name, atomic_t *val)
33771+ char const *name, atomic_unchecked_t *val)
33772 {
33773 return __oprofilefs_create_file(sb, root, name,
33774 &atomic_ro_fops, 0444, val);
33775diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
33776index 3f56bc0..707d642 100644
33777--- a/drivers/parport/procfs.c
33778+++ b/drivers/parport/procfs.c
33779@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
33780
33781 *ppos += len;
33782
33783- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
33784+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
33785 }
33786
33787 #ifdef CONFIG_PARPORT_1284
33788@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
33789
33790 *ppos += len;
33791
33792- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
33793+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
33794 }
33795 #endif /* IEEE1284.3 support. */
33796
33797diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
33798index 9fff878..ad0ad53 100644
33799--- a/drivers/pci/hotplug/cpci_hotplug.h
33800+++ b/drivers/pci/hotplug/cpci_hotplug.h
33801@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
33802 int (*hardware_test) (struct slot* slot, u32 value);
33803 u8 (*get_power) (struct slot* slot);
33804 int (*set_power) (struct slot* slot, int value);
33805-};
33806+} __no_const;
33807
33808 struct cpci_hp_controller {
33809 unsigned int irq;
33810diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
33811index 76ba8a1..20ca857 100644
33812--- a/drivers/pci/hotplug/cpqphp_nvram.c
33813+++ b/drivers/pci/hotplug/cpqphp_nvram.c
33814@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
33815
33816 void compaq_nvram_init (void __iomem *rom_start)
33817 {
33818+
33819+#ifndef CONFIG_PAX_KERNEXEC
33820 if (rom_start) {
33821 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
33822 }
33823+#endif
33824+
33825 dbg("int15 entry = %p\n", compaq_int15_entry_point);
33826
33827 /* initialize our int15 lock */
33828diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
33829index 1cfbf22..be96487 100644
33830--- a/drivers/pci/pcie/aspm.c
33831+++ b/drivers/pci/pcie/aspm.c
33832@@ -27,9 +27,9 @@
33833 #define MODULE_PARAM_PREFIX "pcie_aspm."
33834
33835 /* Note: those are not register definitions */
33836-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
33837-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
33838-#define ASPM_STATE_L1 (4) /* L1 state */
33839+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
33840+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
33841+#define ASPM_STATE_L1 (4U) /* L1 state */
33842 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
33843 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
33844
33845diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
33846index dfee1b3..a454fb6 100644
33847--- a/drivers/pci/probe.c
33848+++ b/drivers/pci/probe.c
33849@@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
33850 u32 l, sz, mask;
33851 u16 orig_cmd;
33852
33853- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
33854+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
33855
33856 if (!dev->mmio_always_on) {
33857 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
33858diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
33859index 27911b5..5b6db88 100644
33860--- a/drivers/pci/proc.c
33861+++ b/drivers/pci/proc.c
33862@@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
33863 static int __init pci_proc_init(void)
33864 {
33865 struct pci_dev *dev = NULL;
33866+
33867+#ifdef CONFIG_GRKERNSEC_PROC_ADD
33868+#ifdef CONFIG_GRKERNSEC_PROC_USER
33869+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
33870+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33871+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
33872+#endif
33873+#else
33874 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
33875+#endif
33876 proc_create("devices", 0, proc_bus_pci_dir,
33877 &proc_bus_pci_dev_operations);
33878 proc_initialized = 1;
33879diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
33880index 7b82868..b9344c9 100644
33881--- a/drivers/platform/x86/thinkpad_acpi.c
33882+++ b/drivers/platform/x86/thinkpad_acpi.c
33883@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
33884 return 0;
33885 }
33886
33887-void static hotkey_mask_warn_incomplete_mask(void)
33888+static void hotkey_mask_warn_incomplete_mask(void)
33889 {
33890 /* log only what the user can fix... */
33891 const u32 wantedmask = hotkey_driver_mask &
33892@@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
33893 }
33894 }
33895
33896-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33897- struct tp_nvram_state *newn,
33898- const u32 event_mask)
33899-{
33900-
33901 #define TPACPI_COMPARE_KEY(__scancode, __member) \
33902 do { \
33903 if ((event_mask & (1 << __scancode)) && \
33904@@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33905 tpacpi_hotkey_send_key(__scancode); \
33906 } while (0)
33907
33908- void issue_volchange(const unsigned int oldvol,
33909- const unsigned int newvol)
33910- {
33911- unsigned int i = oldvol;
33912+static void issue_volchange(const unsigned int oldvol,
33913+ const unsigned int newvol,
33914+ const u32 event_mask)
33915+{
33916+ unsigned int i = oldvol;
33917
33918- while (i > newvol) {
33919- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
33920- i--;
33921- }
33922- while (i < newvol) {
33923- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33924- i++;
33925- }
33926+ while (i > newvol) {
33927+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
33928+ i--;
33929 }
33930+ while (i < newvol) {
33931+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33932+ i++;
33933+ }
33934+}
33935
33936- void issue_brightnesschange(const unsigned int oldbrt,
33937- const unsigned int newbrt)
33938- {
33939- unsigned int i = oldbrt;
33940+static void issue_brightnesschange(const unsigned int oldbrt,
33941+ const unsigned int newbrt,
33942+ const u32 event_mask)
33943+{
33944+ unsigned int i = oldbrt;
33945
33946- while (i > newbrt) {
33947- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
33948- i--;
33949- }
33950- while (i < newbrt) {
33951- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33952- i++;
33953- }
33954+ while (i > newbrt) {
33955+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
33956+ i--;
33957+ }
33958+ while (i < newbrt) {
33959+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33960+ i++;
33961 }
33962+}
33963
33964+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33965+ struct tp_nvram_state *newn,
33966+ const u32 event_mask)
33967+{
33968 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
33969 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
33970 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
33971@@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33972 oldn->volume_level != newn->volume_level) {
33973 /* recently muted, or repeated mute keypress, or
33974 * multiple presses ending in mute */
33975- issue_volchange(oldn->volume_level, newn->volume_level);
33976+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
33977 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
33978 }
33979 } else {
33980@@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33981 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33982 }
33983 if (oldn->volume_level != newn->volume_level) {
33984- issue_volchange(oldn->volume_level, newn->volume_level);
33985+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
33986 } else if (oldn->volume_toggle != newn->volume_toggle) {
33987 /* repeated vol up/down keypress at end of scale ? */
33988 if (newn->volume_level == 0)
33989@@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33990 /* handle brightness */
33991 if (oldn->brightness_level != newn->brightness_level) {
33992 issue_brightnesschange(oldn->brightness_level,
33993- newn->brightness_level);
33994+ newn->brightness_level,
33995+ event_mask);
33996 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
33997 /* repeated key presses that didn't change state */
33998 if (newn->brightness_level == 0)
33999@@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
34000 && !tp_features.bright_unkfw)
34001 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
34002 }
34003+}
34004
34005 #undef TPACPI_COMPARE_KEY
34006 #undef TPACPI_MAY_SEND_KEY
34007-}
34008
34009 /*
34010 * Polling driver
34011diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
34012index b859d16..5cc6b1a 100644
34013--- a/drivers/pnp/pnpbios/bioscalls.c
34014+++ b/drivers/pnp/pnpbios/bioscalls.c
34015@@ -59,7 +59,7 @@ do { \
34016 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
34017 } while(0)
34018
34019-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
34020+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
34021 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
34022
34023 /*
34024@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
34025
34026 cpu = get_cpu();
34027 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
34028+
34029+ pax_open_kernel();
34030 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
34031+ pax_close_kernel();
34032
34033 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
34034 spin_lock_irqsave(&pnp_bios_lock, flags);
34035@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
34036 :"memory");
34037 spin_unlock_irqrestore(&pnp_bios_lock, flags);
34038
34039+ pax_open_kernel();
34040 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
34041+ pax_close_kernel();
34042+
34043 put_cpu();
34044
34045 /* If we get here and this is set then the PnP BIOS faulted on us. */
34046@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
34047 return status;
34048 }
34049
34050-void pnpbios_calls_init(union pnp_bios_install_struct *header)
34051+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
34052 {
34053 int i;
34054
34055@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
34056 pnp_bios_callpoint.offset = header->fields.pm16offset;
34057 pnp_bios_callpoint.segment = PNP_CS16;
34058
34059+ pax_open_kernel();
34060+
34061 for_each_possible_cpu(i) {
34062 struct desc_struct *gdt = get_cpu_gdt_table(i);
34063 if (!gdt)
34064@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
34065 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
34066 (unsigned long)__va(header->fields.pm16dseg));
34067 }
34068+
34069+ pax_close_kernel();
34070 }
34071diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
34072index b0ecacb..7c9da2e 100644
34073--- a/drivers/pnp/resource.c
34074+++ b/drivers/pnp/resource.c
34075@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
34076 return 1;
34077
34078 /* check if the resource is valid */
34079- if (*irq < 0 || *irq > 15)
34080+ if (*irq > 15)
34081 return 0;
34082
34083 /* check if the resource is reserved */
34084@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
34085 return 1;
34086
34087 /* check if the resource is valid */
34088- if (*dma < 0 || *dma == 4 || *dma > 7)
34089+ if (*dma == 4 || *dma > 7)
34090 return 0;
34091
34092 /* check if the resource is reserved */
34093diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
34094index bb16f5b..c751eef 100644
34095--- a/drivers/power/bq27x00_battery.c
34096+++ b/drivers/power/bq27x00_battery.c
34097@@ -67,7 +67,7 @@
34098 struct bq27x00_device_info;
34099 struct bq27x00_access_methods {
34100 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
34101-};
34102+} __no_const;
34103
34104 enum bq27x00_chip { BQ27000, BQ27500 };
34105
34106diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
34107index 33f5d9a..d957d3f 100644
34108--- a/drivers/regulator/max8660.c
34109+++ b/drivers/regulator/max8660.c
34110@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
34111 max8660->shadow_regs[MAX8660_OVER1] = 5;
34112 } else {
34113 /* Otherwise devices can be toggled via software */
34114- max8660_dcdc_ops.enable = max8660_dcdc_enable;
34115- max8660_dcdc_ops.disable = max8660_dcdc_disable;
34116+ pax_open_kernel();
34117+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
34118+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
34119+ pax_close_kernel();
34120 }
34121
34122 /*
34123diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
34124index 023d17d..74ef35b 100644
34125--- a/drivers/regulator/mc13892-regulator.c
34126+++ b/drivers/regulator/mc13892-regulator.c
34127@@ -565,10 +565,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
34128 }
34129 mc13xxx_unlock(mc13892);
34130
34131- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
34132+ pax_open_kernel();
34133+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
34134 = mc13892_vcam_set_mode;
34135- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
34136+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
34137 = mc13892_vcam_get_mode;
34138+ pax_close_kernel();
34139 for (i = 0; i < pdata->num_regulators; i++) {
34140 init_data = &pdata->regulators[i];
34141 priv->regulators[i] = regulator_register(
34142diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
34143index cace6d3..f623fda 100644
34144--- a/drivers/rtc/rtc-dev.c
34145+++ b/drivers/rtc/rtc-dev.c
34146@@ -14,6 +14,7 @@
34147 #include <linux/module.h>
34148 #include <linux/rtc.h>
34149 #include <linux/sched.h>
34150+#include <linux/grsecurity.h>
34151 #include "rtc-core.h"
34152
34153 static dev_t rtc_devt;
34154@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
34155 if (copy_from_user(&tm, uarg, sizeof(tm)))
34156 return -EFAULT;
34157
34158+ gr_log_timechange();
34159+
34160 return rtc_set_time(rtc, &tm);
34161
34162 case RTC_PIE_ON:
34163diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
34164index ffb5878..e6d785c 100644
34165--- a/drivers/scsi/aacraid/aacraid.h
34166+++ b/drivers/scsi/aacraid/aacraid.h
34167@@ -492,7 +492,7 @@ struct adapter_ops
34168 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
34169 /* Administrative operations */
34170 int (*adapter_comm)(struct aac_dev * dev, int comm);
34171-};
34172+} __no_const;
34173
34174 /*
34175 * Define which interrupt handler needs to be installed
34176diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
34177index 705e13e..91c873c 100644
34178--- a/drivers/scsi/aacraid/linit.c
34179+++ b/drivers/scsi/aacraid/linit.c
34180@@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
34181 #elif defined(__devinitconst)
34182 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
34183 #else
34184-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
34185+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
34186 #endif
34187 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
34188 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
34189diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
34190index d5ff142..49c0ebb 100644
34191--- a/drivers/scsi/aic94xx/aic94xx_init.c
34192+++ b/drivers/scsi/aic94xx/aic94xx_init.c
34193@@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
34194 .lldd_control_phy = asd_control_phy,
34195 };
34196
34197-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
34198+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
34199 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
34200 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
34201 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
34202diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
34203index a796de9..1ef20e1 100644
34204--- a/drivers/scsi/bfa/bfa.h
34205+++ b/drivers/scsi/bfa/bfa.h
34206@@ -196,7 +196,7 @@ struct bfa_hwif_s {
34207 u32 *end);
34208 int cpe_vec_q0;
34209 int rme_vec_q0;
34210-};
34211+} __no_const;
34212 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
34213
34214 struct bfa_faa_cbfn_s {
34215diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
34216index e07bd47..cd1bbbb 100644
34217--- a/drivers/scsi/bfa/bfa_fcpim.c
34218+++ b/drivers/scsi/bfa/bfa_fcpim.c
34219@@ -4121,7 +4121,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
34220
34221 bfa_iotag_attach(fcp);
34222
34223- fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
34224+ fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
34225 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
34226 (fcp->num_itns * sizeof(struct bfa_itn_s));
34227 memset(fcp->itn_arr, 0,
34228@@ -4179,7 +4179,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
34229 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
34230 {
34231 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
34232- struct bfa_itn_s *itn;
34233+ bfa_itn_s_no_const *itn;
34234
34235 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
34236 itn->isr = isr;
34237diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
34238index 1080bcb..a3b39e3 100644
34239--- a/drivers/scsi/bfa/bfa_fcpim.h
34240+++ b/drivers/scsi/bfa/bfa_fcpim.h
34241@@ -37,6 +37,7 @@ struct bfa_iotag_s {
34242 struct bfa_itn_s {
34243 bfa_isr_func_t isr;
34244 };
34245+typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
34246
34247 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
34248 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
34249@@ -149,7 +150,7 @@ struct bfa_fcp_mod_s {
34250 struct list_head iotag_tio_free_q; /* free IO resources */
34251 struct list_head iotag_unused_q; /* unused IO resources*/
34252 struct bfa_iotag_s *iotag_arr;
34253- struct bfa_itn_s *itn_arr;
34254+ bfa_itn_s_no_const *itn_arr;
34255 int num_ioim_reqs;
34256 int num_fwtio_reqs;
34257 int num_itns;
34258diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
34259index 546d46b..642fa5b 100644
34260--- a/drivers/scsi/bfa/bfa_ioc.h
34261+++ b/drivers/scsi/bfa/bfa_ioc.h
34262@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
34263 bfa_ioc_disable_cbfn_t disable_cbfn;
34264 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
34265 bfa_ioc_reset_cbfn_t reset_cbfn;
34266-};
34267+} __no_const;
34268
34269 /*
34270 * IOC event notification mechanism.
34271@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
34272 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
34273 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
34274 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
34275-};
34276+} __no_const;
34277
34278 /*
34279 * Queue element to wait for room in request queue. FIFO order is
34280diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
34281index 351dc0b..951dc32 100644
34282--- a/drivers/scsi/hosts.c
34283+++ b/drivers/scsi/hosts.c
34284@@ -42,7 +42,7 @@
34285 #include "scsi_logging.h"
34286
34287
34288-static atomic_t scsi_host_next_hn; /* host_no for next new host */
34289+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
34290
34291
34292 static void scsi_host_cls_release(struct device *dev)
34293@@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
34294 * subtract one because we increment first then return, but we need to
34295 * know what the next host number was before increment
34296 */
34297- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
34298+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
34299 shost->dma_channel = 0xff;
34300
34301 /* These three are default values which can be overridden */
34302diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
34303index 865d452..e9b7fa7 100644
34304--- a/drivers/scsi/hpsa.c
34305+++ b/drivers/scsi/hpsa.c
34306@@ -505,7 +505,7 @@ static inline u32 next_command(struct ctlr_info *h)
34307 u32 a;
34308
34309 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
34310- return h->access.command_completed(h);
34311+ return h->access->command_completed(h);
34312
34313 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
34314 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
34315@@ -2989,7 +2989,7 @@ static void start_io(struct ctlr_info *h)
34316 while (!list_empty(&h->reqQ)) {
34317 c = list_entry(h->reqQ.next, struct CommandList, list);
34318 /* can't do anything if fifo is full */
34319- if ((h->access.fifo_full(h))) {
34320+ if ((h->access->fifo_full(h))) {
34321 dev_warn(&h->pdev->dev, "fifo full\n");
34322 break;
34323 }
34324@@ -2999,7 +2999,7 @@ static void start_io(struct ctlr_info *h)
34325 h->Qdepth--;
34326
34327 /* Tell the controller execute command */
34328- h->access.submit_command(h, c);
34329+ h->access->submit_command(h, c);
34330
34331 /* Put job onto the completed Q */
34332 addQ(&h->cmpQ, c);
34333@@ -3008,17 +3008,17 @@ static void start_io(struct ctlr_info *h)
34334
34335 static inline unsigned long get_next_completion(struct ctlr_info *h)
34336 {
34337- return h->access.command_completed(h);
34338+ return h->access->command_completed(h);
34339 }
34340
34341 static inline bool interrupt_pending(struct ctlr_info *h)
34342 {
34343- return h->access.intr_pending(h);
34344+ return h->access->intr_pending(h);
34345 }
34346
34347 static inline long interrupt_not_for_us(struct ctlr_info *h)
34348 {
34349- return (h->access.intr_pending(h) == 0) ||
34350+ return (h->access->intr_pending(h) == 0) ||
34351 (h->interrupts_enabled == 0);
34352 }
34353
34354@@ -3917,7 +3917,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
34355 if (prod_index < 0)
34356 return -ENODEV;
34357 h->product_name = products[prod_index].product_name;
34358- h->access = *(products[prod_index].access);
34359+ h->access = products[prod_index].access;
34360
34361 if (hpsa_board_disabled(h->pdev)) {
34362 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
34363@@ -4162,7 +4162,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
34364
34365 assert_spin_locked(&lockup_detector_lock);
34366 remove_ctlr_from_lockup_detector_list(h);
34367- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34368+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34369 spin_lock_irqsave(&h->lock, flags);
34370 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
34371 spin_unlock_irqrestore(&h->lock, flags);
34372@@ -4340,7 +4340,7 @@ reinit_after_soft_reset:
34373 }
34374
34375 /* make sure the board interrupts are off */
34376- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34377+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34378
34379 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
34380 goto clean2;
34381@@ -4374,7 +4374,7 @@ reinit_after_soft_reset:
34382 * fake ones to scoop up any residual completions.
34383 */
34384 spin_lock_irqsave(&h->lock, flags);
34385- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34386+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34387 spin_unlock_irqrestore(&h->lock, flags);
34388 free_irq(h->intr[h->intr_mode], h);
34389 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
34390@@ -4393,9 +4393,9 @@ reinit_after_soft_reset:
34391 dev_info(&h->pdev->dev, "Board READY.\n");
34392 dev_info(&h->pdev->dev,
34393 "Waiting for stale completions to drain.\n");
34394- h->access.set_intr_mask(h, HPSA_INTR_ON);
34395+ h->access->set_intr_mask(h, HPSA_INTR_ON);
34396 msleep(10000);
34397- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34398+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34399
34400 rc = controller_reset_failed(h->cfgtable);
34401 if (rc)
34402@@ -4416,7 +4416,7 @@ reinit_after_soft_reset:
34403 }
34404
34405 /* Turn the interrupts on so we can service requests */
34406- h->access.set_intr_mask(h, HPSA_INTR_ON);
34407+ h->access->set_intr_mask(h, HPSA_INTR_ON);
34408
34409 hpsa_hba_inquiry(h);
34410 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
34411@@ -4468,7 +4468,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
34412 * To write all data in the battery backed cache to disks
34413 */
34414 hpsa_flush_cache(h);
34415- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34416+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34417 free_irq(h->intr[h->intr_mode], h);
34418 #ifdef CONFIG_PCI_MSI
34419 if (h->msix_vector)
34420@@ -4632,7 +4632,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
34421 return;
34422 }
34423 /* Change the access methods to the performant access methods */
34424- h->access = SA5_performant_access;
34425+ h->access = &SA5_performant_access;
34426 h->transMethod = CFGTBL_Trans_Performant;
34427 }
34428
34429diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
34430index 91edafb..a9b88ec 100644
34431--- a/drivers/scsi/hpsa.h
34432+++ b/drivers/scsi/hpsa.h
34433@@ -73,7 +73,7 @@ struct ctlr_info {
34434 unsigned int msix_vector;
34435 unsigned int msi_vector;
34436 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
34437- struct access_method access;
34438+ struct access_method *access;
34439
34440 /* queue and queue Info */
34441 struct list_head reqQ;
34442diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
34443index f2df059..a3a9930 100644
34444--- a/drivers/scsi/ips.h
34445+++ b/drivers/scsi/ips.h
34446@@ -1027,7 +1027,7 @@ typedef struct {
34447 int (*intr)(struct ips_ha *);
34448 void (*enableint)(struct ips_ha *);
34449 uint32_t (*statupd)(struct ips_ha *);
34450-} ips_hw_func_t;
34451+} __no_const ips_hw_func_t;
34452
34453 typedef struct ips_ha {
34454 uint8_t ha_id[IPS_MAX_CHANNELS+1];
34455diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
34456index 9de9db2..1e09660 100644
34457--- a/drivers/scsi/libfc/fc_exch.c
34458+++ b/drivers/scsi/libfc/fc_exch.c
34459@@ -105,12 +105,12 @@ struct fc_exch_mgr {
34460 * all together if not used XXX
34461 */
34462 struct {
34463- atomic_t no_free_exch;
34464- atomic_t no_free_exch_xid;
34465- atomic_t xid_not_found;
34466- atomic_t xid_busy;
34467- atomic_t seq_not_found;
34468- atomic_t non_bls_resp;
34469+ atomic_unchecked_t no_free_exch;
34470+ atomic_unchecked_t no_free_exch_xid;
34471+ atomic_unchecked_t xid_not_found;
34472+ atomic_unchecked_t xid_busy;
34473+ atomic_unchecked_t seq_not_found;
34474+ atomic_unchecked_t non_bls_resp;
34475 } stats;
34476 };
34477
34478@@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
34479 /* allocate memory for exchange */
34480 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
34481 if (!ep) {
34482- atomic_inc(&mp->stats.no_free_exch);
34483+ atomic_inc_unchecked(&mp->stats.no_free_exch);
34484 goto out;
34485 }
34486 memset(ep, 0, sizeof(*ep));
34487@@ -780,7 +780,7 @@ out:
34488 return ep;
34489 err:
34490 spin_unlock_bh(&pool->lock);
34491- atomic_inc(&mp->stats.no_free_exch_xid);
34492+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
34493 mempool_free(ep, mp->ep_pool);
34494 return NULL;
34495 }
34496@@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34497 xid = ntohs(fh->fh_ox_id); /* we originated exch */
34498 ep = fc_exch_find(mp, xid);
34499 if (!ep) {
34500- atomic_inc(&mp->stats.xid_not_found);
34501+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34502 reject = FC_RJT_OX_ID;
34503 goto out;
34504 }
34505@@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34506 ep = fc_exch_find(mp, xid);
34507 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
34508 if (ep) {
34509- atomic_inc(&mp->stats.xid_busy);
34510+ atomic_inc_unchecked(&mp->stats.xid_busy);
34511 reject = FC_RJT_RX_ID;
34512 goto rel;
34513 }
34514@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34515 }
34516 xid = ep->xid; /* get our XID */
34517 } else if (!ep) {
34518- atomic_inc(&mp->stats.xid_not_found);
34519+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34520 reject = FC_RJT_RX_ID; /* XID not found */
34521 goto out;
34522 }
34523@@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34524 } else {
34525 sp = &ep->seq;
34526 if (sp->id != fh->fh_seq_id) {
34527- atomic_inc(&mp->stats.seq_not_found);
34528+ atomic_inc_unchecked(&mp->stats.seq_not_found);
34529 if (f_ctl & FC_FC_END_SEQ) {
34530 /*
34531 * Update sequence_id based on incoming last
34532@@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34533
34534 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
34535 if (!ep) {
34536- atomic_inc(&mp->stats.xid_not_found);
34537+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34538 goto out;
34539 }
34540 if (ep->esb_stat & ESB_ST_COMPLETE) {
34541- atomic_inc(&mp->stats.xid_not_found);
34542+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34543 goto rel;
34544 }
34545 if (ep->rxid == FC_XID_UNKNOWN)
34546 ep->rxid = ntohs(fh->fh_rx_id);
34547 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
34548- atomic_inc(&mp->stats.xid_not_found);
34549+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34550 goto rel;
34551 }
34552 if (ep->did != ntoh24(fh->fh_s_id) &&
34553 ep->did != FC_FID_FLOGI) {
34554- atomic_inc(&mp->stats.xid_not_found);
34555+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34556 goto rel;
34557 }
34558 sof = fr_sof(fp);
34559@@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34560 sp->ssb_stat |= SSB_ST_RESP;
34561 sp->id = fh->fh_seq_id;
34562 } else if (sp->id != fh->fh_seq_id) {
34563- atomic_inc(&mp->stats.seq_not_found);
34564+ atomic_inc_unchecked(&mp->stats.seq_not_found);
34565 goto rel;
34566 }
34567
34568@@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34569 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
34570
34571 if (!sp)
34572- atomic_inc(&mp->stats.xid_not_found);
34573+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34574 else
34575- atomic_inc(&mp->stats.non_bls_resp);
34576+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
34577
34578 fc_frame_free(fp);
34579 }
34580diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
34581index db9238f..4378ed2 100644
34582--- a/drivers/scsi/libsas/sas_ata.c
34583+++ b/drivers/scsi/libsas/sas_ata.c
34584@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
34585 .postreset = ata_std_postreset,
34586 .error_handler = ata_std_error_handler,
34587 .post_internal_cmd = sas_ata_post_internal,
34588- .qc_defer = ata_std_qc_defer,
34589+ .qc_defer = ata_std_qc_defer,
34590 .qc_prep = ata_noop_qc_prep,
34591 .qc_issue = sas_ata_qc_issue,
34592 .qc_fill_rtf = sas_ata_qc_fill_rtf,
34593diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
34594index bb4c8e0..f33d849 100644
34595--- a/drivers/scsi/lpfc/lpfc.h
34596+++ b/drivers/scsi/lpfc/lpfc.h
34597@@ -425,7 +425,7 @@ struct lpfc_vport {
34598 struct dentry *debug_nodelist;
34599 struct dentry *vport_debugfs_root;
34600 struct lpfc_debugfs_trc *disc_trc;
34601- atomic_t disc_trc_cnt;
34602+ atomic_unchecked_t disc_trc_cnt;
34603 #endif
34604 uint8_t stat_data_enabled;
34605 uint8_t stat_data_blocked;
34606@@ -835,8 +835,8 @@ struct lpfc_hba {
34607 struct timer_list fabric_block_timer;
34608 unsigned long bit_flags;
34609 #define FABRIC_COMANDS_BLOCKED 0
34610- atomic_t num_rsrc_err;
34611- atomic_t num_cmd_success;
34612+ atomic_unchecked_t num_rsrc_err;
34613+ atomic_unchecked_t num_cmd_success;
34614 unsigned long last_rsrc_error_time;
34615 unsigned long last_ramp_down_time;
34616 unsigned long last_ramp_up_time;
34617@@ -866,7 +866,7 @@ struct lpfc_hba {
34618
34619 struct dentry *debug_slow_ring_trc;
34620 struct lpfc_debugfs_trc *slow_ring_trc;
34621- atomic_t slow_ring_trc_cnt;
34622+ atomic_unchecked_t slow_ring_trc_cnt;
34623 /* iDiag debugfs sub-directory */
34624 struct dentry *idiag_root;
34625 struct dentry *idiag_pci_cfg;
34626diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
34627index 2838259..a07cfb5 100644
34628--- a/drivers/scsi/lpfc/lpfc_debugfs.c
34629+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
34630@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
34631
34632 #include <linux/debugfs.h>
34633
34634-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34635+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34636 static unsigned long lpfc_debugfs_start_time = 0L;
34637
34638 /* iDiag */
34639@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
34640 lpfc_debugfs_enable = 0;
34641
34642 len = 0;
34643- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
34644+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
34645 (lpfc_debugfs_max_disc_trc - 1);
34646 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
34647 dtp = vport->disc_trc + i;
34648@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
34649 lpfc_debugfs_enable = 0;
34650
34651 len = 0;
34652- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
34653+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
34654 (lpfc_debugfs_max_slow_ring_trc - 1);
34655 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
34656 dtp = phba->slow_ring_trc + i;
34657@@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
34658 !vport || !vport->disc_trc)
34659 return;
34660
34661- index = atomic_inc_return(&vport->disc_trc_cnt) &
34662+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
34663 (lpfc_debugfs_max_disc_trc - 1);
34664 dtp = vport->disc_trc + index;
34665 dtp->fmt = fmt;
34666 dtp->data1 = data1;
34667 dtp->data2 = data2;
34668 dtp->data3 = data3;
34669- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34670+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34671 dtp->jif = jiffies;
34672 #endif
34673 return;
34674@@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
34675 !phba || !phba->slow_ring_trc)
34676 return;
34677
34678- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
34679+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
34680 (lpfc_debugfs_max_slow_ring_trc - 1);
34681 dtp = phba->slow_ring_trc + index;
34682 dtp->fmt = fmt;
34683 dtp->data1 = data1;
34684 dtp->data2 = data2;
34685 dtp->data3 = data3;
34686- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34687+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34688 dtp->jif = jiffies;
34689 #endif
34690 return;
34691@@ -3986,7 +3986,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
34692 "slow_ring buffer\n");
34693 goto debug_failed;
34694 }
34695- atomic_set(&phba->slow_ring_trc_cnt, 0);
34696+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
34697 memset(phba->slow_ring_trc, 0,
34698 (sizeof(struct lpfc_debugfs_trc) *
34699 lpfc_debugfs_max_slow_ring_trc));
34700@@ -4032,7 +4032,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
34701 "buffer\n");
34702 goto debug_failed;
34703 }
34704- atomic_set(&vport->disc_trc_cnt, 0);
34705+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
34706
34707 snprintf(name, sizeof(name), "discovery_trace");
34708 vport->debug_disc_trc =
34709diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
34710index 55bc4fc..a2a109c 100644
34711--- a/drivers/scsi/lpfc/lpfc_init.c
34712+++ b/drivers/scsi/lpfc/lpfc_init.c
34713@@ -10027,8 +10027,10 @@ lpfc_init(void)
34714 printk(LPFC_COPYRIGHT "\n");
34715
34716 if (lpfc_enable_npiv) {
34717- lpfc_transport_functions.vport_create = lpfc_vport_create;
34718- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34719+ pax_open_kernel();
34720+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
34721+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34722+ pax_close_kernel();
34723 }
34724 lpfc_transport_template =
34725 fc_attach_transport(&lpfc_transport_functions);
34726diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
34727index 2e1e54e..1af0a0d 100644
34728--- a/drivers/scsi/lpfc/lpfc_scsi.c
34729+++ b/drivers/scsi/lpfc/lpfc_scsi.c
34730@@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
34731 uint32_t evt_posted;
34732
34733 spin_lock_irqsave(&phba->hbalock, flags);
34734- atomic_inc(&phba->num_rsrc_err);
34735+ atomic_inc_unchecked(&phba->num_rsrc_err);
34736 phba->last_rsrc_error_time = jiffies;
34737
34738 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
34739@@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
34740 unsigned long flags;
34741 struct lpfc_hba *phba = vport->phba;
34742 uint32_t evt_posted;
34743- atomic_inc(&phba->num_cmd_success);
34744+ atomic_inc_unchecked(&phba->num_cmd_success);
34745
34746 if (vport->cfg_lun_queue_depth <= queue_depth)
34747 return;
34748@@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
34749 unsigned long num_rsrc_err, num_cmd_success;
34750 int i;
34751
34752- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
34753- num_cmd_success = atomic_read(&phba->num_cmd_success);
34754+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
34755+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
34756
34757 vports = lpfc_create_vport_work_array(phba);
34758 if (vports != NULL)
34759@@ -411,8 +411,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
34760 }
34761 }
34762 lpfc_destroy_vport_work_array(phba, vports);
34763- atomic_set(&phba->num_rsrc_err, 0);
34764- atomic_set(&phba->num_cmd_success, 0);
34765+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
34766+ atomic_set_unchecked(&phba->num_cmd_success, 0);
34767 }
34768
34769 /**
34770@@ -446,8 +446,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
34771 }
34772 }
34773 lpfc_destroy_vport_work_array(phba, vports);
34774- atomic_set(&phba->num_rsrc_err, 0);
34775- atomic_set(&phba->num_cmd_success, 0);
34776+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
34777+ atomic_set_unchecked(&phba->num_cmd_success, 0);
34778 }
34779
34780 /**
34781diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
34782index 5163edb..7b142bc 100644
34783--- a/drivers/scsi/pmcraid.c
34784+++ b/drivers/scsi/pmcraid.c
34785@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
34786 res->scsi_dev = scsi_dev;
34787 scsi_dev->hostdata = res;
34788 res->change_detected = 0;
34789- atomic_set(&res->read_failures, 0);
34790- atomic_set(&res->write_failures, 0);
34791+ atomic_set_unchecked(&res->read_failures, 0);
34792+ atomic_set_unchecked(&res->write_failures, 0);
34793 rc = 0;
34794 }
34795 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
34796@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
34797
34798 /* If this was a SCSI read/write command keep count of errors */
34799 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
34800- atomic_inc(&res->read_failures);
34801+ atomic_inc_unchecked(&res->read_failures);
34802 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
34803- atomic_inc(&res->write_failures);
34804+ atomic_inc_unchecked(&res->write_failures);
34805
34806 if (!RES_IS_GSCSI(res->cfg_entry) &&
34807 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
34808@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
34809 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34810 * hrrq_id assigned here in queuecommand
34811 */
34812- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34813+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34814 pinstance->num_hrrq;
34815 cmd->cmd_done = pmcraid_io_done;
34816
34817@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
34818 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34819 * hrrq_id assigned here in queuecommand
34820 */
34821- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34822+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34823 pinstance->num_hrrq;
34824
34825 if (request_size) {
34826@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
34827
34828 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
34829 /* add resources only after host is added into system */
34830- if (!atomic_read(&pinstance->expose_resources))
34831+ if (!atomic_read_unchecked(&pinstance->expose_resources))
34832 return;
34833
34834 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
34835@@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
34836 init_waitqueue_head(&pinstance->reset_wait_q);
34837
34838 atomic_set(&pinstance->outstanding_cmds, 0);
34839- atomic_set(&pinstance->last_message_id, 0);
34840- atomic_set(&pinstance->expose_resources, 0);
34841+ atomic_set_unchecked(&pinstance->last_message_id, 0);
34842+ atomic_set_unchecked(&pinstance->expose_resources, 0);
34843
34844 INIT_LIST_HEAD(&pinstance->free_res_q);
34845 INIT_LIST_HEAD(&pinstance->used_res_q);
34846@@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
34847 /* Schedule worker thread to handle CCN and take care of adding and
34848 * removing devices to OS
34849 */
34850- atomic_set(&pinstance->expose_resources, 1);
34851+ atomic_set_unchecked(&pinstance->expose_resources, 1);
34852 schedule_work(&pinstance->worker_q);
34853 return rc;
34854
34855diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
34856index ca496c7..9c791d5 100644
34857--- a/drivers/scsi/pmcraid.h
34858+++ b/drivers/scsi/pmcraid.h
34859@@ -748,7 +748,7 @@ struct pmcraid_instance {
34860 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
34861
34862 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
34863- atomic_t last_message_id;
34864+ atomic_unchecked_t last_message_id;
34865
34866 /* configuration table */
34867 struct pmcraid_config_table *cfg_table;
34868@@ -777,7 +777,7 @@ struct pmcraid_instance {
34869 atomic_t outstanding_cmds;
34870
34871 /* should add/delete resources to mid-layer now ?*/
34872- atomic_t expose_resources;
34873+ atomic_unchecked_t expose_resources;
34874
34875
34876
34877@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
34878 struct pmcraid_config_table_entry_ext cfg_entry_ext;
34879 };
34880 struct scsi_device *scsi_dev; /* Link scsi_device structure */
34881- atomic_t read_failures; /* count of failed READ commands */
34882- atomic_t write_failures; /* count of failed WRITE commands */
34883+ atomic_unchecked_t read_failures; /* count of failed READ commands */
34884+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
34885
34886 /* To indicate add/delete/modify during CCN */
34887 u8 change_detected;
34888diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
34889index fcf052c..a8025a4 100644
34890--- a/drivers/scsi/qla2xxx/qla_def.h
34891+++ b/drivers/scsi/qla2xxx/qla_def.h
34892@@ -2244,7 +2244,7 @@ struct isp_operations {
34893 int (*get_flash_version) (struct scsi_qla_host *, void *);
34894 int (*start_scsi) (srb_t *);
34895 int (*abort_isp) (struct scsi_qla_host *);
34896-};
34897+} __no_const;
34898
34899 /* MSI-X Support *************************************************************/
34900
34901diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
34902index fd5edc6..4906148 100644
34903--- a/drivers/scsi/qla4xxx/ql4_def.h
34904+++ b/drivers/scsi/qla4xxx/ql4_def.h
34905@@ -258,7 +258,7 @@ struct ddb_entry {
34906 * (4000 only) */
34907 atomic_t relogin_timer; /* Max Time to wait for
34908 * relogin to complete */
34909- atomic_t relogin_retry_count; /* Num of times relogin has been
34910+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
34911 * retried */
34912 uint32_t default_time2wait; /* Default Min time between
34913 * relogins (+aens) */
34914diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
34915index 4169c8b..a8b896b 100644
34916--- a/drivers/scsi/qla4xxx/ql4_os.c
34917+++ b/drivers/scsi/qla4xxx/ql4_os.c
34918@@ -2104,12 +2104,12 @@ void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
34919 */
34920 if (!iscsi_is_session_online(cls_sess)) {
34921 /* Reset retry relogin timer */
34922- atomic_inc(&ddb_entry->relogin_retry_count);
34923+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
34924 DEBUG2(ql4_printk(KERN_INFO, ha,
34925 "%s: index[%d] relogin timed out-retrying"
34926 " relogin (%d), retry (%d)\n", __func__,
34927 ddb_entry->fw_ddb_index,
34928- atomic_read(&ddb_entry->relogin_retry_count),
34929+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
34930 ddb_entry->default_time2wait + 4));
34931 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
34932 atomic_set(&ddb_entry->retry_relogin_timer,
34933@@ -3835,7 +3835,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
34934
34935 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
34936 atomic_set(&ddb_entry->relogin_timer, 0);
34937- atomic_set(&ddb_entry->relogin_retry_count, 0);
34938+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34939
34940 ddb_entry->default_relogin_timeout =
34941 le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
34942diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
34943index 2aeb2e9..46e3925 100644
34944--- a/drivers/scsi/scsi.c
34945+++ b/drivers/scsi/scsi.c
34946@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
34947 unsigned long timeout;
34948 int rtn = 0;
34949
34950- atomic_inc(&cmd->device->iorequest_cnt);
34951+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34952
34953 /* check if the device is still usable */
34954 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
34955diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
34956index f85cfa6..a57c9e8 100644
34957--- a/drivers/scsi/scsi_lib.c
34958+++ b/drivers/scsi/scsi_lib.c
34959@@ -1416,7 +1416,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
34960 shost = sdev->host;
34961 scsi_init_cmd_errh(cmd);
34962 cmd->result = DID_NO_CONNECT << 16;
34963- atomic_inc(&cmd->device->iorequest_cnt);
34964+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34965
34966 /*
34967 * SCSI request completion path will do scsi_device_unbusy(),
34968@@ -1442,9 +1442,9 @@ static void scsi_softirq_done(struct request *rq)
34969
34970 INIT_LIST_HEAD(&cmd->eh_entry);
34971
34972- atomic_inc(&cmd->device->iodone_cnt);
34973+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
34974 if (cmd->result)
34975- atomic_inc(&cmd->device->ioerr_cnt);
34976+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
34977
34978 disposition = scsi_decide_disposition(cmd);
34979 if (disposition != SUCCESS &&
34980diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
34981index 04c2a27..9d8bd66 100644
34982--- a/drivers/scsi/scsi_sysfs.c
34983+++ b/drivers/scsi/scsi_sysfs.c
34984@@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
34985 char *buf) \
34986 { \
34987 struct scsi_device *sdev = to_scsi_device(dev); \
34988- unsigned long long count = atomic_read(&sdev->field); \
34989+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
34990 return snprintf(buf, 20, "0x%llx\n", count); \
34991 } \
34992 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
34993diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
34994index 84a1fdf..693b0d6 100644
34995--- a/drivers/scsi/scsi_tgt_lib.c
34996+++ b/drivers/scsi/scsi_tgt_lib.c
34997@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
34998 int err;
34999
35000 dprintk("%lx %u\n", uaddr, len);
35001- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
35002+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
35003 if (err) {
35004 /*
35005 * TODO: need to fixup sg_tablesize, max_segment_size,
35006diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
35007index 1b21491..1b7f60e 100644
35008--- a/drivers/scsi/scsi_transport_fc.c
35009+++ b/drivers/scsi/scsi_transport_fc.c
35010@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
35011 * Netlink Infrastructure
35012 */
35013
35014-static atomic_t fc_event_seq;
35015+static atomic_unchecked_t fc_event_seq;
35016
35017 /**
35018 * fc_get_event_number - Obtain the next sequential FC event number
35019@@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
35020 u32
35021 fc_get_event_number(void)
35022 {
35023- return atomic_add_return(1, &fc_event_seq);
35024+ return atomic_add_return_unchecked(1, &fc_event_seq);
35025 }
35026 EXPORT_SYMBOL(fc_get_event_number);
35027
35028@@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
35029 {
35030 int error;
35031
35032- atomic_set(&fc_event_seq, 0);
35033+ atomic_set_unchecked(&fc_event_seq, 0);
35034
35035 error = transport_class_register(&fc_host_class);
35036 if (error)
35037@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
35038 char *cp;
35039
35040 *val = simple_strtoul(buf, &cp, 0);
35041- if ((*cp && (*cp != '\n')) || (*val < 0))
35042+ if (*cp && (*cp != '\n'))
35043 return -EINVAL;
35044 /*
35045 * Check for overflow; dev_loss_tmo is u32
35046diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
35047index 96029e6..4d77fa0 100644
35048--- a/drivers/scsi/scsi_transport_iscsi.c
35049+++ b/drivers/scsi/scsi_transport_iscsi.c
35050@@ -79,7 +79,7 @@ struct iscsi_internal {
35051 struct transport_container session_cont;
35052 };
35053
35054-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
35055+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
35056 static struct workqueue_struct *iscsi_eh_timer_workq;
35057
35058 static DEFINE_IDA(iscsi_sess_ida);
35059@@ -1062,7 +1062,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
35060 int err;
35061
35062 ihost = shost->shost_data;
35063- session->sid = atomic_add_return(1, &iscsi_session_nr);
35064+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
35065
35066 if (target_id == ISCSI_MAX_TARGET) {
35067 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
35068@@ -2663,7 +2663,7 @@ static __init int iscsi_transport_init(void)
35069 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
35070 ISCSI_TRANSPORT_VERSION);
35071
35072- atomic_set(&iscsi_session_nr, 0);
35073+ atomic_set_unchecked(&iscsi_session_nr, 0);
35074
35075 err = class_register(&iscsi_transport_class);
35076 if (err)
35077diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
35078index 21a045e..ec89e03 100644
35079--- a/drivers/scsi/scsi_transport_srp.c
35080+++ b/drivers/scsi/scsi_transport_srp.c
35081@@ -33,7 +33,7 @@
35082 #include "scsi_transport_srp_internal.h"
35083
35084 struct srp_host_attrs {
35085- atomic_t next_port_id;
35086+ atomic_unchecked_t next_port_id;
35087 };
35088 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
35089
35090@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
35091 struct Scsi_Host *shost = dev_to_shost(dev);
35092 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
35093
35094- atomic_set(&srp_host->next_port_id, 0);
35095+ atomic_set_unchecked(&srp_host->next_port_id, 0);
35096 return 0;
35097 }
35098
35099@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
35100 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
35101 rport->roles = ids->roles;
35102
35103- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
35104+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
35105 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
35106
35107 transport_setup_device(&rport->dev);
35108diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
35109index 441a1c5..07cece7 100644
35110--- a/drivers/scsi/sg.c
35111+++ b/drivers/scsi/sg.c
35112@@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
35113 sdp->disk->disk_name,
35114 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
35115 NULL,
35116- (char *)arg);
35117+ (char __user *)arg);
35118 case BLKTRACESTART:
35119 return blk_trace_startstop(sdp->device->request_queue, 1);
35120 case BLKTRACESTOP:
35121@@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
35122 const struct file_operations * fops;
35123 };
35124
35125-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
35126+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
35127 {"allow_dio", &adio_fops},
35128 {"debug", &debug_fops},
35129 {"def_reserved_size", &dressz_fops},
35130@@ -2327,7 +2327,7 @@ sg_proc_init(void)
35131 {
35132 int k, mask;
35133 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
35134- struct sg_proc_leaf * leaf;
35135+ const struct sg_proc_leaf * leaf;
35136
35137 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
35138 if (!sg_proc_sgp)
35139diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
35140index f64250e..1ee3049 100644
35141--- a/drivers/spi/spi-dw-pci.c
35142+++ b/drivers/spi/spi-dw-pci.c
35143@@ -149,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev)
35144 #define spi_resume NULL
35145 #endif
35146
35147-static const struct pci_device_id pci_ids[] __devinitdata = {
35148+static const struct pci_device_id pci_ids[] __devinitconst = {
35149 /* Intel MID platform SPI controller 0 */
35150 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
35151 {},
35152diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
35153index 77eae99..b7cdcc9 100644
35154--- a/drivers/spi/spi.c
35155+++ b/drivers/spi/spi.c
35156@@ -1024,7 +1024,7 @@ int spi_bus_unlock(struct spi_master *master)
35157 EXPORT_SYMBOL_GPL(spi_bus_unlock);
35158
35159 /* portable code must never pass more than 32 bytes */
35160-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
35161+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
35162
35163 static u8 *buf;
35164
35165diff --git a/drivers/staging/gma500/power.c b/drivers/staging/gma500/power.c
35166index 436fe97..4082570 100644
35167--- a/drivers/staging/gma500/power.c
35168+++ b/drivers/staging/gma500/power.c
35169@@ -266,7 +266,7 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
35170 ret = gma_resume_pci(dev->pdev);
35171 if (ret == 0) {
35172 /* FIXME: we want to defer this for Medfield/Oaktrail */
35173- gma_resume_display(dev);
35174+ gma_resume_display(dev->pdev);
35175 psb_irq_preinstall(dev);
35176 psb_irq_postinstall(dev);
35177 pm_runtime_get(&dev->pdev->dev);
35178diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/staging/hv/rndis_filter.c
35179index bafccb3..e3ac78d 100644
35180--- a/drivers/staging/hv/rndis_filter.c
35181+++ b/drivers/staging/hv/rndis_filter.c
35182@@ -42,7 +42,7 @@ struct rndis_device {
35183
35184 enum rndis_device_state state;
35185 bool link_state;
35186- atomic_t new_req_id;
35187+ atomic_unchecked_t new_req_id;
35188
35189 spinlock_t request_lock;
35190 struct list_head req_list;
35191@@ -116,7 +116,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
35192 * template
35193 */
35194 set = &rndis_msg->msg.set_req;
35195- set->req_id = atomic_inc_return(&dev->new_req_id);
35196+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35197
35198 /* Add to the request list */
35199 spin_lock_irqsave(&dev->request_lock, flags);
35200@@ -646,7 +646,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
35201
35202 /* Setup the rndis set */
35203 halt = &request->request_msg.msg.halt_req;
35204- halt->req_id = atomic_inc_return(&dev->new_req_id);
35205+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35206
35207 /* Ignore return since this msg is optional. */
35208 rndis_filter_send_request(dev, request);
35209diff --git a/drivers/staging/iio/buffer_generic.h b/drivers/staging/iio/buffer_generic.h
35210index 9e8f010..af9efb5 100644
35211--- a/drivers/staging/iio/buffer_generic.h
35212+++ b/drivers/staging/iio/buffer_generic.h
35213@@ -64,7 +64,7 @@ struct iio_buffer_access_funcs {
35214
35215 int (*is_enabled)(struct iio_buffer *buffer);
35216 int (*enable)(struct iio_buffer *buffer);
35217-};
35218+} __no_const;
35219
35220 /**
35221 * struct iio_buffer_setup_ops - buffer setup related callbacks
35222diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
35223index 8b307b4..a97ac91 100644
35224--- a/drivers/staging/octeon/ethernet-rx.c
35225+++ b/drivers/staging/octeon/ethernet-rx.c
35226@@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
35227 /* Increment RX stats for virtual ports */
35228 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
35229 #ifdef CONFIG_64BIT
35230- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
35231- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
35232+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
35233+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
35234 #else
35235- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
35236- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
35237+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
35238+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
35239 #endif
35240 }
35241 netif_receive_skb(skb);
35242@@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
35243 dev->name);
35244 */
35245 #ifdef CONFIG_64BIT
35246- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
35247+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35248 #else
35249- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
35250+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
35251 #endif
35252 dev_kfree_skb_irq(skb);
35253 }
35254diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
35255index 076f866..2308070 100644
35256--- a/drivers/staging/octeon/ethernet.c
35257+++ b/drivers/staging/octeon/ethernet.c
35258@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
35259 * since the RX tasklet also increments it.
35260 */
35261 #ifdef CONFIG_64BIT
35262- atomic64_add(rx_status.dropped_packets,
35263- (atomic64_t *)&priv->stats.rx_dropped);
35264+ atomic64_add_unchecked(rx_status.dropped_packets,
35265+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35266 #else
35267- atomic_add(rx_status.dropped_packets,
35268- (atomic_t *)&priv->stats.rx_dropped);
35269+ atomic_add_unchecked(rx_status.dropped_packets,
35270+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
35271 #endif
35272 }
35273
35274diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
35275index 7a19555..466456d 100644
35276--- a/drivers/staging/pohmelfs/inode.c
35277+++ b/drivers/staging/pohmelfs/inode.c
35278@@ -1861,7 +1861,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
35279 mutex_init(&psb->mcache_lock);
35280 psb->mcache_root = RB_ROOT;
35281 psb->mcache_timeout = msecs_to_jiffies(5000);
35282- atomic_long_set(&psb->mcache_gen, 0);
35283+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
35284
35285 psb->trans_max_pages = 100;
35286
35287@@ -1876,7 +1876,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
35288 INIT_LIST_HEAD(&psb->crypto_ready_list);
35289 INIT_LIST_HEAD(&psb->crypto_active_list);
35290
35291- atomic_set(&psb->trans_gen, 1);
35292+ atomic_set_unchecked(&psb->trans_gen, 1);
35293 atomic_long_set(&psb->total_inodes, 0);
35294
35295 mutex_init(&psb->state_lock);
35296diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
35297index e22665c..a2a9390 100644
35298--- a/drivers/staging/pohmelfs/mcache.c
35299+++ b/drivers/staging/pohmelfs/mcache.c
35300@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
35301 m->data = data;
35302 m->start = start;
35303 m->size = size;
35304- m->gen = atomic_long_inc_return(&psb->mcache_gen);
35305+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
35306
35307 mutex_lock(&psb->mcache_lock);
35308 err = pohmelfs_mcache_insert(psb, m);
35309diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
35310index 985b6b7..7699e05 100644
35311--- a/drivers/staging/pohmelfs/netfs.h
35312+++ b/drivers/staging/pohmelfs/netfs.h
35313@@ -571,14 +571,14 @@ struct pohmelfs_config;
35314 struct pohmelfs_sb {
35315 struct rb_root mcache_root;
35316 struct mutex mcache_lock;
35317- atomic_long_t mcache_gen;
35318+ atomic_long_unchecked_t mcache_gen;
35319 unsigned long mcache_timeout;
35320
35321 unsigned int idx;
35322
35323 unsigned int trans_retries;
35324
35325- atomic_t trans_gen;
35326+ atomic_unchecked_t trans_gen;
35327
35328 unsigned int crypto_attached_size;
35329 unsigned int crypto_align_size;
35330diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
35331index 06c1a74..866eebc 100644
35332--- a/drivers/staging/pohmelfs/trans.c
35333+++ b/drivers/staging/pohmelfs/trans.c
35334@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
35335 int err;
35336 struct netfs_cmd *cmd = t->iovec.iov_base;
35337
35338- t->gen = atomic_inc_return(&psb->trans_gen);
35339+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
35340
35341 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
35342 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
35343diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
35344index 86308a0..feaa925 100644
35345--- a/drivers/staging/rtl8712/rtl871x_io.h
35346+++ b/drivers/staging/rtl8712/rtl871x_io.h
35347@@ -108,7 +108,7 @@ struct _io_ops {
35348 u8 *pmem);
35349 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
35350 u8 *pmem);
35351-};
35352+} __no_const;
35353
35354 struct io_req {
35355 struct list_head list;
35356diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
35357index c7b5e8b..783d6cb 100644
35358--- a/drivers/staging/sbe-2t3e3/netdev.c
35359+++ b/drivers/staging/sbe-2t3e3/netdev.c
35360@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35361 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
35362
35363 if (rlen)
35364- if (copy_to_user(data, &resp, rlen))
35365+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
35366 return -EFAULT;
35367
35368 return 0;
35369diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
35370index be21617..0954e45 100644
35371--- a/drivers/staging/usbip/usbip_common.h
35372+++ b/drivers/staging/usbip/usbip_common.h
35373@@ -289,7 +289,7 @@ struct usbip_device {
35374 void (*shutdown)(struct usbip_device *);
35375 void (*reset)(struct usbip_device *);
35376 void (*unusable)(struct usbip_device *);
35377- } eh_ops;
35378+ } __no_const eh_ops;
35379 };
35380
35381 #if 0
35382diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
35383index 88b3298..3783eee 100644
35384--- a/drivers/staging/usbip/vhci.h
35385+++ b/drivers/staging/usbip/vhci.h
35386@@ -88,7 +88,7 @@ struct vhci_hcd {
35387 unsigned resuming:1;
35388 unsigned long re_timeout;
35389
35390- atomic_t seqnum;
35391+ atomic_unchecked_t seqnum;
35392
35393 /*
35394 * NOTE:
35395diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
35396index 2ee97e2..0420b86 100644
35397--- a/drivers/staging/usbip/vhci_hcd.c
35398+++ b/drivers/staging/usbip/vhci_hcd.c
35399@@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
35400 return;
35401 }
35402
35403- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
35404+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35405 if (priv->seqnum == 0xffff)
35406 dev_info(&urb->dev->dev, "seqnum max\n");
35407
35408@@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
35409 return -ENOMEM;
35410 }
35411
35412- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
35413+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35414 if (unlink->seqnum == 0xffff)
35415 pr_info("seqnum max\n");
35416
35417@@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
35418 vdev->rhport = rhport;
35419 }
35420
35421- atomic_set(&vhci->seqnum, 0);
35422+ atomic_set_unchecked(&vhci->seqnum, 0);
35423 spin_lock_init(&vhci->lock);
35424
35425 hcd->power_budget = 0; /* no limit */
35426diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
35427index 3872b8c..fe6d2f4 100644
35428--- a/drivers/staging/usbip/vhci_rx.c
35429+++ b/drivers/staging/usbip/vhci_rx.c
35430@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
35431 if (!urb) {
35432 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
35433 pr_info("max seqnum %d\n",
35434- atomic_read(&the_controller->seqnum));
35435+ atomic_read_unchecked(&the_controller->seqnum));
35436 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
35437 return;
35438 }
35439diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
35440index 7735027..30eed13 100644
35441--- a/drivers/staging/vt6655/hostap.c
35442+++ b/drivers/staging/vt6655/hostap.c
35443@@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
35444 *
35445 */
35446
35447+static net_device_ops_no_const apdev_netdev_ops;
35448+
35449 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35450 {
35451 PSDevice apdev_priv;
35452 struct net_device *dev = pDevice->dev;
35453 int ret;
35454- const struct net_device_ops apdev_netdev_ops = {
35455- .ndo_start_xmit = pDevice->tx_80211,
35456- };
35457
35458 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35459
35460@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35461 *apdev_priv = *pDevice;
35462 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35463
35464+ /* only half broken now */
35465+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35466 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35467
35468 pDevice->apdev->type = ARPHRD_IEEE80211;
35469diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
35470index 51b5adf..098e320 100644
35471--- a/drivers/staging/vt6656/hostap.c
35472+++ b/drivers/staging/vt6656/hostap.c
35473@@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
35474 *
35475 */
35476
35477+static net_device_ops_no_const apdev_netdev_ops;
35478+
35479 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35480 {
35481 PSDevice apdev_priv;
35482 struct net_device *dev = pDevice->dev;
35483 int ret;
35484- const struct net_device_ops apdev_netdev_ops = {
35485- .ndo_start_xmit = pDevice->tx_80211,
35486- };
35487
35488 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35489
35490@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35491 *apdev_priv = *pDevice;
35492 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35493
35494+ /* only half broken now */
35495+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35496 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35497
35498 pDevice->apdev->type = ARPHRD_IEEE80211;
35499diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
35500index 7843dfd..3db105f 100644
35501--- a/drivers/staging/wlan-ng/hfa384x_usb.c
35502+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
35503@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
35504
35505 struct usbctlx_completor {
35506 int (*complete) (struct usbctlx_completor *);
35507-};
35508+} __no_const;
35509
35510 static int
35511 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
35512diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
35513index 1ca66ea..76f1343 100644
35514--- a/drivers/staging/zcache/tmem.c
35515+++ b/drivers/staging/zcache/tmem.c
35516@@ -39,7 +39,7 @@
35517 * A tmem host implementation must use this function to register callbacks
35518 * for memory allocation.
35519 */
35520-static struct tmem_hostops tmem_hostops;
35521+static tmem_hostops_no_const tmem_hostops;
35522
35523 static void tmem_objnode_tree_init(void);
35524
35525@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
35526 * A tmem host implementation must use this function to register
35527 * callbacks for a page-accessible memory (PAM) implementation
35528 */
35529-static struct tmem_pamops tmem_pamops;
35530+static tmem_pamops_no_const tmem_pamops;
35531
35532 void tmem_register_pamops(struct tmem_pamops *m)
35533 {
35534diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
35535index ed147c4..94fc3c6 100644
35536--- a/drivers/staging/zcache/tmem.h
35537+++ b/drivers/staging/zcache/tmem.h
35538@@ -180,6 +180,7 @@ struct tmem_pamops {
35539 void (*new_obj)(struct tmem_obj *);
35540 int (*replace_in_obj)(void *, struct tmem_obj *);
35541 };
35542+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
35543 extern void tmem_register_pamops(struct tmem_pamops *m);
35544
35545 /* memory allocation methods provided by the host implementation */
35546@@ -189,6 +190,7 @@ struct tmem_hostops {
35547 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
35548 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
35549 };
35550+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
35551 extern void tmem_register_hostops(struct tmem_hostops *m);
35552
35553 /* core tmem accessor functions */
35554diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
35555index 0c1d5c73..88e90a8 100644
35556--- a/drivers/target/iscsi/iscsi_target.c
35557+++ b/drivers/target/iscsi/iscsi_target.c
35558@@ -1364,7 +1364,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
35559 * outstanding_r2ts reaches zero, go ahead and send the delayed
35560 * TASK_ABORTED status.
35561 */
35562- if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
35563+ if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
35564 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
35565 if (--cmd->outstanding_r2ts < 1) {
35566 iscsit_stop_dataout_timer(cmd);
35567diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
35568index 6845228..df77141 100644
35569--- a/drivers/target/target_core_tmr.c
35570+++ b/drivers/target/target_core_tmr.c
35571@@ -250,7 +250,7 @@ static void core_tmr_drain_task_list(
35572 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
35573 cmd->t_task_list_num,
35574 atomic_read(&cmd->t_task_cdbs_left),
35575- atomic_read(&cmd->t_task_cdbs_sent),
35576+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35577 atomic_read(&cmd->t_transport_active),
35578 atomic_read(&cmd->t_transport_stop),
35579 atomic_read(&cmd->t_transport_sent));
35580@@ -281,7 +281,7 @@ static void core_tmr_drain_task_list(
35581 pr_debug("LUN_RESET: got t_transport_active = 1 for"
35582 " task: %p, t_fe_count: %d dev: %p\n", task,
35583 fe_count, dev);
35584- atomic_set(&cmd->t_transport_aborted, 1);
35585+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35586 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35587
35588 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35589@@ -289,7 +289,7 @@ static void core_tmr_drain_task_list(
35590 }
35591 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
35592 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
35593- atomic_set(&cmd->t_transport_aborted, 1);
35594+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35595 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35596
35597 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35598diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
35599index e4ddb93..2fc6e0f 100644
35600--- a/drivers/target/target_core_transport.c
35601+++ b/drivers/target/target_core_transport.c
35602@@ -1343,7 +1343,7 @@ struct se_device *transport_add_device_to_core_hba(
35603
35604 dev->queue_depth = dev_limits->queue_depth;
35605 atomic_set(&dev->depth_left, dev->queue_depth);
35606- atomic_set(&dev->dev_ordered_id, 0);
35607+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
35608
35609 se_dev_set_default_attribs(dev, dev_limits);
35610
35611@@ -1530,7 +1530,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
35612 * Used to determine when ORDERED commands should go from
35613 * Dormant to Active status.
35614 */
35615- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
35616+ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
35617 smp_mb__after_atomic_inc();
35618 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
35619 cmd->se_ordered_id, cmd->sam_task_attr,
35620@@ -1800,7 +1800,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
35621 " t_transport_active: %d t_transport_stop: %d"
35622 " t_transport_sent: %d\n", cmd->t_task_list_num,
35623 atomic_read(&cmd->t_task_cdbs_left),
35624- atomic_read(&cmd->t_task_cdbs_sent),
35625+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35626 atomic_read(&cmd->t_task_cdbs_ex_left),
35627 atomic_read(&cmd->t_transport_active),
35628 atomic_read(&cmd->t_transport_stop),
35629@@ -2089,9 +2089,9 @@ check_depth:
35630
35631 spin_lock_irqsave(&cmd->t_state_lock, flags);
35632 task->task_flags |= (TF_ACTIVE | TF_SENT);
35633- atomic_inc(&cmd->t_task_cdbs_sent);
35634+ atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
35635
35636- if (atomic_read(&cmd->t_task_cdbs_sent) ==
35637+ if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
35638 cmd->t_task_list_num)
35639 atomic_set(&cmd->t_transport_sent, 1);
35640
35641@@ -4296,7 +4296,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
35642 atomic_set(&cmd->transport_lun_stop, 0);
35643 }
35644 if (!atomic_read(&cmd->t_transport_active) ||
35645- atomic_read(&cmd->t_transport_aborted)) {
35646+ atomic_read_unchecked(&cmd->t_transport_aborted)) {
35647 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35648 return false;
35649 }
35650@@ -4545,7 +4545,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
35651 {
35652 int ret = 0;
35653
35654- if (atomic_read(&cmd->t_transport_aborted) != 0) {
35655+ if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
35656 if (!send_status ||
35657 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
35658 return 1;
35659@@ -4582,7 +4582,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
35660 */
35661 if (cmd->data_direction == DMA_TO_DEVICE) {
35662 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
35663- atomic_inc(&cmd->t_transport_aborted);
35664+ atomic_inc_unchecked(&cmd->t_transport_aborted);
35665 smp_mb__after_atomic_inc();
35666 }
35667 }
35668diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
35669index b9040be..e3f5aab 100644
35670--- a/drivers/tty/hvc/hvcs.c
35671+++ b/drivers/tty/hvc/hvcs.c
35672@@ -83,6 +83,7 @@
35673 #include <asm/hvcserver.h>
35674 #include <asm/uaccess.h>
35675 #include <asm/vio.h>
35676+#include <asm/local.h>
35677
35678 /*
35679 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
35680@@ -270,7 +271,7 @@ struct hvcs_struct {
35681 unsigned int index;
35682
35683 struct tty_struct *tty;
35684- int open_count;
35685+ local_t open_count;
35686
35687 /*
35688 * Used to tell the driver kernel_thread what operations need to take
35689@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
35690
35691 spin_lock_irqsave(&hvcsd->lock, flags);
35692
35693- if (hvcsd->open_count > 0) {
35694+ if (local_read(&hvcsd->open_count) > 0) {
35695 spin_unlock_irqrestore(&hvcsd->lock, flags);
35696 printk(KERN_INFO "HVCS: vterm state unchanged. "
35697 "The hvcs device node is still in use.\n");
35698@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
35699 if ((retval = hvcs_partner_connect(hvcsd)))
35700 goto error_release;
35701
35702- hvcsd->open_count = 1;
35703+ local_set(&hvcsd->open_count, 1);
35704 hvcsd->tty = tty;
35705 tty->driver_data = hvcsd;
35706
35707@@ -1179,7 +1180,7 @@ fast_open:
35708
35709 spin_lock_irqsave(&hvcsd->lock, flags);
35710 kref_get(&hvcsd->kref);
35711- hvcsd->open_count++;
35712+ local_inc(&hvcsd->open_count);
35713 hvcsd->todo_mask |= HVCS_SCHED_READ;
35714 spin_unlock_irqrestore(&hvcsd->lock, flags);
35715
35716@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
35717 hvcsd = tty->driver_data;
35718
35719 spin_lock_irqsave(&hvcsd->lock, flags);
35720- if (--hvcsd->open_count == 0) {
35721+ if (local_dec_and_test(&hvcsd->open_count)) {
35722
35723 vio_disable_interrupts(hvcsd->vdev);
35724
35725@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
35726 free_irq(irq, hvcsd);
35727 kref_put(&hvcsd->kref, destroy_hvcs_struct);
35728 return;
35729- } else if (hvcsd->open_count < 0) {
35730+ } else if (local_read(&hvcsd->open_count) < 0) {
35731 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
35732 " is missmanaged.\n",
35733- hvcsd->vdev->unit_address, hvcsd->open_count);
35734+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
35735 }
35736
35737 spin_unlock_irqrestore(&hvcsd->lock, flags);
35738@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
35739
35740 spin_lock_irqsave(&hvcsd->lock, flags);
35741 /* Preserve this so that we know how many kref refs to put */
35742- temp_open_count = hvcsd->open_count;
35743+ temp_open_count = local_read(&hvcsd->open_count);
35744
35745 /*
35746 * Don't kref put inside the spinlock because the destruction
35747@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
35748 hvcsd->tty->driver_data = NULL;
35749 hvcsd->tty = NULL;
35750
35751- hvcsd->open_count = 0;
35752+ local_set(&hvcsd->open_count, 0);
35753
35754 /* This will drop any buffered data on the floor which is OK in a hangup
35755 * scenario. */
35756@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
35757 * the middle of a write operation? This is a crummy place to do this
35758 * but we want to keep it all in the spinlock.
35759 */
35760- if (hvcsd->open_count <= 0) {
35761+ if (local_read(&hvcsd->open_count) <= 0) {
35762 spin_unlock_irqrestore(&hvcsd->lock, flags);
35763 return -ENODEV;
35764 }
35765@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
35766 {
35767 struct hvcs_struct *hvcsd = tty->driver_data;
35768
35769- if (!hvcsd || hvcsd->open_count <= 0)
35770+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
35771 return 0;
35772
35773 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
35774diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
35775index ef92869..f4ebd88 100644
35776--- a/drivers/tty/ipwireless/tty.c
35777+++ b/drivers/tty/ipwireless/tty.c
35778@@ -29,6 +29,7 @@
35779 #include <linux/tty_driver.h>
35780 #include <linux/tty_flip.h>
35781 #include <linux/uaccess.h>
35782+#include <asm/local.h>
35783
35784 #include "tty.h"
35785 #include "network.h"
35786@@ -51,7 +52,7 @@ struct ipw_tty {
35787 int tty_type;
35788 struct ipw_network *network;
35789 struct tty_struct *linux_tty;
35790- int open_count;
35791+ local_t open_count;
35792 unsigned int control_lines;
35793 struct mutex ipw_tty_mutex;
35794 int tx_bytes_queued;
35795@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
35796 mutex_unlock(&tty->ipw_tty_mutex);
35797 return -ENODEV;
35798 }
35799- if (tty->open_count == 0)
35800+ if (local_read(&tty->open_count) == 0)
35801 tty->tx_bytes_queued = 0;
35802
35803- tty->open_count++;
35804+ local_inc(&tty->open_count);
35805
35806 tty->linux_tty = linux_tty;
35807 linux_tty->driver_data = tty;
35808@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
35809
35810 static void do_ipw_close(struct ipw_tty *tty)
35811 {
35812- tty->open_count--;
35813-
35814- if (tty->open_count == 0) {
35815+ if (local_dec_return(&tty->open_count) == 0) {
35816 struct tty_struct *linux_tty = tty->linux_tty;
35817
35818 if (linux_tty != NULL) {
35819@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
35820 return;
35821
35822 mutex_lock(&tty->ipw_tty_mutex);
35823- if (tty->open_count == 0) {
35824+ if (local_read(&tty->open_count) == 0) {
35825 mutex_unlock(&tty->ipw_tty_mutex);
35826 return;
35827 }
35828@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
35829 return;
35830 }
35831
35832- if (!tty->open_count) {
35833+ if (!local_read(&tty->open_count)) {
35834 mutex_unlock(&tty->ipw_tty_mutex);
35835 return;
35836 }
35837@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
35838 return -ENODEV;
35839
35840 mutex_lock(&tty->ipw_tty_mutex);
35841- if (!tty->open_count) {
35842+ if (!local_read(&tty->open_count)) {
35843 mutex_unlock(&tty->ipw_tty_mutex);
35844 return -EINVAL;
35845 }
35846@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
35847 if (!tty)
35848 return -ENODEV;
35849
35850- if (!tty->open_count)
35851+ if (!local_read(&tty->open_count))
35852 return -EINVAL;
35853
35854 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
35855@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
35856 if (!tty)
35857 return 0;
35858
35859- if (!tty->open_count)
35860+ if (!local_read(&tty->open_count))
35861 return 0;
35862
35863 return tty->tx_bytes_queued;
35864@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
35865 if (!tty)
35866 return -ENODEV;
35867
35868- if (!tty->open_count)
35869+ if (!local_read(&tty->open_count))
35870 return -EINVAL;
35871
35872 return get_control_lines(tty);
35873@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
35874 if (!tty)
35875 return -ENODEV;
35876
35877- if (!tty->open_count)
35878+ if (!local_read(&tty->open_count))
35879 return -EINVAL;
35880
35881 return set_control_lines(tty, set, clear);
35882@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
35883 if (!tty)
35884 return -ENODEV;
35885
35886- if (!tty->open_count)
35887+ if (!local_read(&tty->open_count))
35888 return -EINVAL;
35889
35890 /* FIXME: Exactly how is the tty object locked here .. */
35891@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
35892 against a parallel ioctl etc */
35893 mutex_lock(&ttyj->ipw_tty_mutex);
35894 }
35895- while (ttyj->open_count)
35896+ while (local_read(&ttyj->open_count))
35897 do_ipw_close(ttyj);
35898 ipwireless_disassociate_network_ttys(network,
35899 ttyj->channel_idx);
35900diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
35901index fc7bbba..9527e93 100644
35902--- a/drivers/tty/n_gsm.c
35903+++ b/drivers/tty/n_gsm.c
35904@@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
35905 kref_init(&dlci->ref);
35906 mutex_init(&dlci->mutex);
35907 dlci->fifo = &dlci->_fifo;
35908- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
35909+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
35910 kfree(dlci);
35911 return NULL;
35912 }
35913diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
35914index 39d6ab6..eb97f41 100644
35915--- a/drivers/tty/n_tty.c
35916+++ b/drivers/tty/n_tty.c
35917@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
35918 {
35919 *ops = tty_ldisc_N_TTY;
35920 ops->owner = NULL;
35921- ops->refcount = ops->flags = 0;
35922+ atomic_set(&ops->refcount, 0);
35923+ ops->flags = 0;
35924 }
35925 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
35926diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
35927index e18604b..a7d5a11 100644
35928--- a/drivers/tty/pty.c
35929+++ b/drivers/tty/pty.c
35930@@ -773,8 +773,10 @@ static void __init unix98_pty_init(void)
35931 register_sysctl_table(pty_root_table);
35932
35933 /* Now create the /dev/ptmx special device */
35934+ pax_open_kernel();
35935 tty_default_fops(&ptmx_fops);
35936- ptmx_fops.open = ptmx_open;
35937+ *(void **)&ptmx_fops.open = ptmx_open;
35938+ pax_close_kernel();
35939
35940 cdev_init(&ptmx_cdev, &ptmx_fops);
35941 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
35942diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
35943index 2b42a01..32a2ed3 100644
35944--- a/drivers/tty/serial/kgdboc.c
35945+++ b/drivers/tty/serial/kgdboc.c
35946@@ -24,8 +24,9 @@
35947 #define MAX_CONFIG_LEN 40
35948
35949 static struct kgdb_io kgdboc_io_ops;
35950+static struct kgdb_io kgdboc_io_ops_console;
35951
35952-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
35953+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
35954 static int configured = -1;
35955
35956 static char config[MAX_CONFIG_LEN];
35957@@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
35958 kgdboc_unregister_kbd();
35959 if (configured == 1)
35960 kgdb_unregister_io_module(&kgdboc_io_ops);
35961+ else if (configured == 2)
35962+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
35963 }
35964
35965 static int configure_kgdboc(void)
35966@@ -157,13 +160,13 @@ static int configure_kgdboc(void)
35967 int err;
35968 char *cptr = config;
35969 struct console *cons;
35970+ int is_console = 0;
35971
35972 err = kgdboc_option_setup(config);
35973 if (err || !strlen(config) || isspace(config[0]))
35974 goto noconfig;
35975
35976 err = -ENODEV;
35977- kgdboc_io_ops.is_console = 0;
35978 kgdb_tty_driver = NULL;
35979
35980 kgdboc_use_kms = 0;
35981@@ -184,7 +187,7 @@ static int configure_kgdboc(void)
35982 int idx;
35983 if (cons->device && cons->device(cons, &idx) == p &&
35984 idx == tty_line) {
35985- kgdboc_io_ops.is_console = 1;
35986+ is_console = 1;
35987 break;
35988 }
35989 cons = cons->next;
35990@@ -194,12 +197,16 @@ static int configure_kgdboc(void)
35991 kgdb_tty_line = tty_line;
35992
35993 do_register:
35994- err = kgdb_register_io_module(&kgdboc_io_ops);
35995+ if (is_console) {
35996+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
35997+ configured = 2;
35998+ } else {
35999+ err = kgdb_register_io_module(&kgdboc_io_ops);
36000+ configured = 1;
36001+ }
36002 if (err)
36003 goto noconfig;
36004
36005- configured = 1;
36006-
36007 return 0;
36008
36009 noconfig:
36010@@ -213,7 +220,7 @@ noconfig:
36011 static int __init init_kgdboc(void)
36012 {
36013 /* Already configured? */
36014- if (configured == 1)
36015+ if (configured >= 1)
36016 return 0;
36017
36018 return configure_kgdboc();
36019@@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
36020 if (config[len - 1] == '\n')
36021 config[len - 1] = '\0';
36022
36023- if (configured == 1)
36024+ if (configured >= 1)
36025 cleanup_kgdboc();
36026
36027 /* Go and configure with the new params. */
36028@@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
36029 .post_exception = kgdboc_post_exp_handler,
36030 };
36031
36032+static struct kgdb_io kgdboc_io_ops_console = {
36033+ .name = "kgdboc",
36034+ .read_char = kgdboc_get_char,
36035+ .write_char = kgdboc_put_char,
36036+ .pre_exception = kgdboc_pre_exp_handler,
36037+ .post_exception = kgdboc_post_exp_handler,
36038+ .is_console = 1
36039+};
36040+
36041 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
36042 /* This is only available if kgdboc is a built in for early debugging */
36043 static int __init kgdboc_early_init(char *opt)
36044diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
36045index 05085be..67eadb0 100644
36046--- a/drivers/tty/tty_io.c
36047+++ b/drivers/tty/tty_io.c
36048@@ -3240,7 +3240,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
36049
36050 void tty_default_fops(struct file_operations *fops)
36051 {
36052- *fops = tty_fops;
36053+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
36054 }
36055
36056 /*
36057diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
36058index 8e0924f..4204eb4 100644
36059--- a/drivers/tty/tty_ldisc.c
36060+++ b/drivers/tty/tty_ldisc.c
36061@@ -75,7 +75,7 @@ static void put_ldisc(struct tty_ldisc *ld)
36062 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
36063 struct tty_ldisc_ops *ldo = ld->ops;
36064
36065- ldo->refcount--;
36066+ atomic_dec(&ldo->refcount);
36067 module_put(ldo->owner);
36068 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
36069
36070@@ -110,7 +110,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
36071 spin_lock_irqsave(&tty_ldisc_lock, flags);
36072 tty_ldiscs[disc] = new_ldisc;
36073 new_ldisc->num = disc;
36074- new_ldisc->refcount = 0;
36075+ atomic_set(&new_ldisc->refcount, 0);
36076 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
36077
36078 return ret;
36079@@ -138,7 +138,7 @@ int tty_unregister_ldisc(int disc)
36080 return -EINVAL;
36081
36082 spin_lock_irqsave(&tty_ldisc_lock, flags);
36083- if (tty_ldiscs[disc]->refcount)
36084+ if (atomic_read(&tty_ldiscs[disc]->refcount))
36085 ret = -EBUSY;
36086 else
36087 tty_ldiscs[disc] = NULL;
36088@@ -159,7 +159,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
36089 if (ldops) {
36090 ret = ERR_PTR(-EAGAIN);
36091 if (try_module_get(ldops->owner)) {
36092- ldops->refcount++;
36093+ atomic_inc(&ldops->refcount);
36094 ret = ldops;
36095 }
36096 }
36097@@ -172,7 +172,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
36098 unsigned long flags;
36099
36100 spin_lock_irqsave(&tty_ldisc_lock, flags);
36101- ldops->refcount--;
36102+ atomic_dec(&ldops->refcount);
36103 module_put(ldops->owner);
36104 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
36105 }
36106diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
36107index a605549..6bd3c96 100644
36108--- a/drivers/tty/vt/keyboard.c
36109+++ b/drivers/tty/vt/keyboard.c
36110@@ -657,6 +657,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
36111 kbd->kbdmode == VC_OFF) &&
36112 value != KVAL(K_SAK))
36113 return; /* SAK is allowed even in raw mode */
36114+
36115+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
36116+ {
36117+ void *func = fn_handler[value];
36118+ if (func == fn_show_state || func == fn_show_ptregs ||
36119+ func == fn_show_mem)
36120+ return;
36121+ }
36122+#endif
36123+
36124 fn_handler[value](vc);
36125 }
36126
36127diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
36128index 65447c5..0526f0a 100644
36129--- a/drivers/tty/vt/vt_ioctl.c
36130+++ b/drivers/tty/vt/vt_ioctl.c
36131@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
36132 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
36133 return -EFAULT;
36134
36135- if (!capable(CAP_SYS_TTY_CONFIG))
36136- perm = 0;
36137-
36138 switch (cmd) {
36139 case KDGKBENT:
36140 key_map = key_maps[s];
36141@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
36142 val = (i ? K_HOLE : K_NOSUCHMAP);
36143 return put_user(val, &user_kbe->kb_value);
36144 case KDSKBENT:
36145+ if (!capable(CAP_SYS_TTY_CONFIG))
36146+ perm = 0;
36147+
36148 if (!perm)
36149 return -EPERM;
36150 if (!i && v == K_NOSUCHMAP) {
36151@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
36152 int i, j, k;
36153 int ret;
36154
36155- if (!capable(CAP_SYS_TTY_CONFIG))
36156- perm = 0;
36157-
36158 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
36159 if (!kbs) {
36160 ret = -ENOMEM;
36161@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
36162 kfree(kbs);
36163 return ((p && *p) ? -EOVERFLOW : 0);
36164 case KDSKBSENT:
36165+ if (!capable(CAP_SYS_TTY_CONFIG))
36166+ perm = 0;
36167+
36168 if (!perm) {
36169 ret = -EPERM;
36170 goto reterr;
36171diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
36172index a783d53..cb30d94 100644
36173--- a/drivers/uio/uio.c
36174+++ b/drivers/uio/uio.c
36175@@ -25,6 +25,7 @@
36176 #include <linux/kobject.h>
36177 #include <linux/cdev.h>
36178 #include <linux/uio_driver.h>
36179+#include <asm/local.h>
36180
36181 #define UIO_MAX_DEVICES (1U << MINORBITS)
36182
36183@@ -32,10 +33,10 @@ struct uio_device {
36184 struct module *owner;
36185 struct device *dev;
36186 int minor;
36187- atomic_t event;
36188+ atomic_unchecked_t event;
36189 struct fasync_struct *async_queue;
36190 wait_queue_head_t wait;
36191- int vma_count;
36192+ local_t vma_count;
36193 struct uio_info *info;
36194 struct kobject *map_dir;
36195 struct kobject *portio_dir;
36196@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
36197 struct device_attribute *attr, char *buf)
36198 {
36199 struct uio_device *idev = dev_get_drvdata(dev);
36200- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
36201+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
36202 }
36203
36204 static struct device_attribute uio_class_attributes[] = {
36205@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
36206 {
36207 struct uio_device *idev = info->uio_dev;
36208
36209- atomic_inc(&idev->event);
36210+ atomic_inc_unchecked(&idev->event);
36211 wake_up_interruptible(&idev->wait);
36212 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
36213 }
36214@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
36215 }
36216
36217 listener->dev = idev;
36218- listener->event_count = atomic_read(&idev->event);
36219+ listener->event_count = atomic_read_unchecked(&idev->event);
36220 filep->private_data = listener;
36221
36222 if (idev->info->open) {
36223@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
36224 return -EIO;
36225
36226 poll_wait(filep, &idev->wait, wait);
36227- if (listener->event_count != atomic_read(&idev->event))
36228+ if (listener->event_count != atomic_read_unchecked(&idev->event))
36229 return POLLIN | POLLRDNORM;
36230 return 0;
36231 }
36232@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
36233 do {
36234 set_current_state(TASK_INTERRUPTIBLE);
36235
36236- event_count = atomic_read(&idev->event);
36237+ event_count = atomic_read_unchecked(&idev->event);
36238 if (event_count != listener->event_count) {
36239 if (copy_to_user(buf, &event_count, count))
36240 retval = -EFAULT;
36241@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
36242 static void uio_vma_open(struct vm_area_struct *vma)
36243 {
36244 struct uio_device *idev = vma->vm_private_data;
36245- idev->vma_count++;
36246+ local_inc(&idev->vma_count);
36247 }
36248
36249 static void uio_vma_close(struct vm_area_struct *vma)
36250 {
36251 struct uio_device *idev = vma->vm_private_data;
36252- idev->vma_count--;
36253+ local_dec(&idev->vma_count);
36254 }
36255
36256 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
36257@@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
36258 idev->owner = owner;
36259 idev->info = info;
36260 init_waitqueue_head(&idev->wait);
36261- atomic_set(&idev->event, 0);
36262+ atomic_set_unchecked(&idev->event, 0);
36263
36264 ret = uio_get_minor(idev);
36265 if (ret)
36266diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
36267index a845f8b..4f54072 100644
36268--- a/drivers/usb/atm/cxacru.c
36269+++ b/drivers/usb/atm/cxacru.c
36270@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
36271 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
36272 if (ret < 2)
36273 return -EINVAL;
36274- if (index < 0 || index > 0x7f)
36275+ if (index > 0x7f)
36276 return -EINVAL;
36277 pos += tmp;
36278
36279diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
36280index d3448ca..d2864ca 100644
36281--- a/drivers/usb/atm/usbatm.c
36282+++ b/drivers/usb/atm/usbatm.c
36283@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36284 if (printk_ratelimit())
36285 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
36286 __func__, vpi, vci);
36287- atomic_inc(&vcc->stats->rx_err);
36288+ atomic_inc_unchecked(&vcc->stats->rx_err);
36289 return;
36290 }
36291
36292@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36293 if (length > ATM_MAX_AAL5_PDU) {
36294 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
36295 __func__, length, vcc);
36296- atomic_inc(&vcc->stats->rx_err);
36297+ atomic_inc_unchecked(&vcc->stats->rx_err);
36298 goto out;
36299 }
36300
36301@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36302 if (sarb->len < pdu_length) {
36303 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
36304 __func__, pdu_length, sarb->len, vcc);
36305- atomic_inc(&vcc->stats->rx_err);
36306+ atomic_inc_unchecked(&vcc->stats->rx_err);
36307 goto out;
36308 }
36309
36310 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
36311 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
36312 __func__, vcc);
36313- atomic_inc(&vcc->stats->rx_err);
36314+ atomic_inc_unchecked(&vcc->stats->rx_err);
36315 goto out;
36316 }
36317
36318@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36319 if (printk_ratelimit())
36320 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
36321 __func__, length);
36322- atomic_inc(&vcc->stats->rx_drop);
36323+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36324 goto out;
36325 }
36326
36327@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36328
36329 vcc->push(vcc, skb);
36330
36331- atomic_inc(&vcc->stats->rx);
36332+ atomic_inc_unchecked(&vcc->stats->rx);
36333 out:
36334 skb_trim(sarb, 0);
36335 }
36336@@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
36337 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
36338
36339 usbatm_pop(vcc, skb);
36340- atomic_inc(&vcc->stats->tx);
36341+ atomic_inc_unchecked(&vcc->stats->tx);
36342
36343 skb = skb_dequeue(&instance->sndqueue);
36344 }
36345@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
36346 if (!left--)
36347 return sprintf(page,
36348 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
36349- atomic_read(&atm_dev->stats.aal5.tx),
36350- atomic_read(&atm_dev->stats.aal5.tx_err),
36351- atomic_read(&atm_dev->stats.aal5.rx),
36352- atomic_read(&atm_dev->stats.aal5.rx_err),
36353- atomic_read(&atm_dev->stats.aal5.rx_drop));
36354+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
36355+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
36356+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
36357+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
36358+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
36359
36360 if (!left--) {
36361 if (instance->disconnected)
36362diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
36363index d956965..4179a77 100644
36364--- a/drivers/usb/core/devices.c
36365+++ b/drivers/usb/core/devices.c
36366@@ -126,7 +126,7 @@ static const char format_endpt[] =
36367 * time it gets called.
36368 */
36369 static struct device_connect_event {
36370- atomic_t count;
36371+ atomic_unchecked_t count;
36372 wait_queue_head_t wait;
36373 } device_event = {
36374 .count = ATOMIC_INIT(1),
36375@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
36376
36377 void usbfs_conn_disc_event(void)
36378 {
36379- atomic_add(2, &device_event.count);
36380+ atomic_add_unchecked(2, &device_event.count);
36381 wake_up(&device_event.wait);
36382 }
36383
36384@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
36385
36386 poll_wait(file, &device_event.wait, wait);
36387
36388- event_count = atomic_read(&device_event.count);
36389+ event_count = atomic_read_unchecked(&device_event.count);
36390 if (file->f_version != event_count) {
36391 file->f_version = event_count;
36392 return POLLIN | POLLRDNORM;
36393diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
36394index b3bdfed..a9460e0 100644
36395--- a/drivers/usb/core/message.c
36396+++ b/drivers/usb/core/message.c
36397@@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
36398 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
36399 if (buf) {
36400 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
36401- if (len > 0) {
36402- smallbuf = kmalloc(++len, GFP_NOIO);
36403+ if (len++ > 0) {
36404+ smallbuf = kmalloc(len, GFP_NOIO);
36405 if (!smallbuf)
36406 return buf;
36407 memcpy(smallbuf, buf, len);
36408diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
36409index 1fc8f12..20647c1 100644
36410--- a/drivers/usb/early/ehci-dbgp.c
36411+++ b/drivers/usb/early/ehci-dbgp.c
36412@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
36413
36414 #ifdef CONFIG_KGDB
36415 static struct kgdb_io kgdbdbgp_io_ops;
36416-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
36417+static struct kgdb_io kgdbdbgp_io_ops_console;
36418+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
36419 #else
36420 #define dbgp_kgdb_mode (0)
36421 #endif
36422@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
36423 .write_char = kgdbdbgp_write_char,
36424 };
36425
36426+static struct kgdb_io kgdbdbgp_io_ops_console = {
36427+ .name = "kgdbdbgp",
36428+ .read_char = kgdbdbgp_read_char,
36429+ .write_char = kgdbdbgp_write_char,
36430+ .is_console = 1
36431+};
36432+
36433 static int kgdbdbgp_wait_time;
36434
36435 static int __init kgdbdbgp_parse_config(char *str)
36436@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
36437 ptr++;
36438 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
36439 }
36440- kgdb_register_io_module(&kgdbdbgp_io_ops);
36441- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
36442+ if (early_dbgp_console.index != -1)
36443+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
36444+ else
36445+ kgdb_register_io_module(&kgdbdbgp_io_ops);
36446
36447 return 0;
36448 }
36449diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
36450index d6bea3e..60b250e 100644
36451--- a/drivers/usb/wusbcore/wa-hc.h
36452+++ b/drivers/usb/wusbcore/wa-hc.h
36453@@ -192,7 +192,7 @@ struct wahc {
36454 struct list_head xfer_delayed_list;
36455 spinlock_t xfer_list_lock;
36456 struct work_struct xfer_work;
36457- atomic_t xfer_id_count;
36458+ atomic_unchecked_t xfer_id_count;
36459 };
36460
36461
36462@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
36463 INIT_LIST_HEAD(&wa->xfer_delayed_list);
36464 spin_lock_init(&wa->xfer_list_lock);
36465 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
36466- atomic_set(&wa->xfer_id_count, 1);
36467+ atomic_set_unchecked(&wa->xfer_id_count, 1);
36468 }
36469
36470 /**
36471diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
36472index 57c01ab..8a05959 100644
36473--- a/drivers/usb/wusbcore/wa-xfer.c
36474+++ b/drivers/usb/wusbcore/wa-xfer.c
36475@@ -296,7 +296,7 @@ out:
36476 */
36477 static void wa_xfer_id_init(struct wa_xfer *xfer)
36478 {
36479- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
36480+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
36481 }
36482
36483 /*
36484diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
36485index c14c42b..f955cc2 100644
36486--- a/drivers/vhost/vhost.c
36487+++ b/drivers/vhost/vhost.c
36488@@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
36489 return 0;
36490 }
36491
36492-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
36493+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
36494 {
36495 struct file *eventfp, *filep = NULL,
36496 *pollstart = NULL, *pollstop = NULL;
36497diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
36498index b0b2ac3..89a4399 100644
36499--- a/drivers/video/aty/aty128fb.c
36500+++ b/drivers/video/aty/aty128fb.c
36501@@ -148,7 +148,7 @@ enum {
36502 };
36503
36504 /* Must match above enum */
36505-static const char *r128_family[] __devinitdata = {
36506+static const char *r128_family[] __devinitconst = {
36507 "AGP",
36508 "PCI",
36509 "PRO AGP",
36510diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
36511index 5c3960d..15cf8fc 100644
36512--- a/drivers/video/fbcmap.c
36513+++ b/drivers/video/fbcmap.c
36514@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
36515 rc = -ENODEV;
36516 goto out;
36517 }
36518- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
36519- !info->fbops->fb_setcmap)) {
36520+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
36521 rc = -EINVAL;
36522 goto out1;
36523 }
36524diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
36525index ad93629..e020fc3 100644
36526--- a/drivers/video/fbmem.c
36527+++ b/drivers/video/fbmem.c
36528@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
36529 image->dx += image->width + 8;
36530 }
36531 } else if (rotate == FB_ROTATE_UD) {
36532- for (x = 0; x < num && image->dx >= 0; x++) {
36533+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
36534 info->fbops->fb_imageblit(info, image);
36535 image->dx -= image->width + 8;
36536 }
36537@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
36538 image->dy += image->height + 8;
36539 }
36540 } else if (rotate == FB_ROTATE_CCW) {
36541- for (x = 0; x < num && image->dy >= 0; x++) {
36542+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
36543 info->fbops->fb_imageblit(info, image);
36544 image->dy -= image->height + 8;
36545 }
36546@@ -1143,7 +1143,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
36547 return -EFAULT;
36548 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
36549 return -EINVAL;
36550- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
36551+ if (con2fb.framebuffer >= FB_MAX)
36552 return -EINVAL;
36553 if (!registered_fb[con2fb.framebuffer])
36554 request_module("fb%d", con2fb.framebuffer);
36555diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
36556index 5a5d092..265c5ed 100644
36557--- a/drivers/video/geode/gx1fb_core.c
36558+++ b/drivers/video/geode/gx1fb_core.c
36559@@ -29,7 +29,7 @@ static int crt_option = 1;
36560 static char panel_option[32] = "";
36561
36562 /* Modes relevant to the GX1 (taken from modedb.c) */
36563-static const struct fb_videomode __devinitdata gx1_modedb[] = {
36564+static const struct fb_videomode __devinitconst gx1_modedb[] = {
36565 /* 640x480-60 VESA */
36566 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
36567 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
36568diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
36569index 0fad23f..0e9afa4 100644
36570--- a/drivers/video/gxt4500.c
36571+++ b/drivers/video/gxt4500.c
36572@@ -156,7 +156,7 @@ struct gxt4500_par {
36573 static char *mode_option;
36574
36575 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
36576-static const struct fb_videomode defaultmode __devinitdata = {
36577+static const struct fb_videomode defaultmode __devinitconst = {
36578 .refresh = 60,
36579 .xres = 1280,
36580 .yres = 1024,
36581@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
36582 return 0;
36583 }
36584
36585-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
36586+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
36587 .id = "IBM GXT4500P",
36588 .type = FB_TYPE_PACKED_PIXELS,
36589 .visual = FB_VISUAL_PSEUDOCOLOR,
36590diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
36591index 7672d2e..b56437f 100644
36592--- a/drivers/video/i810/i810_accel.c
36593+++ b/drivers/video/i810/i810_accel.c
36594@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
36595 }
36596 }
36597 printk("ringbuffer lockup!!!\n");
36598+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
36599 i810_report_error(mmio);
36600 par->dev_flags |= LOCKUP;
36601 info->pixmap.scan_align = 1;
36602diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
36603index 318f6fb..9a389c1 100644
36604--- a/drivers/video/i810/i810_main.c
36605+++ b/drivers/video/i810/i810_main.c
36606@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
36607 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
36608
36609 /* PCI */
36610-static const char *i810_pci_list[] __devinitdata = {
36611+static const char *i810_pci_list[] __devinitconst = {
36612 "Intel(R) 810 Framebuffer Device" ,
36613 "Intel(R) 810-DC100 Framebuffer Device" ,
36614 "Intel(R) 810E Framebuffer Device" ,
36615diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
36616index de36693..3c63fc2 100644
36617--- a/drivers/video/jz4740_fb.c
36618+++ b/drivers/video/jz4740_fb.c
36619@@ -136,7 +136,7 @@ struct jzfb {
36620 uint32_t pseudo_palette[16];
36621 };
36622
36623-static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
36624+static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
36625 .id = "JZ4740 FB",
36626 .type = FB_TYPE_PACKED_PIXELS,
36627 .visual = FB_VISUAL_TRUECOLOR,
36628diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
36629index 3c14e43..eafa544 100644
36630--- a/drivers/video/logo/logo_linux_clut224.ppm
36631+++ b/drivers/video/logo/logo_linux_clut224.ppm
36632@@ -1,1604 +1,1123 @@
36633 P3
36634-# Standard 224-color Linux logo
36635 80 80
36636 255
36637- 0 0 0 0 0 0 0 0 0 0 0 0
36638- 0 0 0 0 0 0 0 0 0 0 0 0
36639- 0 0 0 0 0 0 0 0 0 0 0 0
36640- 0 0 0 0 0 0 0 0 0 0 0 0
36641- 0 0 0 0 0 0 0 0 0 0 0 0
36642- 0 0 0 0 0 0 0 0 0 0 0 0
36643- 0 0 0 0 0 0 0 0 0 0 0 0
36644- 0 0 0 0 0 0 0 0 0 0 0 0
36645- 0 0 0 0 0 0 0 0 0 0 0 0
36646- 6 6 6 6 6 6 10 10 10 10 10 10
36647- 10 10 10 6 6 6 6 6 6 6 6 6
36648- 0 0 0 0 0 0 0 0 0 0 0 0
36649- 0 0 0 0 0 0 0 0 0 0 0 0
36650- 0 0 0 0 0 0 0 0 0 0 0 0
36651- 0 0 0 0 0 0 0 0 0 0 0 0
36652- 0 0 0 0 0 0 0 0 0 0 0 0
36653- 0 0 0 0 0 0 0 0 0 0 0 0
36654- 0 0 0 0 0 0 0 0 0 0 0 0
36655- 0 0 0 0 0 0 0 0 0 0 0 0
36656- 0 0 0 0 0 0 0 0 0 0 0 0
36657- 0 0 0 0 0 0 0 0 0 0 0 0
36658- 0 0 0 0 0 0 0 0 0 0 0 0
36659- 0 0 0 0 0 0 0 0 0 0 0 0
36660- 0 0 0 0 0 0 0 0 0 0 0 0
36661- 0 0 0 0 0 0 0 0 0 0 0 0
36662- 0 0 0 0 0 0 0 0 0 0 0 0
36663- 0 0 0 0 0 0 0 0 0 0 0 0
36664- 0 0 0 0 0 0 0 0 0 0 0 0
36665- 0 0 0 6 6 6 10 10 10 14 14 14
36666- 22 22 22 26 26 26 30 30 30 34 34 34
36667- 30 30 30 30 30 30 26 26 26 18 18 18
36668- 14 14 14 10 10 10 6 6 6 0 0 0
36669- 0 0 0 0 0 0 0 0 0 0 0 0
36670- 0 0 0 0 0 0 0 0 0 0 0 0
36671- 0 0 0 0 0 0 0 0 0 0 0 0
36672- 0 0 0 0 0 0 0 0 0 0 0 0
36673- 0 0 0 0 0 0 0 0 0 0 0 0
36674- 0 0 0 0 0 0 0 0 0 0 0 0
36675- 0 0 0 0 0 0 0 0 0 0 0 0
36676- 0 0 0 0 0 0 0 0 0 0 0 0
36677- 0 0 0 0 0 0 0 0 0 0 0 0
36678- 0 0 0 0 0 1 0 0 1 0 0 0
36679- 0 0 0 0 0 0 0 0 0 0 0 0
36680- 0 0 0 0 0 0 0 0 0 0 0 0
36681- 0 0 0 0 0 0 0 0 0 0 0 0
36682- 0 0 0 0 0 0 0 0 0 0 0 0
36683- 0 0 0 0 0 0 0 0 0 0 0 0
36684- 0 0 0 0 0 0 0 0 0 0 0 0
36685- 6 6 6 14 14 14 26 26 26 42 42 42
36686- 54 54 54 66 66 66 78 78 78 78 78 78
36687- 78 78 78 74 74 74 66 66 66 54 54 54
36688- 42 42 42 26 26 26 18 18 18 10 10 10
36689- 6 6 6 0 0 0 0 0 0 0 0 0
36690- 0 0 0 0 0 0 0 0 0 0 0 0
36691- 0 0 0 0 0 0 0 0 0 0 0 0
36692- 0 0 0 0 0 0 0 0 0 0 0 0
36693- 0 0 0 0 0 0 0 0 0 0 0 0
36694- 0 0 0 0 0 0 0 0 0 0 0 0
36695- 0 0 0 0 0 0 0 0 0 0 0 0
36696- 0 0 0 0 0 0 0 0 0 0 0 0
36697- 0 0 0 0 0 0 0 0 0 0 0 0
36698- 0 0 1 0 0 0 0 0 0 0 0 0
36699- 0 0 0 0 0 0 0 0 0 0 0 0
36700- 0 0 0 0 0 0 0 0 0 0 0 0
36701- 0 0 0 0 0 0 0 0 0 0 0 0
36702- 0 0 0 0 0 0 0 0 0 0 0 0
36703- 0 0 0 0 0 0 0 0 0 0 0 0
36704- 0 0 0 0 0 0 0 0 0 10 10 10
36705- 22 22 22 42 42 42 66 66 66 86 86 86
36706- 66 66 66 38 38 38 38 38 38 22 22 22
36707- 26 26 26 34 34 34 54 54 54 66 66 66
36708- 86 86 86 70 70 70 46 46 46 26 26 26
36709- 14 14 14 6 6 6 0 0 0 0 0 0
36710- 0 0 0 0 0 0 0 0 0 0 0 0
36711- 0 0 0 0 0 0 0 0 0 0 0 0
36712- 0 0 0 0 0 0 0 0 0 0 0 0
36713- 0 0 0 0 0 0 0 0 0 0 0 0
36714- 0 0 0 0 0 0 0 0 0 0 0 0
36715- 0 0 0 0 0 0 0 0 0 0 0 0
36716- 0 0 0 0 0 0 0 0 0 0 0 0
36717- 0 0 0 0 0 0 0 0 0 0 0 0
36718- 0 0 1 0 0 1 0 0 1 0 0 0
36719- 0 0 0 0 0 0 0 0 0 0 0 0
36720- 0 0 0 0 0 0 0 0 0 0 0 0
36721- 0 0 0 0 0 0 0 0 0 0 0 0
36722- 0 0 0 0 0 0 0 0 0 0 0 0
36723- 0 0 0 0 0 0 0 0 0 0 0 0
36724- 0 0 0 0 0 0 10 10 10 26 26 26
36725- 50 50 50 82 82 82 58 58 58 6 6 6
36726- 2 2 6 2 2 6 2 2 6 2 2 6
36727- 2 2 6 2 2 6 2 2 6 2 2 6
36728- 6 6 6 54 54 54 86 86 86 66 66 66
36729- 38 38 38 18 18 18 6 6 6 0 0 0
36730- 0 0 0 0 0 0 0 0 0 0 0 0
36731- 0 0 0 0 0 0 0 0 0 0 0 0
36732- 0 0 0 0 0 0 0 0 0 0 0 0
36733- 0 0 0 0 0 0 0 0 0 0 0 0
36734- 0 0 0 0 0 0 0 0 0 0 0 0
36735- 0 0 0 0 0 0 0 0 0 0 0 0
36736- 0 0 0 0 0 0 0 0 0 0 0 0
36737- 0 0 0 0 0 0 0 0 0 0 0 0
36738- 0 0 0 0 0 0 0 0 0 0 0 0
36739- 0 0 0 0 0 0 0 0 0 0 0 0
36740- 0 0 0 0 0 0 0 0 0 0 0 0
36741- 0 0 0 0 0 0 0 0 0 0 0 0
36742- 0 0 0 0 0 0 0 0 0 0 0 0
36743- 0 0 0 0 0 0 0 0 0 0 0 0
36744- 0 0 0 6 6 6 22 22 22 50 50 50
36745- 78 78 78 34 34 34 2 2 6 2 2 6
36746- 2 2 6 2 2 6 2 2 6 2 2 6
36747- 2 2 6 2 2 6 2 2 6 2 2 6
36748- 2 2 6 2 2 6 6 6 6 70 70 70
36749- 78 78 78 46 46 46 22 22 22 6 6 6
36750- 0 0 0 0 0 0 0 0 0 0 0 0
36751- 0 0 0 0 0 0 0 0 0 0 0 0
36752- 0 0 0 0 0 0 0 0 0 0 0 0
36753- 0 0 0 0 0 0 0 0 0 0 0 0
36754- 0 0 0 0 0 0 0 0 0 0 0 0
36755- 0 0 0 0 0 0 0 0 0 0 0 0
36756- 0 0 0 0 0 0 0 0 0 0 0 0
36757- 0 0 0 0 0 0 0 0 0 0 0 0
36758- 0 0 1 0 0 1 0 0 1 0 0 0
36759- 0 0 0 0 0 0 0 0 0 0 0 0
36760- 0 0 0 0 0 0 0 0 0 0 0 0
36761- 0 0 0 0 0 0 0 0 0 0 0 0
36762- 0 0 0 0 0 0 0 0 0 0 0 0
36763- 0 0 0 0 0 0 0 0 0 0 0 0
36764- 6 6 6 18 18 18 42 42 42 82 82 82
36765- 26 26 26 2 2 6 2 2 6 2 2 6
36766- 2 2 6 2 2 6 2 2 6 2 2 6
36767- 2 2 6 2 2 6 2 2 6 14 14 14
36768- 46 46 46 34 34 34 6 6 6 2 2 6
36769- 42 42 42 78 78 78 42 42 42 18 18 18
36770- 6 6 6 0 0 0 0 0 0 0 0 0
36771- 0 0 0 0 0 0 0 0 0 0 0 0
36772- 0 0 0 0 0 0 0 0 0 0 0 0
36773- 0 0 0 0 0 0 0 0 0 0 0 0
36774- 0 0 0 0 0 0 0 0 0 0 0 0
36775- 0 0 0 0 0 0 0 0 0 0 0 0
36776- 0 0 0 0 0 0 0 0 0 0 0 0
36777- 0 0 0 0 0 0 0 0 0 0 0 0
36778- 0 0 1 0 0 0 0 0 1 0 0 0
36779- 0 0 0 0 0 0 0 0 0 0 0 0
36780- 0 0 0 0 0 0 0 0 0 0 0 0
36781- 0 0 0 0 0 0 0 0 0 0 0 0
36782- 0 0 0 0 0 0 0 0 0 0 0 0
36783- 0 0 0 0 0 0 0 0 0 0 0 0
36784- 10 10 10 30 30 30 66 66 66 58 58 58
36785- 2 2 6 2 2 6 2 2 6 2 2 6
36786- 2 2 6 2 2 6 2 2 6 2 2 6
36787- 2 2 6 2 2 6 2 2 6 26 26 26
36788- 86 86 86 101 101 101 46 46 46 10 10 10
36789- 2 2 6 58 58 58 70 70 70 34 34 34
36790- 10 10 10 0 0 0 0 0 0 0 0 0
36791- 0 0 0 0 0 0 0 0 0 0 0 0
36792- 0 0 0 0 0 0 0 0 0 0 0 0
36793- 0 0 0 0 0 0 0 0 0 0 0 0
36794- 0 0 0 0 0 0 0 0 0 0 0 0
36795- 0 0 0 0 0 0 0 0 0 0 0 0
36796- 0 0 0 0 0 0 0 0 0 0 0 0
36797- 0 0 0 0 0 0 0 0 0 0 0 0
36798- 0 0 1 0 0 1 0 0 1 0 0 0
36799- 0 0 0 0 0 0 0 0 0 0 0 0
36800- 0 0 0 0 0 0 0 0 0 0 0 0
36801- 0 0 0 0 0 0 0 0 0 0 0 0
36802- 0 0 0 0 0 0 0 0 0 0 0 0
36803- 0 0 0 0 0 0 0 0 0 0 0 0
36804- 14 14 14 42 42 42 86 86 86 10 10 10
36805- 2 2 6 2 2 6 2 2 6 2 2 6
36806- 2 2 6 2 2 6 2 2 6 2 2 6
36807- 2 2 6 2 2 6 2 2 6 30 30 30
36808- 94 94 94 94 94 94 58 58 58 26 26 26
36809- 2 2 6 6 6 6 78 78 78 54 54 54
36810- 22 22 22 6 6 6 0 0 0 0 0 0
36811- 0 0 0 0 0 0 0 0 0 0 0 0
36812- 0 0 0 0 0 0 0 0 0 0 0 0
36813- 0 0 0 0 0 0 0 0 0 0 0 0
36814- 0 0 0 0 0 0 0 0 0 0 0 0
36815- 0 0 0 0 0 0 0 0 0 0 0 0
36816- 0 0 0 0 0 0 0 0 0 0 0 0
36817- 0 0 0 0 0 0 0 0 0 0 0 0
36818- 0 0 0 0 0 0 0 0 0 0 0 0
36819- 0 0 0 0 0 0 0 0 0 0 0 0
36820- 0 0 0 0 0 0 0 0 0 0 0 0
36821- 0 0 0 0 0 0 0 0 0 0 0 0
36822- 0 0 0 0 0 0 0 0 0 0 0 0
36823- 0 0 0 0 0 0 0 0 0 6 6 6
36824- 22 22 22 62 62 62 62 62 62 2 2 6
36825- 2 2 6 2 2 6 2 2 6 2 2 6
36826- 2 2 6 2 2 6 2 2 6 2 2 6
36827- 2 2 6 2 2 6 2 2 6 26 26 26
36828- 54 54 54 38 38 38 18 18 18 10 10 10
36829- 2 2 6 2 2 6 34 34 34 82 82 82
36830- 38 38 38 14 14 14 0 0 0 0 0 0
36831- 0 0 0 0 0 0 0 0 0 0 0 0
36832- 0 0 0 0 0 0 0 0 0 0 0 0
36833- 0 0 0 0 0 0 0 0 0 0 0 0
36834- 0 0 0 0 0 0 0 0 0 0 0 0
36835- 0 0 0 0 0 0 0 0 0 0 0 0
36836- 0 0 0 0 0 0 0 0 0 0 0 0
36837- 0 0 0 0 0 0 0 0 0 0 0 0
36838- 0 0 0 0 0 1 0 0 1 0 0 0
36839- 0 0 0 0 0 0 0 0 0 0 0 0
36840- 0 0 0 0 0 0 0 0 0 0 0 0
36841- 0 0 0 0 0 0 0 0 0 0 0 0
36842- 0 0 0 0 0 0 0 0 0 0 0 0
36843- 0 0 0 0 0 0 0 0 0 6 6 6
36844- 30 30 30 78 78 78 30 30 30 2 2 6
36845- 2 2 6 2 2 6 2 2 6 2 2 6
36846- 2 2 6 2 2 6 2 2 6 2 2 6
36847- 2 2 6 2 2 6 2 2 6 10 10 10
36848- 10 10 10 2 2 6 2 2 6 2 2 6
36849- 2 2 6 2 2 6 2 2 6 78 78 78
36850- 50 50 50 18 18 18 6 6 6 0 0 0
36851- 0 0 0 0 0 0 0 0 0 0 0 0
36852- 0 0 0 0 0 0 0 0 0 0 0 0
36853- 0 0 0 0 0 0 0 0 0 0 0 0
36854- 0 0 0 0 0 0 0 0 0 0 0 0
36855- 0 0 0 0 0 0 0 0 0 0 0 0
36856- 0 0 0 0 0 0 0 0 0 0 0 0
36857- 0 0 0 0 0 0 0 0 0 0 0 0
36858- 0 0 1 0 0 0 0 0 0 0 0 0
36859- 0 0 0 0 0 0 0 0 0 0 0 0
36860- 0 0 0 0 0 0 0 0 0 0 0 0
36861- 0 0 0 0 0 0 0 0 0 0 0 0
36862- 0 0 0 0 0 0 0 0 0 0 0 0
36863- 0 0 0 0 0 0 0 0 0 10 10 10
36864- 38 38 38 86 86 86 14 14 14 2 2 6
36865- 2 2 6 2 2 6 2 2 6 2 2 6
36866- 2 2 6 2 2 6 2 2 6 2 2 6
36867- 2 2 6 2 2 6 2 2 6 2 2 6
36868- 2 2 6 2 2 6 2 2 6 2 2 6
36869- 2 2 6 2 2 6 2 2 6 54 54 54
36870- 66 66 66 26 26 26 6 6 6 0 0 0
36871- 0 0 0 0 0 0 0 0 0 0 0 0
36872- 0 0 0 0 0 0 0 0 0 0 0 0
36873- 0 0 0 0 0 0 0 0 0 0 0 0
36874- 0 0 0 0 0 0 0 0 0 0 0 0
36875- 0 0 0 0 0 0 0 0 0 0 0 0
36876- 0 0 0 0 0 0 0 0 0 0 0 0
36877- 0 0 0 0 0 0 0 0 0 0 0 0
36878- 0 0 0 0 0 1 0 0 1 0 0 0
36879- 0 0 0 0 0 0 0 0 0 0 0 0
36880- 0 0 0 0 0 0 0 0 0 0 0 0
36881- 0 0 0 0 0 0 0 0 0 0 0 0
36882- 0 0 0 0 0 0 0 0 0 0 0 0
36883- 0 0 0 0 0 0 0 0 0 14 14 14
36884- 42 42 42 82 82 82 2 2 6 2 2 6
36885- 2 2 6 6 6 6 10 10 10 2 2 6
36886- 2 2 6 2 2 6 2 2 6 2 2 6
36887- 2 2 6 2 2 6 2 2 6 6 6 6
36888- 14 14 14 10 10 10 2 2 6 2 2 6
36889- 2 2 6 2 2 6 2 2 6 18 18 18
36890- 82 82 82 34 34 34 10 10 10 0 0 0
36891- 0 0 0 0 0 0 0 0 0 0 0 0
36892- 0 0 0 0 0 0 0 0 0 0 0 0
36893- 0 0 0 0 0 0 0 0 0 0 0 0
36894- 0 0 0 0 0 0 0 0 0 0 0 0
36895- 0 0 0 0 0 0 0 0 0 0 0 0
36896- 0 0 0 0 0 0 0 0 0 0 0 0
36897- 0 0 0 0 0 0 0 0 0 0 0 0
36898- 0 0 1 0 0 0 0 0 0 0 0 0
36899- 0 0 0 0 0 0 0 0 0 0 0 0
36900- 0 0 0 0 0 0 0 0 0 0 0 0
36901- 0 0 0 0 0 0 0 0 0 0 0 0
36902- 0 0 0 0 0 0 0 0 0 0 0 0
36903- 0 0 0 0 0 0 0 0 0 14 14 14
36904- 46 46 46 86 86 86 2 2 6 2 2 6
36905- 6 6 6 6 6 6 22 22 22 34 34 34
36906- 6 6 6 2 2 6 2 2 6 2 2 6
36907- 2 2 6 2 2 6 18 18 18 34 34 34
36908- 10 10 10 50 50 50 22 22 22 2 2 6
36909- 2 2 6 2 2 6 2 2 6 10 10 10
36910- 86 86 86 42 42 42 14 14 14 0 0 0
36911- 0 0 0 0 0 0 0 0 0 0 0 0
36912- 0 0 0 0 0 0 0 0 0 0 0 0
36913- 0 0 0 0 0 0 0 0 0 0 0 0
36914- 0 0 0 0 0 0 0 0 0 0 0 0
36915- 0 0 0 0 0 0 0 0 0 0 0 0
36916- 0 0 0 0 0 0 0 0 0 0 0 0
36917- 0 0 0 0 0 0 0 0 0 0 0 0
36918- 0 0 1 0 0 1 0 0 1 0 0 0
36919- 0 0 0 0 0 0 0 0 0 0 0 0
36920- 0 0 0 0 0 0 0 0 0 0 0 0
36921- 0 0 0 0 0 0 0 0 0 0 0 0
36922- 0 0 0 0 0 0 0 0 0 0 0 0
36923- 0 0 0 0 0 0 0 0 0 14 14 14
36924- 46 46 46 86 86 86 2 2 6 2 2 6
36925- 38 38 38 116 116 116 94 94 94 22 22 22
36926- 22 22 22 2 2 6 2 2 6 2 2 6
36927- 14 14 14 86 86 86 138 138 138 162 162 162
36928-154 154 154 38 38 38 26 26 26 6 6 6
36929- 2 2 6 2 2 6 2 2 6 2 2 6
36930- 86 86 86 46 46 46 14 14 14 0 0 0
36931- 0 0 0 0 0 0 0 0 0 0 0 0
36932- 0 0 0 0 0 0 0 0 0 0 0 0
36933- 0 0 0 0 0 0 0 0 0 0 0 0
36934- 0 0 0 0 0 0 0 0 0 0 0 0
36935- 0 0 0 0 0 0 0 0 0 0 0 0
36936- 0 0 0 0 0 0 0 0 0 0 0 0
36937- 0 0 0 0 0 0 0 0 0 0 0 0
36938- 0 0 0 0 0 0 0 0 0 0 0 0
36939- 0 0 0 0 0 0 0 0 0 0 0 0
36940- 0 0 0 0 0 0 0 0 0 0 0 0
36941- 0 0 0 0 0 0 0 0 0 0 0 0
36942- 0 0 0 0 0 0 0 0 0 0 0 0
36943- 0 0 0 0 0 0 0 0 0 14 14 14
36944- 46 46 46 86 86 86 2 2 6 14 14 14
36945-134 134 134 198 198 198 195 195 195 116 116 116
36946- 10 10 10 2 2 6 2 2 6 6 6 6
36947-101 98 89 187 187 187 210 210 210 218 218 218
36948-214 214 214 134 134 134 14 14 14 6 6 6
36949- 2 2 6 2 2 6 2 2 6 2 2 6
36950- 86 86 86 50 50 50 18 18 18 6 6 6
36951- 0 0 0 0 0 0 0 0 0 0 0 0
36952- 0 0 0 0 0 0 0 0 0 0 0 0
36953- 0 0 0 0 0 0 0 0 0 0 0 0
36954- 0 0 0 0 0 0 0 0 0 0 0 0
36955- 0 0 0 0 0 0 0 0 0 0 0 0
36956- 0 0 0 0 0 0 0 0 0 0 0 0
36957- 0 0 0 0 0 0 0 0 1 0 0 0
36958- 0 0 1 0 0 1 0 0 1 0 0 0
36959- 0 0 0 0 0 0 0 0 0 0 0 0
36960- 0 0 0 0 0 0 0 0 0 0 0 0
36961- 0 0 0 0 0 0 0 0 0 0 0 0
36962- 0 0 0 0 0 0 0 0 0 0 0 0
36963- 0 0 0 0 0 0 0 0 0 14 14 14
36964- 46 46 46 86 86 86 2 2 6 54 54 54
36965-218 218 218 195 195 195 226 226 226 246 246 246
36966- 58 58 58 2 2 6 2 2 6 30 30 30
36967-210 210 210 253 253 253 174 174 174 123 123 123
36968-221 221 221 234 234 234 74 74 74 2 2 6
36969- 2 2 6 2 2 6 2 2 6 2 2 6
36970- 70 70 70 58 58 58 22 22 22 6 6 6
36971- 0 0 0 0 0 0 0 0 0 0 0 0
36972- 0 0 0 0 0 0 0 0 0 0 0 0
36973- 0 0 0 0 0 0 0 0 0 0 0 0
36974- 0 0 0 0 0 0 0 0 0 0 0 0
36975- 0 0 0 0 0 0 0 0 0 0 0 0
36976- 0 0 0 0 0 0 0 0 0 0 0 0
36977- 0 0 0 0 0 0 0 0 0 0 0 0
36978- 0 0 0 0 0 0 0 0 0 0 0 0
36979- 0 0 0 0 0 0 0 0 0 0 0 0
36980- 0 0 0 0 0 0 0 0 0 0 0 0
36981- 0 0 0 0 0 0 0 0 0 0 0 0
36982- 0 0 0 0 0 0 0 0 0 0 0 0
36983- 0 0 0 0 0 0 0 0 0 14 14 14
36984- 46 46 46 82 82 82 2 2 6 106 106 106
36985-170 170 170 26 26 26 86 86 86 226 226 226
36986-123 123 123 10 10 10 14 14 14 46 46 46
36987-231 231 231 190 190 190 6 6 6 70 70 70
36988- 90 90 90 238 238 238 158 158 158 2 2 6
36989- 2 2 6 2 2 6 2 2 6 2 2 6
36990- 70 70 70 58 58 58 22 22 22 6 6 6
36991- 0 0 0 0 0 0 0 0 0 0 0 0
36992- 0 0 0 0 0 0 0 0 0 0 0 0
36993- 0 0 0 0 0 0 0 0 0 0 0 0
36994- 0 0 0 0 0 0 0 0 0 0 0 0
36995- 0 0 0 0 0 0 0 0 0 0 0 0
36996- 0 0 0 0 0 0 0 0 0 0 0 0
36997- 0 0 0 0 0 0 0 0 1 0 0 0
36998- 0 0 1 0 0 1 0 0 1 0 0 0
36999- 0 0 0 0 0 0 0 0 0 0 0 0
37000- 0 0 0 0 0 0 0 0 0 0 0 0
37001- 0 0 0 0 0 0 0 0 0 0 0 0
37002- 0 0 0 0 0 0 0 0 0 0 0 0
37003- 0 0 0 0 0 0 0 0 0 14 14 14
37004- 42 42 42 86 86 86 6 6 6 116 116 116
37005-106 106 106 6 6 6 70 70 70 149 149 149
37006-128 128 128 18 18 18 38 38 38 54 54 54
37007-221 221 221 106 106 106 2 2 6 14 14 14
37008- 46 46 46 190 190 190 198 198 198 2 2 6
37009- 2 2 6 2 2 6 2 2 6 2 2 6
37010- 74 74 74 62 62 62 22 22 22 6 6 6
37011- 0 0 0 0 0 0 0 0 0 0 0 0
37012- 0 0 0 0 0 0 0 0 0 0 0 0
37013- 0 0 0 0 0 0 0 0 0 0 0 0
37014- 0 0 0 0 0 0 0 0 0 0 0 0
37015- 0 0 0 0 0 0 0 0 0 0 0 0
37016- 0 0 0 0 0 0 0 0 0 0 0 0
37017- 0 0 0 0 0 0 0 0 1 0 0 0
37018- 0 0 1 0 0 0 0 0 1 0 0 0
37019- 0 0 0 0 0 0 0 0 0 0 0 0
37020- 0 0 0 0 0 0 0 0 0 0 0 0
37021- 0 0 0 0 0 0 0 0 0 0 0 0
37022- 0 0 0 0 0 0 0 0 0 0 0 0
37023- 0 0 0 0 0 0 0 0 0 14 14 14
37024- 42 42 42 94 94 94 14 14 14 101 101 101
37025-128 128 128 2 2 6 18 18 18 116 116 116
37026-118 98 46 121 92 8 121 92 8 98 78 10
37027-162 162 162 106 106 106 2 2 6 2 2 6
37028- 2 2 6 195 195 195 195 195 195 6 6 6
37029- 2 2 6 2 2 6 2 2 6 2 2 6
37030- 74 74 74 62 62 62 22 22 22 6 6 6
37031- 0 0 0 0 0 0 0 0 0 0 0 0
37032- 0 0 0 0 0 0 0 0 0 0 0 0
37033- 0 0 0 0 0 0 0 0 0 0 0 0
37034- 0 0 0 0 0 0 0 0 0 0 0 0
37035- 0 0 0 0 0 0 0 0 0 0 0 0
37036- 0 0 0 0 0 0 0 0 0 0 0 0
37037- 0 0 0 0 0 0 0 0 1 0 0 1
37038- 0 0 1 0 0 0 0 0 1 0 0 0
37039- 0 0 0 0 0 0 0 0 0 0 0 0
37040- 0 0 0 0 0 0 0 0 0 0 0 0
37041- 0 0 0 0 0 0 0 0 0 0 0 0
37042- 0 0 0 0 0 0 0 0 0 0 0 0
37043- 0 0 0 0 0 0 0 0 0 10 10 10
37044- 38 38 38 90 90 90 14 14 14 58 58 58
37045-210 210 210 26 26 26 54 38 6 154 114 10
37046-226 170 11 236 186 11 225 175 15 184 144 12
37047-215 174 15 175 146 61 37 26 9 2 2 6
37048- 70 70 70 246 246 246 138 138 138 2 2 6
37049- 2 2 6 2 2 6 2 2 6 2 2 6
37050- 70 70 70 66 66 66 26 26 26 6 6 6
37051- 0 0 0 0 0 0 0 0 0 0 0 0
37052- 0 0 0 0 0 0 0 0 0 0 0 0
37053- 0 0 0 0 0 0 0 0 0 0 0 0
37054- 0 0 0 0 0 0 0 0 0 0 0 0
37055- 0 0 0 0 0 0 0 0 0 0 0 0
37056- 0 0 0 0 0 0 0 0 0 0 0 0
37057- 0 0 0 0 0 0 0 0 0 0 0 0
37058- 0 0 0 0 0 0 0 0 0 0 0 0
37059- 0 0 0 0 0 0 0 0 0 0 0 0
37060- 0 0 0 0 0 0 0 0 0 0 0 0
37061- 0 0 0 0 0 0 0 0 0 0 0 0
37062- 0 0 0 0 0 0 0 0 0 0 0 0
37063- 0 0 0 0 0 0 0 0 0 10 10 10
37064- 38 38 38 86 86 86 14 14 14 10 10 10
37065-195 195 195 188 164 115 192 133 9 225 175 15
37066-239 182 13 234 190 10 232 195 16 232 200 30
37067-245 207 45 241 208 19 232 195 16 184 144 12
37068-218 194 134 211 206 186 42 42 42 2 2 6
37069- 2 2 6 2 2 6 2 2 6 2 2 6
37070- 50 50 50 74 74 74 30 30 30 6 6 6
37071- 0 0 0 0 0 0 0 0 0 0 0 0
37072- 0 0 0 0 0 0 0 0 0 0 0 0
37073- 0 0 0 0 0 0 0 0 0 0 0 0
37074- 0 0 0 0 0 0 0 0 0 0 0 0
37075- 0 0 0 0 0 0 0 0 0 0 0 0
37076- 0 0 0 0 0 0 0 0 0 0 0 0
37077- 0 0 0 0 0 0 0 0 0 0 0 0
37078- 0 0 0 0 0 0 0 0 0 0 0 0
37079- 0 0 0 0 0 0 0 0 0 0 0 0
37080- 0 0 0 0 0 0 0 0 0 0 0 0
37081- 0 0 0 0 0 0 0 0 0 0 0 0
37082- 0 0 0 0 0 0 0 0 0 0 0 0
37083- 0 0 0 0 0 0 0 0 0 10 10 10
37084- 34 34 34 86 86 86 14 14 14 2 2 6
37085-121 87 25 192 133 9 219 162 10 239 182 13
37086-236 186 11 232 195 16 241 208 19 244 214 54
37087-246 218 60 246 218 38 246 215 20 241 208 19
37088-241 208 19 226 184 13 121 87 25 2 2 6
37089- 2 2 6 2 2 6 2 2 6 2 2 6
37090- 50 50 50 82 82 82 34 34 34 10 10 10
37091- 0 0 0 0 0 0 0 0 0 0 0 0
37092- 0 0 0 0 0 0 0 0 0 0 0 0
37093- 0 0 0 0 0 0 0 0 0 0 0 0
37094- 0 0 0 0 0 0 0 0 0 0 0 0
37095- 0 0 0 0 0 0 0 0 0 0 0 0
37096- 0 0 0 0 0 0 0 0 0 0 0 0
37097- 0 0 0 0 0 0 0 0 0 0 0 0
37098- 0 0 0 0 0 0 0 0 0 0 0 0
37099- 0 0 0 0 0 0 0 0 0 0 0 0
37100- 0 0 0 0 0 0 0 0 0 0 0 0
37101- 0 0 0 0 0 0 0 0 0 0 0 0
37102- 0 0 0 0 0 0 0 0 0 0 0 0
37103- 0 0 0 0 0 0 0 0 0 10 10 10
37104- 34 34 34 82 82 82 30 30 30 61 42 6
37105-180 123 7 206 145 10 230 174 11 239 182 13
37106-234 190 10 238 202 15 241 208 19 246 218 74
37107-246 218 38 246 215 20 246 215 20 246 215 20
37108-226 184 13 215 174 15 184 144 12 6 6 6
37109- 2 2 6 2 2 6 2 2 6 2 2 6
37110- 26 26 26 94 94 94 42 42 42 14 14 14
37111- 0 0 0 0 0 0 0 0 0 0 0 0
37112- 0 0 0 0 0 0 0 0 0 0 0 0
37113- 0 0 0 0 0 0 0 0 0 0 0 0
37114- 0 0 0 0 0 0 0 0 0 0 0 0
37115- 0 0 0 0 0 0 0 0 0 0 0 0
37116- 0 0 0 0 0 0 0 0 0 0 0 0
37117- 0 0 0 0 0 0 0 0 0 0 0 0
37118- 0 0 0 0 0 0 0 0 0 0 0 0
37119- 0 0 0 0 0 0 0 0 0 0 0 0
37120- 0 0 0 0 0 0 0 0 0 0 0 0
37121- 0 0 0 0 0 0 0 0 0 0 0 0
37122- 0 0 0 0 0 0 0 0 0 0 0 0
37123- 0 0 0 0 0 0 0 0 0 10 10 10
37124- 30 30 30 78 78 78 50 50 50 104 69 6
37125-192 133 9 216 158 10 236 178 12 236 186 11
37126-232 195 16 241 208 19 244 214 54 245 215 43
37127-246 215 20 246 215 20 241 208 19 198 155 10
37128-200 144 11 216 158 10 156 118 10 2 2 6
37129- 2 2 6 2 2 6 2 2 6 2 2 6
37130- 6 6 6 90 90 90 54 54 54 18 18 18
37131- 6 6 6 0 0 0 0 0 0 0 0 0
37132- 0 0 0 0 0 0 0 0 0 0 0 0
37133- 0 0 0 0 0 0 0 0 0 0 0 0
37134- 0 0 0 0 0 0 0 0 0 0 0 0
37135- 0 0 0 0 0 0 0 0 0 0 0 0
37136- 0 0 0 0 0 0 0 0 0 0 0 0
37137- 0 0 0 0 0 0 0 0 0 0 0 0
37138- 0 0 0 0 0 0 0 0 0 0 0 0
37139- 0 0 0 0 0 0 0 0 0 0 0 0
37140- 0 0 0 0 0 0 0 0 0 0 0 0
37141- 0 0 0 0 0 0 0 0 0 0 0 0
37142- 0 0 0 0 0 0 0 0 0 0 0 0
37143- 0 0 0 0 0 0 0 0 0 10 10 10
37144- 30 30 30 78 78 78 46 46 46 22 22 22
37145-137 92 6 210 162 10 239 182 13 238 190 10
37146-238 202 15 241 208 19 246 215 20 246 215 20
37147-241 208 19 203 166 17 185 133 11 210 150 10
37148-216 158 10 210 150 10 102 78 10 2 2 6
37149- 6 6 6 54 54 54 14 14 14 2 2 6
37150- 2 2 6 62 62 62 74 74 74 30 30 30
37151- 10 10 10 0 0 0 0 0 0 0 0 0
37152- 0 0 0 0 0 0 0 0 0 0 0 0
37153- 0 0 0 0 0 0 0 0 0 0 0 0
37154- 0 0 0 0 0 0 0 0 0 0 0 0
37155- 0 0 0 0 0 0 0 0 0 0 0 0
37156- 0 0 0 0 0 0 0 0 0 0 0 0
37157- 0 0 0 0 0 0 0 0 0 0 0 0
37158- 0 0 0 0 0 0 0 0 0 0 0 0
37159- 0 0 0 0 0 0 0 0 0 0 0 0
37160- 0 0 0 0 0 0 0 0 0 0 0 0
37161- 0 0 0 0 0 0 0 0 0 0 0 0
37162- 0 0 0 0 0 0 0 0 0 0 0 0
37163- 0 0 0 0 0 0 0 0 0 10 10 10
37164- 34 34 34 78 78 78 50 50 50 6 6 6
37165- 94 70 30 139 102 15 190 146 13 226 184 13
37166-232 200 30 232 195 16 215 174 15 190 146 13
37167-168 122 10 192 133 9 210 150 10 213 154 11
37168-202 150 34 182 157 106 101 98 89 2 2 6
37169- 2 2 6 78 78 78 116 116 116 58 58 58
37170- 2 2 6 22 22 22 90 90 90 46 46 46
37171- 18 18 18 6 6 6 0 0 0 0 0 0
37172- 0 0 0 0 0 0 0 0 0 0 0 0
37173- 0 0 0 0 0 0 0 0 0 0 0 0
37174- 0 0 0 0 0 0 0 0 0 0 0 0
37175- 0 0 0 0 0 0 0 0 0 0 0 0
37176- 0 0 0 0 0 0 0 0 0 0 0 0
37177- 0 0 0 0 0 0 0 0 0 0 0 0
37178- 0 0 0 0 0 0 0 0 0 0 0 0
37179- 0 0 0 0 0 0 0 0 0 0 0 0
37180- 0 0 0 0 0 0 0 0 0 0 0 0
37181- 0 0 0 0 0 0 0 0 0 0 0 0
37182- 0 0 0 0 0 0 0 0 0 0 0 0
37183- 0 0 0 0 0 0 0 0 0 10 10 10
37184- 38 38 38 86 86 86 50 50 50 6 6 6
37185-128 128 128 174 154 114 156 107 11 168 122 10
37186-198 155 10 184 144 12 197 138 11 200 144 11
37187-206 145 10 206 145 10 197 138 11 188 164 115
37188-195 195 195 198 198 198 174 174 174 14 14 14
37189- 2 2 6 22 22 22 116 116 116 116 116 116
37190- 22 22 22 2 2 6 74 74 74 70 70 70
37191- 30 30 30 10 10 10 0 0 0 0 0 0
37192- 0 0 0 0 0 0 0 0 0 0 0 0
37193- 0 0 0 0 0 0 0 0 0 0 0 0
37194- 0 0 0 0 0 0 0 0 0 0 0 0
37195- 0 0 0 0 0 0 0 0 0 0 0 0
37196- 0 0 0 0 0 0 0 0 0 0 0 0
37197- 0 0 0 0 0 0 0 0 0 0 0 0
37198- 0 0 0 0 0 0 0 0 0 0 0 0
37199- 0 0 0 0 0 0 0 0 0 0 0 0
37200- 0 0 0 0 0 0 0 0 0 0 0 0
37201- 0 0 0 0 0 0 0 0 0 0 0 0
37202- 0 0 0 0 0 0 0 0 0 0 0 0
37203- 0 0 0 0 0 0 6 6 6 18 18 18
37204- 50 50 50 101 101 101 26 26 26 10 10 10
37205-138 138 138 190 190 190 174 154 114 156 107 11
37206-197 138 11 200 144 11 197 138 11 192 133 9
37207-180 123 7 190 142 34 190 178 144 187 187 187
37208-202 202 202 221 221 221 214 214 214 66 66 66
37209- 2 2 6 2 2 6 50 50 50 62 62 62
37210- 6 6 6 2 2 6 10 10 10 90 90 90
37211- 50 50 50 18 18 18 6 6 6 0 0 0
37212- 0 0 0 0 0 0 0 0 0 0 0 0
37213- 0 0 0 0 0 0 0 0 0 0 0 0
37214- 0 0 0 0 0 0 0 0 0 0 0 0
37215- 0 0 0 0 0 0 0 0 0 0 0 0
37216- 0 0 0 0 0 0 0 0 0 0 0 0
37217- 0 0 0 0 0 0 0 0 0 0 0 0
37218- 0 0 0 0 0 0 0 0 0 0 0 0
37219- 0 0 0 0 0 0 0 0 0 0 0 0
37220- 0 0 0 0 0 0 0 0 0 0 0 0
37221- 0 0 0 0 0 0 0 0 0 0 0 0
37222- 0 0 0 0 0 0 0 0 0 0 0 0
37223- 0 0 0 0 0 0 10 10 10 34 34 34
37224- 74 74 74 74 74 74 2 2 6 6 6 6
37225-144 144 144 198 198 198 190 190 190 178 166 146
37226-154 121 60 156 107 11 156 107 11 168 124 44
37227-174 154 114 187 187 187 190 190 190 210 210 210
37228-246 246 246 253 253 253 253 253 253 182 182 182
37229- 6 6 6 2 2 6 2 2 6 2 2 6
37230- 2 2 6 2 2 6 2 2 6 62 62 62
37231- 74 74 74 34 34 34 14 14 14 0 0 0
37232- 0 0 0 0 0 0 0 0 0 0 0 0
37233- 0 0 0 0 0 0 0 0 0 0 0 0
37234- 0 0 0 0 0 0 0 0 0 0 0 0
37235- 0 0 0 0 0 0 0 0 0 0 0 0
37236- 0 0 0 0 0 0 0 0 0 0 0 0
37237- 0 0 0 0 0 0 0 0 0 0 0 0
37238- 0 0 0 0 0 0 0 0 0 0 0 0
37239- 0 0 0 0 0 0 0 0 0 0 0 0
37240- 0 0 0 0 0 0 0 0 0 0 0 0
37241- 0 0 0 0 0 0 0 0 0 0 0 0
37242- 0 0 0 0 0 0 0 0 0 0 0 0
37243- 0 0 0 10 10 10 22 22 22 54 54 54
37244- 94 94 94 18 18 18 2 2 6 46 46 46
37245-234 234 234 221 221 221 190 190 190 190 190 190
37246-190 190 190 187 187 187 187 187 187 190 190 190
37247-190 190 190 195 195 195 214 214 214 242 242 242
37248-253 253 253 253 253 253 253 253 253 253 253 253
37249- 82 82 82 2 2 6 2 2 6 2 2 6
37250- 2 2 6 2 2 6 2 2 6 14 14 14
37251- 86 86 86 54 54 54 22 22 22 6 6 6
37252- 0 0 0 0 0 0 0 0 0 0 0 0
37253- 0 0 0 0 0 0 0 0 0 0 0 0
37254- 0 0 0 0 0 0 0 0 0 0 0 0
37255- 0 0 0 0 0 0 0 0 0 0 0 0
37256- 0 0 0 0 0 0 0 0 0 0 0 0
37257- 0 0 0 0 0 0 0 0 0 0 0 0
37258- 0 0 0 0 0 0 0 0 0 0 0 0
37259- 0 0 0 0 0 0 0 0 0 0 0 0
37260- 0 0 0 0 0 0 0 0 0 0 0 0
37261- 0 0 0 0 0 0 0 0 0 0 0 0
37262- 0 0 0 0 0 0 0 0 0 0 0 0
37263- 6 6 6 18 18 18 46 46 46 90 90 90
37264- 46 46 46 18 18 18 6 6 6 182 182 182
37265-253 253 253 246 246 246 206 206 206 190 190 190
37266-190 190 190 190 190 190 190 190 190 190 190 190
37267-206 206 206 231 231 231 250 250 250 253 253 253
37268-253 253 253 253 253 253 253 253 253 253 253 253
37269-202 202 202 14 14 14 2 2 6 2 2 6
37270- 2 2 6 2 2 6 2 2 6 2 2 6
37271- 42 42 42 86 86 86 42 42 42 18 18 18
37272- 6 6 6 0 0 0 0 0 0 0 0 0
37273- 0 0 0 0 0 0 0 0 0 0 0 0
37274- 0 0 0 0 0 0 0 0 0 0 0 0
37275- 0 0 0 0 0 0 0 0 0 0 0 0
37276- 0 0 0 0 0 0 0 0 0 0 0 0
37277- 0 0 0 0 0 0 0 0 0 0 0 0
37278- 0 0 0 0 0 0 0 0 0 0 0 0
37279- 0 0 0 0 0 0 0 0 0 0 0 0
37280- 0 0 0 0 0 0 0 0 0 0 0 0
37281- 0 0 0 0 0 0 0 0 0 0 0 0
37282- 0 0 0 0 0 0 0 0 0 6 6 6
37283- 14 14 14 38 38 38 74 74 74 66 66 66
37284- 2 2 6 6 6 6 90 90 90 250 250 250
37285-253 253 253 253 253 253 238 238 238 198 198 198
37286-190 190 190 190 190 190 195 195 195 221 221 221
37287-246 246 246 253 253 253 253 253 253 253 253 253
37288-253 253 253 253 253 253 253 253 253 253 253 253
37289-253 253 253 82 82 82 2 2 6 2 2 6
37290- 2 2 6 2 2 6 2 2 6 2 2 6
37291- 2 2 6 78 78 78 70 70 70 34 34 34
37292- 14 14 14 6 6 6 0 0 0 0 0 0
37293- 0 0 0 0 0 0 0 0 0 0 0 0
37294- 0 0 0 0 0 0 0 0 0 0 0 0
37295- 0 0 0 0 0 0 0 0 0 0 0 0
37296- 0 0 0 0 0 0 0 0 0 0 0 0
37297- 0 0 0 0 0 0 0 0 0 0 0 0
37298- 0 0 0 0 0 0 0 0 0 0 0 0
37299- 0 0 0 0 0 0 0 0 0 0 0 0
37300- 0 0 0 0 0 0 0 0 0 0 0 0
37301- 0 0 0 0 0 0 0 0 0 0 0 0
37302- 0 0 0 0 0 0 0 0 0 14 14 14
37303- 34 34 34 66 66 66 78 78 78 6 6 6
37304- 2 2 6 18 18 18 218 218 218 253 253 253
37305-253 253 253 253 253 253 253 253 253 246 246 246
37306-226 226 226 231 231 231 246 246 246 253 253 253
37307-253 253 253 253 253 253 253 253 253 253 253 253
37308-253 253 253 253 253 253 253 253 253 253 253 253
37309-253 253 253 178 178 178 2 2 6 2 2 6
37310- 2 2 6 2 2 6 2 2 6 2 2 6
37311- 2 2 6 18 18 18 90 90 90 62 62 62
37312- 30 30 30 10 10 10 0 0 0 0 0 0
37313- 0 0 0 0 0 0 0 0 0 0 0 0
37314- 0 0 0 0 0 0 0 0 0 0 0 0
37315- 0 0 0 0 0 0 0 0 0 0 0 0
37316- 0 0 0 0 0 0 0 0 0 0 0 0
37317- 0 0 0 0 0 0 0 0 0 0 0 0
37318- 0 0 0 0 0 0 0 0 0 0 0 0
37319- 0 0 0 0 0 0 0 0 0 0 0 0
37320- 0 0 0 0 0 0 0 0 0 0 0 0
37321- 0 0 0 0 0 0 0 0 0 0 0 0
37322- 0 0 0 0 0 0 10 10 10 26 26 26
37323- 58 58 58 90 90 90 18 18 18 2 2 6
37324- 2 2 6 110 110 110 253 253 253 253 253 253
37325-253 253 253 253 253 253 253 253 253 253 253 253
37326-250 250 250 253 253 253 253 253 253 253 253 253
37327-253 253 253 253 253 253 253 253 253 253 253 253
37328-253 253 253 253 253 253 253 253 253 253 253 253
37329-253 253 253 231 231 231 18 18 18 2 2 6
37330- 2 2 6 2 2 6 2 2 6 2 2 6
37331- 2 2 6 2 2 6 18 18 18 94 94 94
37332- 54 54 54 26 26 26 10 10 10 0 0 0
37333- 0 0 0 0 0 0 0 0 0 0 0 0
37334- 0 0 0 0 0 0 0 0 0 0 0 0
37335- 0 0 0 0 0 0 0 0 0 0 0 0
37336- 0 0 0 0 0 0 0 0 0 0 0 0
37337- 0 0 0 0 0 0 0 0 0 0 0 0
37338- 0 0 0 0 0 0 0 0 0 0 0 0
37339- 0 0 0 0 0 0 0 0 0 0 0 0
37340- 0 0 0 0 0 0 0 0 0 0 0 0
37341- 0 0 0 0 0 0 0 0 0 0 0 0
37342- 0 0 0 6 6 6 22 22 22 50 50 50
37343- 90 90 90 26 26 26 2 2 6 2 2 6
37344- 14 14 14 195 195 195 250 250 250 253 253 253
37345-253 253 253 253 253 253 253 253 253 253 253 253
37346-253 253 253 253 253 253 253 253 253 253 253 253
37347-253 253 253 253 253 253 253 253 253 253 253 253
37348-253 253 253 253 253 253 253 253 253 253 253 253
37349-250 250 250 242 242 242 54 54 54 2 2 6
37350- 2 2 6 2 2 6 2 2 6 2 2 6
37351- 2 2 6 2 2 6 2 2 6 38 38 38
37352- 86 86 86 50 50 50 22 22 22 6 6 6
37353- 0 0 0 0 0 0 0 0 0 0 0 0
37354- 0 0 0 0 0 0 0 0 0 0 0 0
37355- 0 0 0 0 0 0 0 0 0 0 0 0
37356- 0 0 0 0 0 0 0 0 0 0 0 0
37357- 0 0 0 0 0 0 0 0 0 0 0 0
37358- 0 0 0 0 0 0 0 0 0 0 0 0
37359- 0 0 0 0 0 0 0 0 0 0 0 0
37360- 0 0 0 0 0 0 0 0 0 0 0 0
37361- 0 0 0 0 0 0 0 0 0 0 0 0
37362- 6 6 6 14 14 14 38 38 38 82 82 82
37363- 34 34 34 2 2 6 2 2 6 2 2 6
37364- 42 42 42 195 195 195 246 246 246 253 253 253
37365-253 253 253 253 253 253 253 253 253 250 250 250
37366-242 242 242 242 242 242 250 250 250 253 253 253
37367-253 253 253 253 253 253 253 253 253 253 253 253
37368-253 253 253 250 250 250 246 246 246 238 238 238
37369-226 226 226 231 231 231 101 101 101 6 6 6
37370- 2 2 6 2 2 6 2 2 6 2 2 6
37371- 2 2 6 2 2 6 2 2 6 2 2 6
37372- 38 38 38 82 82 82 42 42 42 14 14 14
37373- 6 6 6 0 0 0 0 0 0 0 0 0
37374- 0 0 0 0 0 0 0 0 0 0 0 0
37375- 0 0 0 0 0 0 0 0 0 0 0 0
37376- 0 0 0 0 0 0 0 0 0 0 0 0
37377- 0 0 0 0 0 0 0 0 0 0 0 0
37378- 0 0 0 0 0 0 0 0 0 0 0 0
37379- 0 0 0 0 0 0 0 0 0 0 0 0
37380- 0 0 0 0 0 0 0 0 0 0 0 0
37381- 0 0 0 0 0 0 0 0 0 0 0 0
37382- 10 10 10 26 26 26 62 62 62 66 66 66
37383- 2 2 6 2 2 6 2 2 6 6 6 6
37384- 70 70 70 170 170 170 206 206 206 234 234 234
37385-246 246 246 250 250 250 250 250 250 238 238 238
37386-226 226 226 231 231 231 238 238 238 250 250 250
37387-250 250 250 250 250 250 246 246 246 231 231 231
37388-214 214 214 206 206 206 202 202 202 202 202 202
37389-198 198 198 202 202 202 182 182 182 18 18 18
37390- 2 2 6 2 2 6 2 2 6 2 2 6
37391- 2 2 6 2 2 6 2 2 6 2 2 6
37392- 2 2 6 62 62 62 66 66 66 30 30 30
37393- 10 10 10 0 0 0 0 0 0 0 0 0
37394- 0 0 0 0 0 0 0 0 0 0 0 0
37395- 0 0 0 0 0 0 0 0 0 0 0 0
37396- 0 0 0 0 0 0 0 0 0 0 0 0
37397- 0 0 0 0 0 0 0 0 0 0 0 0
37398- 0 0 0 0 0 0 0 0 0 0 0 0
37399- 0 0 0 0 0 0 0 0 0 0 0 0
37400- 0 0 0 0 0 0 0 0 0 0 0 0
37401- 0 0 0 0 0 0 0 0 0 0 0 0
37402- 14 14 14 42 42 42 82 82 82 18 18 18
37403- 2 2 6 2 2 6 2 2 6 10 10 10
37404- 94 94 94 182 182 182 218 218 218 242 242 242
37405-250 250 250 253 253 253 253 253 253 250 250 250
37406-234 234 234 253 253 253 253 253 253 253 253 253
37407-253 253 253 253 253 253 253 253 253 246 246 246
37408-238 238 238 226 226 226 210 210 210 202 202 202
37409-195 195 195 195 195 195 210 210 210 158 158 158
37410- 6 6 6 14 14 14 50 50 50 14 14 14
37411- 2 2 6 2 2 6 2 2 6 2 2 6
37412- 2 2 6 6 6 6 86 86 86 46 46 46
37413- 18 18 18 6 6 6 0 0 0 0 0 0
37414- 0 0 0 0 0 0 0 0 0 0 0 0
37415- 0 0 0 0 0 0 0 0 0 0 0 0
37416- 0 0 0 0 0 0 0 0 0 0 0 0
37417- 0 0 0 0 0 0 0 0 0 0 0 0
37418- 0 0 0 0 0 0 0 0 0 0 0 0
37419- 0 0 0 0 0 0 0 0 0 0 0 0
37420- 0 0 0 0 0 0 0 0 0 0 0 0
37421- 0 0 0 0 0 0 0 0 0 6 6 6
37422- 22 22 22 54 54 54 70 70 70 2 2 6
37423- 2 2 6 10 10 10 2 2 6 22 22 22
37424-166 166 166 231 231 231 250 250 250 253 253 253
37425-253 253 253 253 253 253 253 253 253 250 250 250
37426-242 242 242 253 253 253 253 253 253 253 253 253
37427-253 253 253 253 253 253 253 253 253 253 253 253
37428-253 253 253 253 253 253 253 253 253 246 246 246
37429-231 231 231 206 206 206 198 198 198 226 226 226
37430- 94 94 94 2 2 6 6 6 6 38 38 38
37431- 30 30 30 2 2 6 2 2 6 2 2 6
37432- 2 2 6 2 2 6 62 62 62 66 66 66
37433- 26 26 26 10 10 10 0 0 0 0 0 0
37434- 0 0 0 0 0 0 0 0 0 0 0 0
37435- 0 0 0 0 0 0 0 0 0 0 0 0
37436- 0 0 0 0 0 0 0 0 0 0 0 0
37437- 0 0 0 0 0 0 0 0 0 0 0 0
37438- 0 0 0 0 0 0 0 0 0 0 0 0
37439- 0 0 0 0 0 0 0 0 0 0 0 0
37440- 0 0 0 0 0 0 0 0 0 0 0 0
37441- 0 0 0 0 0 0 0 0 0 10 10 10
37442- 30 30 30 74 74 74 50 50 50 2 2 6
37443- 26 26 26 26 26 26 2 2 6 106 106 106
37444-238 238 238 253 253 253 253 253 253 253 253 253
37445-253 253 253 253 253 253 253 253 253 253 253 253
37446-253 253 253 253 253 253 253 253 253 253 253 253
37447-253 253 253 253 253 253 253 253 253 253 253 253
37448-253 253 253 253 253 253 253 253 253 253 253 253
37449-253 253 253 246 246 246 218 218 218 202 202 202
37450-210 210 210 14 14 14 2 2 6 2 2 6
37451- 30 30 30 22 22 22 2 2 6 2 2 6
37452- 2 2 6 2 2 6 18 18 18 86 86 86
37453- 42 42 42 14 14 14 0 0 0 0 0 0
37454- 0 0 0 0 0 0 0 0 0 0 0 0
37455- 0 0 0 0 0 0 0 0 0 0 0 0
37456- 0 0 0 0 0 0 0 0 0 0 0 0
37457- 0 0 0 0 0 0 0 0 0 0 0 0
37458- 0 0 0 0 0 0 0 0 0 0 0 0
37459- 0 0 0 0 0 0 0 0 0 0 0 0
37460- 0 0 0 0 0 0 0 0 0 0 0 0
37461- 0 0 0 0 0 0 0 0 0 14 14 14
37462- 42 42 42 90 90 90 22 22 22 2 2 6
37463- 42 42 42 2 2 6 18 18 18 218 218 218
37464-253 253 253 253 253 253 253 253 253 253 253 253
37465-253 253 253 253 253 253 253 253 253 253 253 253
37466-253 253 253 253 253 253 253 253 253 253 253 253
37467-253 253 253 253 253 253 253 253 253 253 253 253
37468-253 253 253 253 253 253 253 253 253 253 253 253
37469-253 253 253 253 253 253 250 250 250 221 221 221
37470-218 218 218 101 101 101 2 2 6 14 14 14
37471- 18 18 18 38 38 38 10 10 10 2 2 6
37472- 2 2 6 2 2 6 2 2 6 78 78 78
37473- 58 58 58 22 22 22 6 6 6 0 0 0
37474- 0 0 0 0 0 0 0 0 0 0 0 0
37475- 0 0 0 0 0 0 0 0 0 0 0 0
37476- 0 0 0 0 0 0 0 0 0 0 0 0
37477- 0 0 0 0 0 0 0 0 0 0 0 0
37478- 0 0 0 0 0 0 0 0 0 0 0 0
37479- 0 0 0 0 0 0 0 0 0 0 0 0
37480- 0 0 0 0 0 0 0 0 0 0 0 0
37481- 0 0 0 0 0 0 6 6 6 18 18 18
37482- 54 54 54 82 82 82 2 2 6 26 26 26
37483- 22 22 22 2 2 6 123 123 123 253 253 253
37484-253 253 253 253 253 253 253 253 253 253 253 253
37485-253 253 253 253 253 253 253 253 253 253 253 253
37486-253 253 253 253 253 253 253 253 253 253 253 253
37487-253 253 253 253 253 253 253 253 253 253 253 253
37488-253 253 253 253 253 253 253 253 253 253 253 253
37489-253 253 253 253 253 253 253 253 253 250 250 250
37490-238 238 238 198 198 198 6 6 6 38 38 38
37491- 58 58 58 26 26 26 38 38 38 2 2 6
37492- 2 2 6 2 2 6 2 2 6 46 46 46
37493- 78 78 78 30 30 30 10 10 10 0 0 0
37494- 0 0 0 0 0 0 0 0 0 0 0 0
37495- 0 0 0 0 0 0 0 0 0 0 0 0
37496- 0 0 0 0 0 0 0 0 0 0 0 0
37497- 0 0 0 0 0 0 0 0 0 0 0 0
37498- 0 0 0 0 0 0 0 0 0 0 0 0
37499- 0 0 0 0 0 0 0 0 0 0 0 0
37500- 0 0 0 0 0 0 0 0 0 0 0 0
37501- 0 0 0 0 0 0 10 10 10 30 30 30
37502- 74 74 74 58 58 58 2 2 6 42 42 42
37503- 2 2 6 22 22 22 231 231 231 253 253 253
37504-253 253 253 253 253 253 253 253 253 253 253 253
37505-253 253 253 253 253 253 253 253 253 250 250 250
37506-253 253 253 253 253 253 253 253 253 253 253 253
37507-253 253 253 253 253 253 253 253 253 253 253 253
37508-253 253 253 253 253 253 253 253 253 253 253 253
37509-253 253 253 253 253 253 253 253 253 253 253 253
37510-253 253 253 246 246 246 46 46 46 38 38 38
37511- 42 42 42 14 14 14 38 38 38 14 14 14
37512- 2 2 6 2 2 6 2 2 6 6 6 6
37513- 86 86 86 46 46 46 14 14 14 0 0 0
37514- 0 0 0 0 0 0 0 0 0 0 0 0
37515- 0 0 0 0 0 0 0 0 0 0 0 0
37516- 0 0 0 0 0 0 0 0 0 0 0 0
37517- 0 0 0 0 0 0 0 0 0 0 0 0
37518- 0 0 0 0 0 0 0 0 0 0 0 0
37519- 0 0 0 0 0 0 0 0 0 0 0 0
37520- 0 0 0 0 0 0 0 0 0 0 0 0
37521- 0 0 0 6 6 6 14 14 14 42 42 42
37522- 90 90 90 18 18 18 18 18 18 26 26 26
37523- 2 2 6 116 116 116 253 253 253 253 253 253
37524-253 253 253 253 253 253 253 253 253 253 253 253
37525-253 253 253 253 253 253 250 250 250 238 238 238
37526-253 253 253 253 253 253 253 253 253 253 253 253
37527-253 253 253 253 253 253 253 253 253 253 253 253
37528-253 253 253 253 253 253 253 253 253 253 253 253
37529-253 253 253 253 253 253 253 253 253 253 253 253
37530-253 253 253 253 253 253 94 94 94 6 6 6
37531- 2 2 6 2 2 6 10 10 10 34 34 34
37532- 2 2 6 2 2 6 2 2 6 2 2 6
37533- 74 74 74 58 58 58 22 22 22 6 6 6
37534- 0 0 0 0 0 0 0 0 0 0 0 0
37535- 0 0 0 0 0 0 0 0 0 0 0 0
37536- 0 0 0 0 0 0 0 0 0 0 0 0
37537- 0 0 0 0 0 0 0 0 0 0 0 0
37538- 0 0 0 0 0 0 0 0 0 0 0 0
37539- 0 0 0 0 0 0 0 0 0 0 0 0
37540- 0 0 0 0 0 0 0 0 0 0 0 0
37541- 0 0 0 10 10 10 26 26 26 66 66 66
37542- 82 82 82 2 2 6 38 38 38 6 6 6
37543- 14 14 14 210 210 210 253 253 253 253 253 253
37544-253 253 253 253 253 253 253 253 253 253 253 253
37545-253 253 253 253 253 253 246 246 246 242 242 242
37546-253 253 253 253 253 253 253 253 253 253 253 253
37547-253 253 253 253 253 253 253 253 253 253 253 253
37548-253 253 253 253 253 253 253 253 253 253 253 253
37549-253 253 253 253 253 253 253 253 253 253 253 253
37550-253 253 253 253 253 253 144 144 144 2 2 6
37551- 2 2 6 2 2 6 2 2 6 46 46 46
37552- 2 2 6 2 2 6 2 2 6 2 2 6
37553- 42 42 42 74 74 74 30 30 30 10 10 10
37554- 0 0 0 0 0 0 0 0 0 0 0 0
37555- 0 0 0 0 0 0 0 0 0 0 0 0
37556- 0 0 0 0 0 0 0 0 0 0 0 0
37557- 0 0 0 0 0 0 0 0 0 0 0 0
37558- 0 0 0 0 0 0 0 0 0 0 0 0
37559- 0 0 0 0 0 0 0 0 0 0 0 0
37560- 0 0 0 0 0 0 0 0 0 0 0 0
37561- 6 6 6 14 14 14 42 42 42 90 90 90
37562- 26 26 26 6 6 6 42 42 42 2 2 6
37563- 74 74 74 250 250 250 253 253 253 253 253 253
37564-253 253 253 253 253 253 253 253 253 253 253 253
37565-253 253 253 253 253 253 242 242 242 242 242 242
37566-253 253 253 253 253 253 253 253 253 253 253 253
37567-253 253 253 253 253 253 253 253 253 253 253 253
37568-253 253 253 253 253 253 253 253 253 253 253 253
37569-253 253 253 253 253 253 253 253 253 253 253 253
37570-253 253 253 253 253 253 182 182 182 2 2 6
37571- 2 2 6 2 2 6 2 2 6 46 46 46
37572- 2 2 6 2 2 6 2 2 6 2 2 6
37573- 10 10 10 86 86 86 38 38 38 10 10 10
37574- 0 0 0 0 0 0 0 0 0 0 0 0
37575- 0 0 0 0 0 0 0 0 0 0 0 0
37576- 0 0 0 0 0 0 0 0 0 0 0 0
37577- 0 0 0 0 0 0 0 0 0 0 0 0
37578- 0 0 0 0 0 0 0 0 0 0 0 0
37579- 0 0 0 0 0 0 0 0 0 0 0 0
37580- 0 0 0 0 0 0 0 0 0 0 0 0
37581- 10 10 10 26 26 26 66 66 66 82 82 82
37582- 2 2 6 22 22 22 18 18 18 2 2 6
37583-149 149 149 253 253 253 253 253 253 253 253 253
37584-253 253 253 253 253 253 253 253 253 253 253 253
37585-253 253 253 253 253 253 234 234 234 242 242 242
37586-253 253 253 253 253 253 253 253 253 253 253 253
37587-253 253 253 253 253 253 253 253 253 253 253 253
37588-253 253 253 253 253 253 253 253 253 253 253 253
37589-253 253 253 253 253 253 253 253 253 253 253 253
37590-253 253 253 253 253 253 206 206 206 2 2 6
37591- 2 2 6 2 2 6 2 2 6 38 38 38
37592- 2 2 6 2 2 6 2 2 6 2 2 6
37593- 6 6 6 86 86 86 46 46 46 14 14 14
37594- 0 0 0 0 0 0 0 0 0 0 0 0
37595- 0 0 0 0 0 0 0 0 0 0 0 0
37596- 0 0 0 0 0 0 0 0 0 0 0 0
37597- 0 0 0 0 0 0 0 0 0 0 0 0
37598- 0 0 0 0 0 0 0 0 0 0 0 0
37599- 0 0 0 0 0 0 0 0 0 0 0 0
37600- 0 0 0 0 0 0 0 0 0 6 6 6
37601- 18 18 18 46 46 46 86 86 86 18 18 18
37602- 2 2 6 34 34 34 10 10 10 6 6 6
37603-210 210 210 253 253 253 253 253 253 253 253 253
37604-253 253 253 253 253 253 253 253 253 253 253 253
37605-253 253 253 253 253 253 234 234 234 242 242 242
37606-253 253 253 253 253 253 253 253 253 253 253 253
37607-253 253 253 253 253 253 253 253 253 253 253 253
37608-253 253 253 253 253 253 253 253 253 253 253 253
37609-253 253 253 253 253 253 253 253 253 253 253 253
37610-253 253 253 253 253 253 221 221 221 6 6 6
37611- 2 2 6 2 2 6 6 6 6 30 30 30
37612- 2 2 6 2 2 6 2 2 6 2 2 6
37613- 2 2 6 82 82 82 54 54 54 18 18 18
37614- 6 6 6 0 0 0 0 0 0 0 0 0
37615- 0 0 0 0 0 0 0 0 0 0 0 0
37616- 0 0 0 0 0 0 0 0 0 0 0 0
37617- 0 0 0 0 0 0 0 0 0 0 0 0
37618- 0 0 0 0 0 0 0 0 0 0 0 0
37619- 0 0 0 0 0 0 0 0 0 0 0 0
37620- 0 0 0 0 0 0 0 0 0 10 10 10
37621- 26 26 26 66 66 66 62 62 62 2 2 6
37622- 2 2 6 38 38 38 10 10 10 26 26 26
37623-238 238 238 253 253 253 253 253 253 253 253 253
37624-253 253 253 253 253 253 253 253 253 253 253 253
37625-253 253 253 253 253 253 231 231 231 238 238 238
37626-253 253 253 253 253 253 253 253 253 253 253 253
37627-253 253 253 253 253 253 253 253 253 253 253 253
37628-253 253 253 253 253 253 253 253 253 253 253 253
37629-253 253 253 253 253 253 253 253 253 253 253 253
37630-253 253 253 253 253 253 231 231 231 6 6 6
37631- 2 2 6 2 2 6 10 10 10 30 30 30
37632- 2 2 6 2 2 6 2 2 6 2 2 6
37633- 2 2 6 66 66 66 58 58 58 22 22 22
37634- 6 6 6 0 0 0 0 0 0 0 0 0
37635- 0 0 0 0 0 0 0 0 0 0 0 0
37636- 0 0 0 0 0 0 0 0 0 0 0 0
37637- 0 0 0 0 0 0 0 0 0 0 0 0
37638- 0 0 0 0 0 0 0 0 0 0 0 0
37639- 0 0 0 0 0 0 0 0 0 0 0 0
37640- 0 0 0 0 0 0 0 0 0 10 10 10
37641- 38 38 38 78 78 78 6 6 6 2 2 6
37642- 2 2 6 46 46 46 14 14 14 42 42 42
37643-246 246 246 253 253 253 253 253 253 253 253 253
37644-253 253 253 253 253 253 253 253 253 253 253 253
37645-253 253 253 253 253 253 231 231 231 242 242 242
37646-253 253 253 253 253 253 253 253 253 253 253 253
37647-253 253 253 253 253 253 253 253 253 253 253 253
37648-253 253 253 253 253 253 253 253 253 253 253 253
37649-253 253 253 253 253 253 253 253 253 253 253 253
37650-253 253 253 253 253 253 234 234 234 10 10 10
37651- 2 2 6 2 2 6 22 22 22 14 14 14
37652- 2 2 6 2 2 6 2 2 6 2 2 6
37653- 2 2 6 66 66 66 62 62 62 22 22 22
37654- 6 6 6 0 0 0 0 0 0 0 0 0
37655- 0 0 0 0 0 0 0 0 0 0 0 0
37656- 0 0 0 0 0 0 0 0 0 0 0 0
37657- 0 0 0 0 0 0 0 0 0 0 0 0
37658- 0 0 0 0 0 0 0 0 0 0 0 0
37659- 0 0 0 0 0 0 0 0 0 0 0 0
37660- 0 0 0 0 0 0 6 6 6 18 18 18
37661- 50 50 50 74 74 74 2 2 6 2 2 6
37662- 14 14 14 70 70 70 34 34 34 62 62 62
37663-250 250 250 253 253 253 253 253 253 253 253 253
37664-253 253 253 253 253 253 253 253 253 253 253 253
37665-253 253 253 253 253 253 231 231 231 246 246 246
37666-253 253 253 253 253 253 253 253 253 253 253 253
37667-253 253 253 253 253 253 253 253 253 253 253 253
37668-253 253 253 253 253 253 253 253 253 253 253 253
37669-253 253 253 253 253 253 253 253 253 253 253 253
37670-253 253 253 253 253 253 234 234 234 14 14 14
37671- 2 2 6 2 2 6 30 30 30 2 2 6
37672- 2 2 6 2 2 6 2 2 6 2 2 6
37673- 2 2 6 66 66 66 62 62 62 22 22 22
37674- 6 6 6 0 0 0 0 0 0 0 0 0
37675- 0 0 0 0 0 0 0 0 0 0 0 0
37676- 0 0 0 0 0 0 0 0 0 0 0 0
37677- 0 0 0 0 0 0 0 0 0 0 0 0
37678- 0 0 0 0 0 0 0 0 0 0 0 0
37679- 0 0 0 0 0 0 0 0 0 0 0 0
37680- 0 0 0 0 0 0 6 6 6 18 18 18
37681- 54 54 54 62 62 62 2 2 6 2 2 6
37682- 2 2 6 30 30 30 46 46 46 70 70 70
37683-250 250 250 253 253 253 253 253 253 253 253 253
37684-253 253 253 253 253 253 253 253 253 253 253 253
37685-253 253 253 253 253 253 231 231 231 246 246 246
37686-253 253 253 253 253 253 253 253 253 253 253 253
37687-253 253 253 253 253 253 253 253 253 253 253 253
37688-253 253 253 253 253 253 253 253 253 253 253 253
37689-253 253 253 253 253 253 253 253 253 253 253 253
37690-253 253 253 253 253 253 226 226 226 10 10 10
37691- 2 2 6 6 6 6 30 30 30 2 2 6
37692- 2 2 6 2 2 6 2 2 6 2 2 6
37693- 2 2 6 66 66 66 58 58 58 22 22 22
37694- 6 6 6 0 0 0 0 0 0 0 0 0
37695- 0 0 0 0 0 0 0 0 0 0 0 0
37696- 0 0 0 0 0 0 0 0 0 0 0 0
37697- 0 0 0 0 0 0 0 0 0 0 0 0
37698- 0 0 0 0 0 0 0 0 0 0 0 0
37699- 0 0 0 0 0 0 0 0 0 0 0 0
37700- 0 0 0 0 0 0 6 6 6 22 22 22
37701- 58 58 58 62 62 62 2 2 6 2 2 6
37702- 2 2 6 2 2 6 30 30 30 78 78 78
37703-250 250 250 253 253 253 253 253 253 253 253 253
37704-253 253 253 253 253 253 253 253 253 253 253 253
37705-253 253 253 253 253 253 231 231 231 246 246 246
37706-253 253 253 253 253 253 253 253 253 253 253 253
37707-253 253 253 253 253 253 253 253 253 253 253 253
37708-253 253 253 253 253 253 253 253 253 253 253 253
37709-253 253 253 253 253 253 253 253 253 253 253 253
37710-253 253 253 253 253 253 206 206 206 2 2 6
37711- 22 22 22 34 34 34 18 14 6 22 22 22
37712- 26 26 26 18 18 18 6 6 6 2 2 6
37713- 2 2 6 82 82 82 54 54 54 18 18 18
37714- 6 6 6 0 0 0 0 0 0 0 0 0
37715- 0 0 0 0 0 0 0 0 0 0 0 0
37716- 0 0 0 0 0 0 0 0 0 0 0 0
37717- 0 0 0 0 0 0 0 0 0 0 0 0
37718- 0 0 0 0 0 0 0 0 0 0 0 0
37719- 0 0 0 0 0 0 0 0 0 0 0 0
37720- 0 0 0 0 0 0 6 6 6 26 26 26
37721- 62 62 62 106 106 106 74 54 14 185 133 11
37722-210 162 10 121 92 8 6 6 6 62 62 62
37723-238 238 238 253 253 253 253 253 253 253 253 253
37724-253 253 253 253 253 253 253 253 253 253 253 253
37725-253 253 253 253 253 253 231 231 231 246 246 246
37726-253 253 253 253 253 253 253 253 253 253 253 253
37727-253 253 253 253 253 253 253 253 253 253 253 253
37728-253 253 253 253 253 253 253 253 253 253 253 253
37729-253 253 253 253 253 253 253 253 253 253 253 253
37730-253 253 253 253 253 253 158 158 158 18 18 18
37731- 14 14 14 2 2 6 2 2 6 2 2 6
37732- 6 6 6 18 18 18 66 66 66 38 38 38
37733- 6 6 6 94 94 94 50 50 50 18 18 18
37734- 6 6 6 0 0 0 0 0 0 0 0 0
37735- 0 0 0 0 0 0 0 0 0 0 0 0
37736- 0 0 0 0 0 0 0 0 0 0 0 0
37737- 0 0 0 0 0 0 0 0 0 0 0 0
37738- 0 0 0 0 0 0 0 0 0 0 0 0
37739- 0 0 0 0 0 0 0 0 0 6 6 6
37740- 10 10 10 10 10 10 18 18 18 38 38 38
37741- 78 78 78 142 134 106 216 158 10 242 186 14
37742-246 190 14 246 190 14 156 118 10 10 10 10
37743- 90 90 90 238 238 238 253 253 253 253 253 253
37744-253 253 253 253 253 253 253 253 253 253 253 253
37745-253 253 253 253 253 253 231 231 231 250 250 250
37746-253 253 253 253 253 253 253 253 253 253 253 253
37747-253 253 253 253 253 253 253 253 253 253 253 253
37748-253 253 253 253 253 253 253 253 253 253 253 253
37749-253 253 253 253 253 253 253 253 253 246 230 190
37750-238 204 91 238 204 91 181 142 44 37 26 9
37751- 2 2 6 2 2 6 2 2 6 2 2 6
37752- 2 2 6 2 2 6 38 38 38 46 46 46
37753- 26 26 26 106 106 106 54 54 54 18 18 18
37754- 6 6 6 0 0 0 0 0 0 0 0 0
37755- 0 0 0 0 0 0 0 0 0 0 0 0
37756- 0 0 0 0 0 0 0 0 0 0 0 0
37757- 0 0 0 0 0 0 0 0 0 0 0 0
37758- 0 0 0 0 0 0 0 0 0 0 0 0
37759- 0 0 0 6 6 6 14 14 14 22 22 22
37760- 30 30 30 38 38 38 50 50 50 70 70 70
37761-106 106 106 190 142 34 226 170 11 242 186 14
37762-246 190 14 246 190 14 246 190 14 154 114 10
37763- 6 6 6 74 74 74 226 226 226 253 253 253
37764-253 253 253 253 253 253 253 253 253 253 253 253
37765-253 253 253 253 253 253 231 231 231 250 250 250
37766-253 253 253 253 253 253 253 253 253 253 253 253
37767-253 253 253 253 253 253 253 253 253 253 253 253
37768-253 253 253 253 253 253 253 253 253 253 253 253
37769-253 253 253 253 253 253 253 253 253 228 184 62
37770-241 196 14 241 208 19 232 195 16 38 30 10
37771- 2 2 6 2 2 6 2 2 6 2 2 6
37772- 2 2 6 6 6 6 30 30 30 26 26 26
37773-203 166 17 154 142 90 66 66 66 26 26 26
37774- 6 6 6 0 0 0 0 0 0 0 0 0
37775- 0 0 0 0 0 0 0 0 0 0 0 0
37776- 0 0 0 0 0 0 0 0 0 0 0 0
37777- 0 0 0 0 0 0 0 0 0 0 0 0
37778- 0 0 0 0 0 0 0 0 0 0 0 0
37779- 6 6 6 18 18 18 38 38 38 58 58 58
37780- 78 78 78 86 86 86 101 101 101 123 123 123
37781-175 146 61 210 150 10 234 174 13 246 186 14
37782-246 190 14 246 190 14 246 190 14 238 190 10
37783-102 78 10 2 2 6 46 46 46 198 198 198
37784-253 253 253 253 253 253 253 253 253 253 253 253
37785-253 253 253 253 253 253 234 234 234 242 242 242
37786-253 253 253 253 253 253 253 253 253 253 253 253
37787-253 253 253 253 253 253 253 253 253 253 253 253
37788-253 253 253 253 253 253 253 253 253 253 253 253
37789-253 253 253 253 253 253 253 253 253 224 178 62
37790-242 186 14 241 196 14 210 166 10 22 18 6
37791- 2 2 6 2 2 6 2 2 6 2 2 6
37792- 2 2 6 2 2 6 6 6 6 121 92 8
37793-238 202 15 232 195 16 82 82 82 34 34 34
37794- 10 10 10 0 0 0 0 0 0 0 0 0
37795- 0 0 0 0 0 0 0 0 0 0 0 0
37796- 0 0 0 0 0 0 0 0 0 0 0 0
37797- 0 0 0 0 0 0 0 0 0 0 0 0
37798- 0 0 0 0 0 0 0 0 0 0 0 0
37799- 14 14 14 38 38 38 70 70 70 154 122 46
37800-190 142 34 200 144 11 197 138 11 197 138 11
37801-213 154 11 226 170 11 242 186 14 246 190 14
37802-246 190 14 246 190 14 246 190 14 246 190 14
37803-225 175 15 46 32 6 2 2 6 22 22 22
37804-158 158 158 250 250 250 253 253 253 253 253 253
37805-253 253 253 253 253 253 253 253 253 253 253 253
37806-253 253 253 253 253 253 253 253 253 253 253 253
37807-253 253 253 253 253 253 253 253 253 253 253 253
37808-253 253 253 253 253 253 253 253 253 253 253 253
37809-253 253 253 250 250 250 242 242 242 224 178 62
37810-239 182 13 236 186 11 213 154 11 46 32 6
37811- 2 2 6 2 2 6 2 2 6 2 2 6
37812- 2 2 6 2 2 6 61 42 6 225 175 15
37813-238 190 10 236 186 11 112 100 78 42 42 42
37814- 14 14 14 0 0 0 0 0 0 0 0 0
37815- 0 0 0 0 0 0 0 0 0 0 0 0
37816- 0 0 0 0 0 0 0 0 0 0 0 0
37817- 0 0 0 0 0 0 0 0 0 0 0 0
37818- 0 0 0 0 0 0 0 0 0 6 6 6
37819- 22 22 22 54 54 54 154 122 46 213 154 11
37820-226 170 11 230 174 11 226 170 11 226 170 11
37821-236 178 12 242 186 14 246 190 14 246 190 14
37822-246 190 14 246 190 14 246 190 14 246 190 14
37823-241 196 14 184 144 12 10 10 10 2 2 6
37824- 6 6 6 116 116 116 242 242 242 253 253 253
37825-253 253 253 253 253 253 253 253 253 253 253 253
37826-253 253 253 253 253 253 253 253 253 253 253 253
37827-253 253 253 253 253 253 253 253 253 253 253 253
37828-253 253 253 253 253 253 253 253 253 253 253 253
37829-253 253 253 231 231 231 198 198 198 214 170 54
37830-236 178 12 236 178 12 210 150 10 137 92 6
37831- 18 14 6 2 2 6 2 2 6 2 2 6
37832- 6 6 6 70 47 6 200 144 11 236 178 12
37833-239 182 13 239 182 13 124 112 88 58 58 58
37834- 22 22 22 6 6 6 0 0 0 0 0 0
37835- 0 0 0 0 0 0 0 0 0 0 0 0
37836- 0 0 0 0 0 0 0 0 0 0 0 0
37837- 0 0 0 0 0 0 0 0 0 0 0 0
37838- 0 0 0 0 0 0 0 0 0 10 10 10
37839- 30 30 30 70 70 70 180 133 36 226 170 11
37840-239 182 13 242 186 14 242 186 14 246 186 14
37841-246 190 14 246 190 14 246 190 14 246 190 14
37842-246 190 14 246 190 14 246 190 14 246 190 14
37843-246 190 14 232 195 16 98 70 6 2 2 6
37844- 2 2 6 2 2 6 66 66 66 221 221 221
37845-253 253 253 253 253 253 253 253 253 253 253 253
37846-253 253 253 253 253 253 253 253 253 253 253 253
37847-253 253 253 253 253 253 253 253 253 253 253 253
37848-253 253 253 253 253 253 253 253 253 253 253 253
37849-253 253 253 206 206 206 198 198 198 214 166 58
37850-230 174 11 230 174 11 216 158 10 192 133 9
37851-163 110 8 116 81 8 102 78 10 116 81 8
37852-167 114 7 197 138 11 226 170 11 239 182 13
37853-242 186 14 242 186 14 162 146 94 78 78 78
37854- 34 34 34 14 14 14 6 6 6 0 0 0
37855- 0 0 0 0 0 0 0 0 0 0 0 0
37856- 0 0 0 0 0 0 0 0 0 0 0 0
37857- 0 0 0 0 0 0 0 0 0 0 0 0
37858- 0 0 0 0 0 0 0 0 0 6 6 6
37859- 30 30 30 78 78 78 190 142 34 226 170 11
37860-239 182 13 246 190 14 246 190 14 246 190 14
37861-246 190 14 246 190 14 246 190 14 246 190 14
37862-246 190 14 246 190 14 246 190 14 246 190 14
37863-246 190 14 241 196 14 203 166 17 22 18 6
37864- 2 2 6 2 2 6 2 2 6 38 38 38
37865-218 218 218 253 253 253 253 253 253 253 253 253
37866-253 253 253 253 253 253 253 253 253 253 253 253
37867-253 253 253 253 253 253 253 253 253 253 253 253
37868-253 253 253 253 253 253 253 253 253 253 253 253
37869-250 250 250 206 206 206 198 198 198 202 162 69
37870-226 170 11 236 178 12 224 166 10 210 150 10
37871-200 144 11 197 138 11 192 133 9 197 138 11
37872-210 150 10 226 170 11 242 186 14 246 190 14
37873-246 190 14 246 186 14 225 175 15 124 112 88
37874- 62 62 62 30 30 30 14 14 14 6 6 6
37875- 0 0 0 0 0 0 0 0 0 0 0 0
37876- 0 0 0 0 0 0 0 0 0 0 0 0
37877- 0 0 0 0 0 0 0 0 0 0 0 0
37878- 0 0 0 0 0 0 0 0 0 10 10 10
37879- 30 30 30 78 78 78 174 135 50 224 166 10
37880-239 182 13 246 190 14 246 190 14 246 190 14
37881-246 190 14 246 190 14 246 190 14 246 190 14
37882-246 190 14 246 190 14 246 190 14 246 190 14
37883-246 190 14 246 190 14 241 196 14 139 102 15
37884- 2 2 6 2 2 6 2 2 6 2 2 6
37885- 78 78 78 250 250 250 253 253 253 253 253 253
37886-253 253 253 253 253 253 253 253 253 253 253 253
37887-253 253 253 253 253 253 253 253 253 253 253 253
37888-253 253 253 253 253 253 253 253 253 253 253 253
37889-250 250 250 214 214 214 198 198 198 190 150 46
37890-219 162 10 236 178 12 234 174 13 224 166 10
37891-216 158 10 213 154 11 213 154 11 216 158 10
37892-226 170 11 239 182 13 246 190 14 246 190 14
37893-246 190 14 246 190 14 242 186 14 206 162 42
37894-101 101 101 58 58 58 30 30 30 14 14 14
37895- 6 6 6 0 0 0 0 0 0 0 0 0
37896- 0 0 0 0 0 0 0 0 0 0 0 0
37897- 0 0 0 0 0 0 0 0 0 0 0 0
37898- 0 0 0 0 0 0 0 0 0 10 10 10
37899- 30 30 30 74 74 74 174 135 50 216 158 10
37900-236 178 12 246 190 14 246 190 14 246 190 14
37901-246 190 14 246 190 14 246 190 14 246 190 14
37902-246 190 14 246 190 14 246 190 14 246 190 14
37903-246 190 14 246 190 14 241 196 14 226 184 13
37904- 61 42 6 2 2 6 2 2 6 2 2 6
37905- 22 22 22 238 238 238 253 253 253 253 253 253
37906-253 253 253 253 253 253 253 253 253 253 253 253
37907-253 253 253 253 253 253 253 253 253 253 253 253
37908-253 253 253 253 253 253 253 253 253 253 253 253
37909-253 253 253 226 226 226 187 187 187 180 133 36
37910-216 158 10 236 178 12 239 182 13 236 178 12
37911-230 174 11 226 170 11 226 170 11 230 174 11
37912-236 178 12 242 186 14 246 190 14 246 190 14
37913-246 190 14 246 190 14 246 186 14 239 182 13
37914-206 162 42 106 106 106 66 66 66 34 34 34
37915- 14 14 14 6 6 6 0 0 0 0 0 0
37916- 0 0 0 0 0 0 0 0 0 0 0 0
37917- 0 0 0 0 0 0 0 0 0 0 0 0
37918- 0 0 0 0 0 0 0 0 0 6 6 6
37919- 26 26 26 70 70 70 163 133 67 213 154 11
37920-236 178 12 246 190 14 246 190 14 246 190 14
37921-246 190 14 246 190 14 246 190 14 246 190 14
37922-246 190 14 246 190 14 246 190 14 246 190 14
37923-246 190 14 246 190 14 246 190 14 241 196 14
37924-190 146 13 18 14 6 2 2 6 2 2 6
37925- 46 46 46 246 246 246 253 253 253 253 253 253
37926-253 253 253 253 253 253 253 253 253 253 253 253
37927-253 253 253 253 253 253 253 253 253 253 253 253
37928-253 253 253 253 253 253 253 253 253 253 253 253
37929-253 253 253 221 221 221 86 86 86 156 107 11
37930-216 158 10 236 178 12 242 186 14 246 186 14
37931-242 186 14 239 182 13 239 182 13 242 186 14
37932-242 186 14 246 186 14 246 190 14 246 190 14
37933-246 190 14 246 190 14 246 190 14 246 190 14
37934-242 186 14 225 175 15 142 122 72 66 66 66
37935- 30 30 30 10 10 10 0 0 0 0 0 0
37936- 0 0 0 0 0 0 0 0 0 0 0 0
37937- 0 0 0 0 0 0 0 0 0 0 0 0
37938- 0 0 0 0 0 0 0 0 0 6 6 6
37939- 26 26 26 70 70 70 163 133 67 210 150 10
37940-236 178 12 246 190 14 246 190 14 246 190 14
37941-246 190 14 246 190 14 246 190 14 246 190 14
37942-246 190 14 246 190 14 246 190 14 246 190 14
37943-246 190 14 246 190 14 246 190 14 246 190 14
37944-232 195 16 121 92 8 34 34 34 106 106 106
37945-221 221 221 253 253 253 253 253 253 253 253 253
37946-253 253 253 253 253 253 253 253 253 253 253 253
37947-253 253 253 253 253 253 253 253 253 253 253 253
37948-253 253 253 253 253 253 253 253 253 253 253 253
37949-242 242 242 82 82 82 18 14 6 163 110 8
37950-216 158 10 236 178 12 242 186 14 246 190 14
37951-246 190 14 246 190 14 246 190 14 246 190 14
37952-246 190 14 246 190 14 246 190 14 246 190 14
37953-246 190 14 246 190 14 246 190 14 246 190 14
37954-246 190 14 246 190 14 242 186 14 163 133 67
37955- 46 46 46 18 18 18 6 6 6 0 0 0
37956- 0 0 0 0 0 0 0 0 0 0 0 0
37957- 0 0 0 0 0 0 0 0 0 0 0 0
37958- 0 0 0 0 0 0 0 0 0 10 10 10
37959- 30 30 30 78 78 78 163 133 67 210 150 10
37960-236 178 12 246 186 14 246 190 14 246 190 14
37961-246 190 14 246 190 14 246 190 14 246 190 14
37962-246 190 14 246 190 14 246 190 14 246 190 14
37963-246 190 14 246 190 14 246 190 14 246 190 14
37964-241 196 14 215 174 15 190 178 144 253 253 253
37965-253 253 253 253 253 253 253 253 253 253 253 253
37966-253 253 253 253 253 253 253 253 253 253 253 253
37967-253 253 253 253 253 253 253 253 253 253 253 253
37968-253 253 253 253 253 253 253 253 253 218 218 218
37969- 58 58 58 2 2 6 22 18 6 167 114 7
37970-216 158 10 236 178 12 246 186 14 246 190 14
37971-246 190 14 246 190 14 246 190 14 246 190 14
37972-246 190 14 246 190 14 246 190 14 246 190 14
37973-246 190 14 246 190 14 246 190 14 246 190 14
37974-246 190 14 246 186 14 242 186 14 190 150 46
37975- 54 54 54 22 22 22 6 6 6 0 0 0
37976- 0 0 0 0 0 0 0 0 0 0 0 0
37977- 0 0 0 0 0 0 0 0 0 0 0 0
37978- 0 0 0 0 0 0 0 0 0 14 14 14
37979- 38 38 38 86 86 86 180 133 36 213 154 11
37980-236 178 12 246 186 14 246 190 14 246 190 14
37981-246 190 14 246 190 14 246 190 14 246 190 14
37982-246 190 14 246 190 14 246 190 14 246 190 14
37983-246 190 14 246 190 14 246 190 14 246 190 14
37984-246 190 14 232 195 16 190 146 13 214 214 214
37985-253 253 253 253 253 253 253 253 253 253 253 253
37986-253 253 253 253 253 253 253 253 253 253 253 253
37987-253 253 253 253 253 253 253 253 253 253 253 253
37988-253 253 253 250 250 250 170 170 170 26 26 26
37989- 2 2 6 2 2 6 37 26 9 163 110 8
37990-219 162 10 239 182 13 246 186 14 246 190 14
37991-246 190 14 246 190 14 246 190 14 246 190 14
37992-246 190 14 246 190 14 246 190 14 246 190 14
37993-246 190 14 246 190 14 246 190 14 246 190 14
37994-246 186 14 236 178 12 224 166 10 142 122 72
37995- 46 46 46 18 18 18 6 6 6 0 0 0
37996- 0 0 0 0 0 0 0 0 0 0 0 0
37997- 0 0 0 0 0 0 0 0 0 0 0 0
37998- 0 0 0 0 0 0 6 6 6 18 18 18
37999- 50 50 50 109 106 95 192 133 9 224 166 10
38000-242 186 14 246 190 14 246 190 14 246 190 14
38001-246 190 14 246 190 14 246 190 14 246 190 14
38002-246 190 14 246 190 14 246 190 14 246 190 14
38003-246 190 14 246 190 14 246 190 14 246 190 14
38004-242 186 14 226 184 13 210 162 10 142 110 46
38005-226 226 226 253 253 253 253 253 253 253 253 253
38006-253 253 253 253 253 253 253 253 253 253 253 253
38007-253 253 253 253 253 253 253 253 253 253 253 253
38008-198 198 198 66 66 66 2 2 6 2 2 6
38009- 2 2 6 2 2 6 50 34 6 156 107 11
38010-219 162 10 239 182 13 246 186 14 246 190 14
38011-246 190 14 246 190 14 246 190 14 246 190 14
38012-246 190 14 246 190 14 246 190 14 246 190 14
38013-246 190 14 246 190 14 246 190 14 242 186 14
38014-234 174 13 213 154 11 154 122 46 66 66 66
38015- 30 30 30 10 10 10 0 0 0 0 0 0
38016- 0 0 0 0 0 0 0 0 0 0 0 0
38017- 0 0 0 0 0 0 0 0 0 0 0 0
38018- 0 0 0 0 0 0 6 6 6 22 22 22
38019- 58 58 58 154 121 60 206 145 10 234 174 13
38020-242 186 14 246 186 14 246 190 14 246 190 14
38021-246 190 14 246 190 14 246 190 14 246 190 14
38022-246 190 14 246 190 14 246 190 14 246 190 14
38023-246 190 14 246 190 14 246 190 14 246 190 14
38024-246 186 14 236 178 12 210 162 10 163 110 8
38025- 61 42 6 138 138 138 218 218 218 250 250 250
38026-253 253 253 253 253 253 253 253 253 250 250 250
38027-242 242 242 210 210 210 144 144 144 66 66 66
38028- 6 6 6 2 2 6 2 2 6 2 2 6
38029- 2 2 6 2 2 6 61 42 6 163 110 8
38030-216 158 10 236 178 12 246 190 14 246 190 14
38031-246 190 14 246 190 14 246 190 14 246 190 14
38032-246 190 14 246 190 14 246 190 14 246 190 14
38033-246 190 14 239 182 13 230 174 11 216 158 10
38034-190 142 34 124 112 88 70 70 70 38 38 38
38035- 18 18 18 6 6 6 0 0 0 0 0 0
38036- 0 0 0 0 0 0 0 0 0 0 0 0
38037- 0 0 0 0 0 0 0 0 0 0 0 0
38038- 0 0 0 0 0 0 6 6 6 22 22 22
38039- 62 62 62 168 124 44 206 145 10 224 166 10
38040-236 178 12 239 182 13 242 186 14 242 186 14
38041-246 186 14 246 190 14 246 190 14 246 190 14
38042-246 190 14 246 190 14 246 190 14 246 190 14
38043-246 190 14 246 190 14 246 190 14 246 190 14
38044-246 190 14 236 178 12 216 158 10 175 118 6
38045- 80 54 7 2 2 6 6 6 6 30 30 30
38046- 54 54 54 62 62 62 50 50 50 38 38 38
38047- 14 14 14 2 2 6 2 2 6 2 2 6
38048- 2 2 6 2 2 6 2 2 6 2 2 6
38049- 2 2 6 6 6 6 80 54 7 167 114 7
38050-213 154 11 236 178 12 246 190 14 246 190 14
38051-246 190 14 246 190 14 246 190 14 246 190 14
38052-246 190 14 242 186 14 239 182 13 239 182 13
38053-230 174 11 210 150 10 174 135 50 124 112 88
38054- 82 82 82 54 54 54 34 34 34 18 18 18
38055- 6 6 6 0 0 0 0 0 0 0 0 0
38056- 0 0 0 0 0 0 0 0 0 0 0 0
38057- 0 0 0 0 0 0 0 0 0 0 0 0
38058- 0 0 0 0 0 0 6 6 6 18 18 18
38059- 50 50 50 158 118 36 192 133 9 200 144 11
38060-216 158 10 219 162 10 224 166 10 226 170 11
38061-230 174 11 236 178 12 239 182 13 239 182 13
38062-242 186 14 246 186 14 246 190 14 246 190 14
38063-246 190 14 246 190 14 246 190 14 246 190 14
38064-246 186 14 230 174 11 210 150 10 163 110 8
38065-104 69 6 10 10 10 2 2 6 2 2 6
38066- 2 2 6 2 2 6 2 2 6 2 2 6
38067- 2 2 6 2 2 6 2 2 6 2 2 6
38068- 2 2 6 2 2 6 2 2 6 2 2 6
38069- 2 2 6 6 6 6 91 60 6 167 114 7
38070-206 145 10 230 174 11 242 186 14 246 190 14
38071-246 190 14 246 190 14 246 186 14 242 186 14
38072-239 182 13 230 174 11 224 166 10 213 154 11
38073-180 133 36 124 112 88 86 86 86 58 58 58
38074- 38 38 38 22 22 22 10 10 10 6 6 6
38075- 0 0 0 0 0 0 0 0 0 0 0 0
38076- 0 0 0 0 0 0 0 0 0 0 0 0
38077- 0 0 0 0 0 0 0 0 0 0 0 0
38078- 0 0 0 0 0 0 0 0 0 14 14 14
38079- 34 34 34 70 70 70 138 110 50 158 118 36
38080-167 114 7 180 123 7 192 133 9 197 138 11
38081-200 144 11 206 145 10 213 154 11 219 162 10
38082-224 166 10 230 174 11 239 182 13 242 186 14
38083-246 186 14 246 186 14 246 186 14 246 186 14
38084-239 182 13 216 158 10 185 133 11 152 99 6
38085-104 69 6 18 14 6 2 2 6 2 2 6
38086- 2 2 6 2 2 6 2 2 6 2 2 6
38087- 2 2 6 2 2 6 2 2 6 2 2 6
38088- 2 2 6 2 2 6 2 2 6 2 2 6
38089- 2 2 6 6 6 6 80 54 7 152 99 6
38090-192 133 9 219 162 10 236 178 12 239 182 13
38091-246 186 14 242 186 14 239 182 13 236 178 12
38092-224 166 10 206 145 10 192 133 9 154 121 60
38093- 94 94 94 62 62 62 42 42 42 22 22 22
38094- 14 14 14 6 6 6 0 0 0 0 0 0
38095- 0 0 0 0 0 0 0 0 0 0 0 0
38096- 0 0 0 0 0 0 0 0 0 0 0 0
38097- 0 0 0 0 0 0 0 0 0 0 0 0
38098- 0 0 0 0 0 0 0 0 0 6 6 6
38099- 18 18 18 34 34 34 58 58 58 78 78 78
38100-101 98 89 124 112 88 142 110 46 156 107 11
38101-163 110 8 167 114 7 175 118 6 180 123 7
38102-185 133 11 197 138 11 210 150 10 219 162 10
38103-226 170 11 236 178 12 236 178 12 234 174 13
38104-219 162 10 197 138 11 163 110 8 130 83 6
38105- 91 60 6 10 10 10 2 2 6 2 2 6
38106- 18 18 18 38 38 38 38 38 38 38 38 38
38107- 38 38 38 38 38 38 38 38 38 38 38 38
38108- 38 38 38 38 38 38 26 26 26 2 2 6
38109- 2 2 6 6 6 6 70 47 6 137 92 6
38110-175 118 6 200 144 11 219 162 10 230 174 11
38111-234 174 13 230 174 11 219 162 10 210 150 10
38112-192 133 9 163 110 8 124 112 88 82 82 82
38113- 50 50 50 30 30 30 14 14 14 6 6 6
38114- 0 0 0 0 0 0 0 0 0 0 0 0
38115- 0 0 0 0 0 0 0 0 0 0 0 0
38116- 0 0 0 0 0 0 0 0 0 0 0 0
38117- 0 0 0 0 0 0 0 0 0 0 0 0
38118- 0 0 0 0 0 0 0 0 0 0 0 0
38119- 6 6 6 14 14 14 22 22 22 34 34 34
38120- 42 42 42 58 58 58 74 74 74 86 86 86
38121-101 98 89 122 102 70 130 98 46 121 87 25
38122-137 92 6 152 99 6 163 110 8 180 123 7
38123-185 133 11 197 138 11 206 145 10 200 144 11
38124-180 123 7 156 107 11 130 83 6 104 69 6
38125- 50 34 6 54 54 54 110 110 110 101 98 89
38126- 86 86 86 82 82 82 78 78 78 78 78 78
38127- 78 78 78 78 78 78 78 78 78 78 78 78
38128- 78 78 78 82 82 82 86 86 86 94 94 94
38129-106 106 106 101 101 101 86 66 34 124 80 6
38130-156 107 11 180 123 7 192 133 9 200 144 11
38131-206 145 10 200 144 11 192 133 9 175 118 6
38132-139 102 15 109 106 95 70 70 70 42 42 42
38133- 22 22 22 10 10 10 0 0 0 0 0 0
38134- 0 0 0 0 0 0 0 0 0 0 0 0
38135- 0 0 0 0 0 0 0 0 0 0 0 0
38136- 0 0 0 0 0 0 0 0 0 0 0 0
38137- 0 0 0 0 0 0 0 0 0 0 0 0
38138- 0 0 0 0 0 0 0 0 0 0 0 0
38139- 0 0 0 0 0 0 6 6 6 10 10 10
38140- 14 14 14 22 22 22 30 30 30 38 38 38
38141- 50 50 50 62 62 62 74 74 74 90 90 90
38142-101 98 89 112 100 78 121 87 25 124 80 6
38143-137 92 6 152 99 6 152 99 6 152 99 6
38144-138 86 6 124 80 6 98 70 6 86 66 30
38145-101 98 89 82 82 82 58 58 58 46 46 46
38146- 38 38 38 34 34 34 34 34 34 34 34 34
38147- 34 34 34 34 34 34 34 34 34 34 34 34
38148- 34 34 34 34 34 34 38 38 38 42 42 42
38149- 54 54 54 82 82 82 94 86 76 91 60 6
38150-134 86 6 156 107 11 167 114 7 175 118 6
38151-175 118 6 167 114 7 152 99 6 121 87 25
38152-101 98 89 62 62 62 34 34 34 18 18 18
38153- 6 6 6 0 0 0 0 0 0 0 0 0
38154- 0 0 0 0 0 0 0 0 0 0 0 0
38155- 0 0 0 0 0 0 0 0 0 0 0 0
38156- 0 0 0 0 0 0 0 0 0 0 0 0
38157- 0 0 0 0 0 0 0 0 0 0 0 0
38158- 0 0 0 0 0 0 0 0 0 0 0 0
38159- 0 0 0 0 0 0 0 0 0 0 0 0
38160- 0 0 0 6 6 6 6 6 6 10 10 10
38161- 18 18 18 22 22 22 30 30 30 42 42 42
38162- 50 50 50 66 66 66 86 86 86 101 98 89
38163-106 86 58 98 70 6 104 69 6 104 69 6
38164-104 69 6 91 60 6 82 62 34 90 90 90
38165- 62 62 62 38 38 38 22 22 22 14 14 14
38166- 10 10 10 10 10 10 10 10 10 10 10 10
38167- 10 10 10 10 10 10 6 6 6 10 10 10
38168- 10 10 10 10 10 10 10 10 10 14 14 14
38169- 22 22 22 42 42 42 70 70 70 89 81 66
38170- 80 54 7 104 69 6 124 80 6 137 92 6
38171-134 86 6 116 81 8 100 82 52 86 86 86
38172- 58 58 58 30 30 30 14 14 14 6 6 6
38173- 0 0 0 0 0 0 0 0 0 0 0 0
38174- 0 0 0 0 0 0 0 0 0 0 0 0
38175- 0 0 0 0 0 0 0 0 0 0 0 0
38176- 0 0 0 0 0 0 0 0 0 0 0 0
38177- 0 0 0 0 0 0 0 0 0 0 0 0
38178- 0 0 0 0 0 0 0 0 0 0 0 0
38179- 0 0 0 0 0 0 0 0 0 0 0 0
38180- 0 0 0 0 0 0 0 0 0 0 0 0
38181- 0 0 0 6 6 6 10 10 10 14 14 14
38182- 18 18 18 26 26 26 38 38 38 54 54 54
38183- 70 70 70 86 86 86 94 86 76 89 81 66
38184- 89 81 66 86 86 86 74 74 74 50 50 50
38185- 30 30 30 14 14 14 6 6 6 0 0 0
38186- 0 0 0 0 0 0 0 0 0 0 0 0
38187- 0 0 0 0 0 0 0 0 0 0 0 0
38188- 0 0 0 0 0 0 0 0 0 0 0 0
38189- 6 6 6 18 18 18 34 34 34 58 58 58
38190- 82 82 82 89 81 66 89 81 66 89 81 66
38191- 94 86 66 94 86 76 74 74 74 50 50 50
38192- 26 26 26 14 14 14 6 6 6 0 0 0
38193- 0 0 0 0 0 0 0 0 0 0 0 0
38194- 0 0 0 0 0 0 0 0 0 0 0 0
38195- 0 0 0 0 0 0 0 0 0 0 0 0
38196- 0 0 0 0 0 0 0 0 0 0 0 0
38197- 0 0 0 0 0 0 0 0 0 0 0 0
38198- 0 0 0 0 0 0 0 0 0 0 0 0
38199- 0 0 0 0 0 0 0 0 0 0 0 0
38200- 0 0 0 0 0 0 0 0 0 0 0 0
38201- 0 0 0 0 0 0 0 0 0 0 0 0
38202- 6 6 6 6 6 6 14 14 14 18 18 18
38203- 30 30 30 38 38 38 46 46 46 54 54 54
38204- 50 50 50 42 42 42 30 30 30 18 18 18
38205- 10 10 10 0 0 0 0 0 0 0 0 0
38206- 0 0 0 0 0 0 0 0 0 0 0 0
38207- 0 0 0 0 0 0 0 0 0 0 0 0
38208- 0 0 0 0 0 0 0 0 0 0 0 0
38209- 0 0 0 6 6 6 14 14 14 26 26 26
38210- 38 38 38 50 50 50 58 58 58 58 58 58
38211- 54 54 54 42 42 42 30 30 30 18 18 18
38212- 10 10 10 0 0 0 0 0 0 0 0 0
38213- 0 0 0 0 0 0 0 0 0 0 0 0
38214- 0 0 0 0 0 0 0 0 0 0 0 0
38215- 0 0 0 0 0 0 0 0 0 0 0 0
38216- 0 0 0 0 0 0 0 0 0 0 0 0
38217- 0 0 0 0 0 0 0 0 0 0 0 0
38218- 0 0 0 0 0 0 0 0 0 0 0 0
38219- 0 0 0 0 0 0 0 0 0 0 0 0
38220- 0 0 0 0 0 0 0 0 0 0 0 0
38221- 0 0 0 0 0 0 0 0 0 0 0 0
38222- 0 0 0 0 0 0 0 0 0 6 6 6
38223- 6 6 6 10 10 10 14 14 14 18 18 18
38224- 18 18 18 14 14 14 10 10 10 6 6 6
38225- 0 0 0 0 0 0 0 0 0 0 0 0
38226- 0 0 0 0 0 0 0 0 0 0 0 0
38227- 0 0 0 0 0 0 0 0 0 0 0 0
38228- 0 0 0 0 0 0 0 0 0 0 0 0
38229- 0 0 0 0 0 0 0 0 0 6 6 6
38230- 14 14 14 18 18 18 22 22 22 22 22 22
38231- 18 18 18 14 14 14 10 10 10 6 6 6
38232- 0 0 0 0 0 0 0 0 0 0 0 0
38233- 0 0 0 0 0 0 0 0 0 0 0 0
38234- 0 0 0 0 0 0 0 0 0 0 0 0
38235- 0 0 0 0 0 0 0 0 0 0 0 0
38236- 0 0 0 0 0 0 0 0 0 0 0 0
38237+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38238+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38240+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38241+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38242+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38243+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38244+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38245+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38246+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38249+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38250+4 4 4 4 4 4
38251+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38252+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38254+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38255+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38256+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38257+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38258+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38259+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38260+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38263+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38264+4 4 4 4 4 4
38265+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38266+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38268+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38269+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38270+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38271+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38272+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38273+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38274+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38277+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38278+4 4 4 4 4 4
38279+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38280+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38281+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38282+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38283+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38284+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38285+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38286+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38287+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38288+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38291+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38292+4 4 4 4 4 4
38293+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38294+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38295+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38296+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38297+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38298+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38299+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38300+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38301+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38302+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38304+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38305+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38306+4 4 4 4 4 4
38307+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38308+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38309+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38310+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38311+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38312+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38313+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38314+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38315+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38316+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38318+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38319+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38320+4 4 4 4 4 4
38321+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38322+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38323+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38324+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38325+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
38326+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
38327+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38328+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38329+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38330+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
38331+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38332+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
38333+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38334+4 4 4 4 4 4
38335+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38336+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38337+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38338+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38339+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
38340+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
38341+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38342+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38343+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38344+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
38345+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
38346+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
38347+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38348+4 4 4 4 4 4
38349+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38350+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38351+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38352+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38353+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
38354+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
38355+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38356+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38357+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38358+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
38359+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
38360+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
38361+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
38362+4 4 4 4 4 4
38363+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38364+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38365+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38366+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
38367+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
38368+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
38369+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
38370+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38371+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38372+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
38373+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
38374+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
38375+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
38376+4 4 4 4 4 4
38377+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38378+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38379+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38380+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
38381+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
38382+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
38383+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
38384+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38385+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
38386+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
38387+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
38388+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
38389+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
38390+4 4 4 4 4 4
38391+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38392+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38393+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38394+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
38395+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
38396+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
38397+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
38398+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
38399+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
38400+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
38401+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
38402+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
38403+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
38404+4 4 4 4 4 4
38405+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38406+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38407+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
38408+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
38409+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
38410+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
38411+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
38412+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
38413+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
38414+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
38415+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
38416+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
38417+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
38418+4 4 4 4 4 4
38419+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38420+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38421+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
38422+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
38423+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
38424+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
38425+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
38426+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
38427+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
38428+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
38429+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
38430+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
38431+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
38432+4 4 4 4 4 4
38433+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38434+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38435+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
38436+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
38437+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
38438+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
38439+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
38440+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
38441+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
38442+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
38443+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
38444+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
38445+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38446+4 4 4 4 4 4
38447+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38448+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38449+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
38450+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
38451+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
38452+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
38453+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
38454+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
38455+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
38456+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
38457+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
38458+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
38459+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
38460+4 4 4 4 4 4
38461+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38462+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
38463+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
38464+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
38465+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
38466+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
38467+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
38468+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
38469+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
38470+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
38471+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
38472+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
38473+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
38474+4 4 4 4 4 4
38475+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38476+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
38477+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
38478+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
38479+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38480+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
38481+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
38482+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
38483+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
38484+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
38485+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
38486+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
38487+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
38488+0 0 0 4 4 4
38489+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38490+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
38491+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
38492+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
38493+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
38494+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
38495+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
38496+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
38497+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
38498+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
38499+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
38500+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
38501+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
38502+2 0 0 0 0 0
38503+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
38504+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
38505+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
38506+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
38507+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
38508+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
38509+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
38510+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
38511+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
38512+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
38513+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
38514+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
38515+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
38516+37 38 37 0 0 0
38517+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38518+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
38519+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
38520+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
38521+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
38522+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
38523+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
38524+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
38525+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
38526+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
38527+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
38528+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
38529+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
38530+85 115 134 4 0 0
38531+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
38532+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
38533+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
38534+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
38535+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
38536+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
38537+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
38538+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
38539+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
38540+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
38541+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
38542+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
38543+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
38544+60 73 81 4 0 0
38545+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
38546+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
38547+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
38548+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
38549+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
38550+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
38551+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
38552+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
38553+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
38554+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
38555+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
38556+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
38557+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
38558+16 19 21 4 0 0
38559+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
38560+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
38561+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
38562+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
38563+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
38564+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
38565+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
38566+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
38567+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
38568+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
38569+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
38570+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
38571+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
38572+4 0 0 4 3 3
38573+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
38574+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
38575+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
38576+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
38577+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
38578+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
38579+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
38580+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
38581+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
38582+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
38583+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
38584+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
38585+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
38586+3 2 2 4 4 4
38587+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
38588+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
38589+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
38590+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38591+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
38592+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
38593+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
38594+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
38595+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
38596+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
38597+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
38598+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
38599+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
38600+4 4 4 4 4 4
38601+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
38602+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
38603+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
38604+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
38605+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
38606+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
38607+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
38608+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
38609+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
38610+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
38611+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
38612+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
38613+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
38614+4 4 4 4 4 4
38615+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
38616+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
38617+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
38618+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
38619+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
38620+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38621+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
38622+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
38623+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
38624+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
38625+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
38626+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
38627+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
38628+5 5 5 5 5 5
38629+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
38630+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
38631+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
38632+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
38633+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
38634+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38635+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
38636+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
38637+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
38638+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
38639+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
38640+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
38641+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
38642+5 5 5 4 4 4
38643+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
38644+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
38645+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
38646+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
38647+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38648+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
38649+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
38650+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
38651+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
38652+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
38653+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
38654+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38655+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38656+4 4 4 4 4 4
38657+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
38658+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
38659+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
38660+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
38661+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
38662+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38663+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38664+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
38665+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
38666+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
38667+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
38668+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
38669+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38670+4 4 4 4 4 4
38671+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
38672+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
38673+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
38674+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
38675+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38676+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
38677+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
38678+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
38679+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
38680+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
38681+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
38682+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38683+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38684+4 4 4 4 4 4
38685+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
38686+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
38687+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
38688+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
38689+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38690+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38691+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38692+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
38693+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
38694+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
38695+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
38696+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38697+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38698+4 4 4 4 4 4
38699+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
38700+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
38701+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
38702+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
38703+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38704+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
38705+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38706+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
38707+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
38708+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
38709+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38710+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38711+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38712+4 4 4 4 4 4
38713+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
38714+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
38715+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
38716+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
38717+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38718+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
38719+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
38720+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
38721+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
38722+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
38723+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
38724+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38725+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38726+4 4 4 4 4 4
38727+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
38728+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
38729+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
38730+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
38731+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38732+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
38733+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
38734+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
38735+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
38736+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
38737+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
38738+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38739+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38740+4 4 4 4 4 4
38741+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
38742+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
38743+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
38744+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38745+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
38746+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
38747+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
38748+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
38749+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
38750+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
38751+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38752+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38753+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38754+4 4 4 4 4 4
38755+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
38756+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
38757+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
38758+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38759+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38760+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
38761+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
38762+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
38763+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
38764+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
38765+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38766+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38767+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38768+4 4 4 4 4 4
38769+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
38770+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
38771+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38772+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38773+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38774+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
38775+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
38776+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
38777+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
38778+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
38779+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38780+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38781+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38782+4 4 4 4 4 4
38783+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
38784+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
38785+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38786+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38787+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38788+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
38789+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
38790+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
38791+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38792+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38793+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38794+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38795+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38796+4 4 4 4 4 4
38797+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38798+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
38799+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38800+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
38801+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
38802+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
38803+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
38804+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
38805+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38806+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38807+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38808+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38809+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38810+4 4 4 4 4 4
38811+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38812+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
38813+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38814+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
38815+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38816+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
38817+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
38818+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
38819+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38820+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38821+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38822+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38823+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38824+4 4 4 4 4 4
38825+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
38826+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
38827+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38828+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
38829+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
38830+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
38831+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
38832+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
38833+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38834+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38835+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38836+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38837+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38838+4 4 4 4 4 4
38839+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
38840+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
38841+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38842+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
38843+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
38844+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
38845+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
38846+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
38847+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38848+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38849+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38850+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38851+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38852+4 4 4 4 4 4
38853+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38854+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
38855+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38856+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
38857+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
38858+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
38859+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
38860+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
38861+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38862+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38863+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38864+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38865+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38866+4 4 4 4 4 4
38867+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
38868+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
38869+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38870+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
38871+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
38872+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
38873+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
38874+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
38875+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
38876+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38877+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38878+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38879+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38880+4 4 4 4 4 4
38881+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38882+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
38883+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
38884+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
38885+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
38886+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
38887+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
38888+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
38889+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38890+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38891+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38892+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38893+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38894+4 4 4 4 4 4
38895+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38896+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
38897+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38898+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
38899+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
38900+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
38901+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
38902+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
38903+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38904+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38905+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38906+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38907+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38908+4 4 4 4 4 4
38909+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38910+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
38911+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
38912+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
38913+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
38914+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
38915+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38916+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
38917+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38918+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38919+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38920+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38921+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38922+4 4 4 4 4 4
38923+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38924+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
38925+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
38926+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38927+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
38928+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
38929+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38930+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
38931+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38932+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38933+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38934+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38935+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38936+4 4 4 4 4 4
38937+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38938+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
38939+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
38940+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
38941+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
38942+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
38943+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
38944+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
38945+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
38946+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38947+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38948+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38949+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38950+4 4 4 4 4 4
38951+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38952+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
38953+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
38954+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
38955+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
38956+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
38957+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
38958+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
38959+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
38960+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38961+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38962+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38963+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38964+4 4 4 4 4 4
38965+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
38966+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
38967+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
38968+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
38969+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38970+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
38971+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
38972+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
38973+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
38974+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38975+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38976+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38977+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38978+4 4 4 4 4 4
38979+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38980+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
38981+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
38982+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
38983+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
38984+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
38985+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
38986+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
38987+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
38988+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38989+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38990+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38991+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38992+4 4 4 4 4 4
38993+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
38994+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
38995+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
38996+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
38997+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
38998+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
38999+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
39000+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
39001+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
39002+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
39003+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39004+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39005+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39006+4 4 4 4 4 4
39007+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
39008+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
39009+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
39010+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
39011+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
39012+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
39013+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
39014+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
39015+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
39016+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
39017+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39018+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39019+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39020+4 4 4 4 4 4
39021+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
39022+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
39023+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
39024+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
39025+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
39026+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
39027+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39028+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
39029+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
39030+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
39031+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39032+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39033+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39034+4 4 4 4 4 4
39035+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
39036+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
39037+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
39038+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
39039+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
39040+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
39041+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
39042+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
39043+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
39044+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
39045+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39046+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39047+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39048+4 4 4 4 4 4
39049+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
39050+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
39051+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
39052+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
39053+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
39054+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
39055+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
39056+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
39057+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
39058+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
39059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39060+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39061+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39062+4 4 4 4 4 4
39063+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
39064+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
39065+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
39066+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
39067+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
39068+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
39069+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
39070+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
39071+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
39072+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
39073+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39074+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39075+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39076+4 4 4 4 4 4
39077+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
39078+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
39079+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
39080+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
39081+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
39082+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
39083+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
39084+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
39085+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
39086+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
39087+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39088+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39089+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39090+4 4 4 4 4 4
39091+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
39092+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
39093+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
39094+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
39095+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
39096+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
39097+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
39098+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
39099+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
39100+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39101+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39102+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39103+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39104+4 4 4 4 4 4
39105+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
39106+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
39107+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
39108+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
39109+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
39110+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
39111+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
39112+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
39113+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
39114+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39115+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39116+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39117+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39118+4 4 4 4 4 4
39119+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
39120+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
39121+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
39122+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
39123+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
39124+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
39125+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
39126+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
39127+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
39128+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39129+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39130+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39131+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39132+4 4 4 4 4 4
39133+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
39134+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
39135+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
39136+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
39137+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
39138+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
39139+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
39140+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
39141+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39142+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39143+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39144+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39145+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39146+4 4 4 4 4 4
39147+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
39148+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
39149+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
39150+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
39151+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
39152+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
39153+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
39154+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
39155+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39156+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39157+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39158+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39159+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39160+4 4 4 4 4 4
39161+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
39162+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
39163+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
39164+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
39165+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
39166+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
39167+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
39168+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
39169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39170+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39171+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39172+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39173+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39174+4 4 4 4 4 4
39175+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39176+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
39177+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39178+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
39179+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
39180+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
39181+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
39182+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
39183+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39184+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39185+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39186+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39187+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39188+4 4 4 4 4 4
39189+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39190+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
39191+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
39192+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
39193+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
39194+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
39195+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
39196+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
39197+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39198+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39199+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39200+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39201+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39202+4 4 4 4 4 4
39203+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39204+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
39205+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
39206+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
39207+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
39208+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
39209+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
39210+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39211+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39212+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39213+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39214+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39215+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39216+4 4 4 4 4 4
39217+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39218+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39219+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
39220+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
39221+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
39222+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
39223+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
39224+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39225+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39226+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39227+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39228+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39229+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39230+4 4 4 4 4 4
39231+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39232+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39234+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39235+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
39236+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
39237+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
39238+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39240+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39241+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39242+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39243+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39244+4 4 4 4 4 4
39245+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39246+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39248+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
39249+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
39250+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
39251+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
39252+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39254+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39255+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39256+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39257+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39258+4 4 4 4 4 4
39259+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39260+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39262+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
39263+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
39264+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
39265+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
39266+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39268+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39269+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39270+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39271+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39272+4 4 4 4 4 4
39273+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39274+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39276+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
39277+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
39278+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
39279+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
39280+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39281+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39282+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39283+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39284+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39285+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39286+4 4 4 4 4 4
39287+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39288+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39291+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
39292+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39293+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39294+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39295+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39296+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39297+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39298+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39299+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39300+4 4 4 4 4 4
39301+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39302+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39304+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39305+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
39306+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
39307+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
39308+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39309+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39310+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39311+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39312+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39313+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39314+4 4 4 4 4 4
39315+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39316+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39318+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39319+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
39320+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
39321+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39322+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39323+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39324+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39325+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39326+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39327+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39328+4 4 4 4 4 4
39329+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39330+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39331+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39332+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39333+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
39334+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
39335+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39336+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39337+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39338+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39339+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39340+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39341+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39342+4 4 4 4 4 4
39343+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39344+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39345+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39346+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39347+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
39348+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
39349+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39350+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39351+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39352+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39353+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39354+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39355+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39356+4 4 4 4 4 4
39357diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
39358index 3473e75..c930142 100644
39359--- a/drivers/video/udlfb.c
39360+++ b/drivers/video/udlfb.c
39361@@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
39362 dlfb_urb_completion(urb);
39363
39364 error:
39365- atomic_add(bytes_sent, &dev->bytes_sent);
39366- atomic_add(bytes_identical, &dev->bytes_identical);
39367- atomic_add(width*height*2, &dev->bytes_rendered);
39368+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39369+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39370+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
39371 end_cycles = get_cycles();
39372- atomic_add(((unsigned int) ((end_cycles - start_cycles)
39373+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39374 >> 10)), /* Kcycles */
39375 &dev->cpu_kcycles_used);
39376
39377@@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
39378 dlfb_urb_completion(urb);
39379
39380 error:
39381- atomic_add(bytes_sent, &dev->bytes_sent);
39382- atomic_add(bytes_identical, &dev->bytes_identical);
39383- atomic_add(bytes_rendered, &dev->bytes_rendered);
39384+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39385+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39386+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
39387 end_cycles = get_cycles();
39388- atomic_add(((unsigned int) ((end_cycles - start_cycles)
39389+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39390 >> 10)), /* Kcycles */
39391 &dev->cpu_kcycles_used);
39392 }
39393@@ -1368,7 +1368,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
39394 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39395 struct dlfb_data *dev = fb_info->par;
39396 return snprintf(buf, PAGE_SIZE, "%u\n",
39397- atomic_read(&dev->bytes_rendered));
39398+ atomic_read_unchecked(&dev->bytes_rendered));
39399 }
39400
39401 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39402@@ -1376,7 +1376,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39403 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39404 struct dlfb_data *dev = fb_info->par;
39405 return snprintf(buf, PAGE_SIZE, "%u\n",
39406- atomic_read(&dev->bytes_identical));
39407+ atomic_read_unchecked(&dev->bytes_identical));
39408 }
39409
39410 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39411@@ -1384,7 +1384,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39412 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39413 struct dlfb_data *dev = fb_info->par;
39414 return snprintf(buf, PAGE_SIZE, "%u\n",
39415- atomic_read(&dev->bytes_sent));
39416+ atomic_read_unchecked(&dev->bytes_sent));
39417 }
39418
39419 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39420@@ -1392,7 +1392,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39421 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39422 struct dlfb_data *dev = fb_info->par;
39423 return snprintf(buf, PAGE_SIZE, "%u\n",
39424- atomic_read(&dev->cpu_kcycles_used));
39425+ atomic_read_unchecked(&dev->cpu_kcycles_used));
39426 }
39427
39428 static ssize_t edid_show(
39429@@ -1449,10 +1449,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
39430 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39431 struct dlfb_data *dev = fb_info->par;
39432
39433- atomic_set(&dev->bytes_rendered, 0);
39434- atomic_set(&dev->bytes_identical, 0);
39435- atomic_set(&dev->bytes_sent, 0);
39436- atomic_set(&dev->cpu_kcycles_used, 0);
39437+ atomic_set_unchecked(&dev->bytes_rendered, 0);
39438+ atomic_set_unchecked(&dev->bytes_identical, 0);
39439+ atomic_set_unchecked(&dev->bytes_sent, 0);
39440+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
39441
39442 return count;
39443 }
39444diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
39445index 7f8472c..9842e87 100644
39446--- a/drivers/video/uvesafb.c
39447+++ b/drivers/video/uvesafb.c
39448@@ -19,6 +19,7 @@
39449 #include <linux/io.h>
39450 #include <linux/mutex.h>
39451 #include <linux/slab.h>
39452+#include <linux/moduleloader.h>
39453 #include <video/edid.h>
39454 #include <video/uvesafb.h>
39455 #ifdef CONFIG_X86
39456@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
39457 NULL,
39458 };
39459
39460- return call_usermodehelper(v86d_path, argv, envp, 1);
39461+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
39462 }
39463
39464 /*
39465@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
39466 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
39467 par->pmi_setpal = par->ypan = 0;
39468 } else {
39469+
39470+#ifdef CONFIG_PAX_KERNEXEC
39471+#ifdef CONFIG_MODULES
39472+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
39473+#endif
39474+ if (!par->pmi_code) {
39475+ par->pmi_setpal = par->ypan = 0;
39476+ return 0;
39477+ }
39478+#endif
39479+
39480 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
39481 + task->t.regs.edi);
39482+
39483+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39484+ pax_open_kernel();
39485+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
39486+ pax_close_kernel();
39487+
39488+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
39489+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
39490+#else
39491 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
39492 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
39493+#endif
39494+
39495 printk(KERN_INFO "uvesafb: protected mode interface info at "
39496 "%04x:%04x\n",
39497 (u16)task->t.regs.es, (u16)task->t.regs.edi);
39498@@ -1821,6 +1844,11 @@ out:
39499 if (par->vbe_modes)
39500 kfree(par->vbe_modes);
39501
39502+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39503+ if (par->pmi_code)
39504+ module_free_exec(NULL, par->pmi_code);
39505+#endif
39506+
39507 framebuffer_release(info);
39508 return err;
39509 }
39510@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platform_device *dev)
39511 kfree(par->vbe_state_orig);
39512 if (par->vbe_state_saved)
39513 kfree(par->vbe_state_saved);
39514+
39515+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39516+ if (par->pmi_code)
39517+ module_free_exec(NULL, par->pmi_code);
39518+#endif
39519+
39520 }
39521
39522 framebuffer_release(info);
39523diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
39524index 501b340..86bd4cf 100644
39525--- a/drivers/video/vesafb.c
39526+++ b/drivers/video/vesafb.c
39527@@ -9,6 +9,7 @@
39528 */
39529
39530 #include <linux/module.h>
39531+#include <linux/moduleloader.h>
39532 #include <linux/kernel.h>
39533 #include <linux/errno.h>
39534 #include <linux/string.h>
39535@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
39536 static int vram_total __initdata; /* Set total amount of memory */
39537 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
39538 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
39539-static void (*pmi_start)(void) __read_mostly;
39540-static void (*pmi_pal) (void) __read_mostly;
39541+static void (*pmi_start)(void) __read_only;
39542+static void (*pmi_pal) (void) __read_only;
39543 static int depth __read_mostly;
39544 static int vga_compat __read_mostly;
39545 /* --------------------------------------------------------------------- */
39546@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
39547 unsigned int size_vmode;
39548 unsigned int size_remap;
39549 unsigned int size_total;
39550+ void *pmi_code = NULL;
39551
39552 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
39553 return -ENODEV;
39554@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
39555 size_remap = size_total;
39556 vesafb_fix.smem_len = size_remap;
39557
39558-#ifndef __i386__
39559- screen_info.vesapm_seg = 0;
39560-#endif
39561-
39562 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
39563 printk(KERN_WARNING
39564 "vesafb: cannot reserve video memory at 0x%lx\n",
39565@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
39566 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
39567 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
39568
39569+#ifdef __i386__
39570+
39571+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39572+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
39573+ if (!pmi_code)
39574+#elif !defined(CONFIG_PAX_KERNEXEC)
39575+ if (0)
39576+#endif
39577+
39578+#endif
39579+ screen_info.vesapm_seg = 0;
39580+
39581 if (screen_info.vesapm_seg) {
39582- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
39583- screen_info.vesapm_seg,screen_info.vesapm_off);
39584+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
39585+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
39586 }
39587
39588 if (screen_info.vesapm_seg < 0xc000)
39589@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
39590
39591 if (ypan || pmi_setpal) {
39592 unsigned short *pmi_base;
39593+
39594 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
39595- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
39596- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
39597+
39598+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39599+ pax_open_kernel();
39600+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
39601+#else
39602+ pmi_code = pmi_base;
39603+#endif
39604+
39605+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
39606+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
39607+
39608+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39609+ pmi_start = ktva_ktla(pmi_start);
39610+ pmi_pal = ktva_ktla(pmi_pal);
39611+ pax_close_kernel();
39612+#endif
39613+
39614 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
39615 if (pmi_base[3]) {
39616 printk(KERN_INFO "vesafb: pmi: ports = ");
39617@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
39618 info->node, info->fix.id);
39619 return 0;
39620 err:
39621+
39622+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39623+ module_free_exec(NULL, pmi_code);
39624+#endif
39625+
39626 if (info->screen_base)
39627 iounmap(info->screen_base);
39628 framebuffer_release(info);
39629diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
39630index 88714ae..16c2e11 100644
39631--- a/drivers/video/via/via_clock.h
39632+++ b/drivers/video/via/via_clock.h
39633@@ -56,7 +56,7 @@ struct via_clock {
39634
39635 void (*set_engine_pll_state)(u8 state);
39636 void (*set_engine_pll)(struct via_pll_config config);
39637-};
39638+} __no_const;
39639
39640
39641 static inline u32 get_pll_internal_frequency(u32 ref_freq,
39642diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
39643index e56c934..fc22f4b 100644
39644--- a/drivers/xen/xen-pciback/conf_space.h
39645+++ b/drivers/xen/xen-pciback/conf_space.h
39646@@ -44,15 +44,15 @@ struct config_field {
39647 struct {
39648 conf_dword_write write;
39649 conf_dword_read read;
39650- } dw;
39651+ } __no_const dw;
39652 struct {
39653 conf_word_write write;
39654 conf_word_read read;
39655- } w;
39656+ } __no_const w;
39657 struct {
39658 conf_byte_write write;
39659 conf_byte_read read;
39660- } b;
39661+ } __no_const b;
39662 } u;
39663 struct list_head list;
39664 };
39665diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
39666index 879ed88..bc03a01 100644
39667--- a/fs/9p/vfs_inode.c
39668+++ b/fs/9p/vfs_inode.c
39669@@ -1286,7 +1286,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
39670 void
39671 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39672 {
39673- char *s = nd_get_link(nd);
39674+ const char *s = nd_get_link(nd);
39675
39676 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
39677 IS_ERR(s) ? "<error>" : s);
39678diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
39679index 79e2ca7..5828ad1 100644
39680--- a/fs/Kconfig.binfmt
39681+++ b/fs/Kconfig.binfmt
39682@@ -86,7 +86,7 @@ config HAVE_AOUT
39683
39684 config BINFMT_AOUT
39685 tristate "Kernel support for a.out and ECOFF binaries"
39686- depends on HAVE_AOUT
39687+ depends on HAVE_AOUT && BROKEN
39688 ---help---
39689 A.out (Assembler.OUTput) is a set of formats for libraries and
39690 executables used in the earliest versions of UNIX. Linux used
39691diff --git a/fs/aio.c b/fs/aio.c
39692index 969beb0..09fab51 100644
39693--- a/fs/aio.c
39694+++ b/fs/aio.c
39695@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
39696 size += sizeof(struct io_event) * nr_events;
39697 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
39698
39699- if (nr_pages < 0)
39700+ if (nr_pages <= 0)
39701 return -EINVAL;
39702
39703 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
39704@@ -1461,22 +1461,27 @@ static ssize_t aio_fsync(struct kiocb *iocb)
39705 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
39706 {
39707 ssize_t ret;
39708+ struct iovec iovstack;
39709
39710 #ifdef CONFIG_COMPAT
39711 if (compat)
39712 ret = compat_rw_copy_check_uvector(type,
39713 (struct compat_iovec __user *)kiocb->ki_buf,
39714- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39715+ kiocb->ki_nbytes, 1, &iovstack,
39716 &kiocb->ki_iovec, 1);
39717 else
39718 #endif
39719 ret = rw_copy_check_uvector(type,
39720 (struct iovec __user *)kiocb->ki_buf,
39721- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39722+ kiocb->ki_nbytes, 1, &iovstack,
39723 &kiocb->ki_iovec, 1);
39724 if (ret < 0)
39725 goto out;
39726
39727+ if (kiocb->ki_iovec == &iovstack) {
39728+ kiocb->ki_inline_vec = iovstack;
39729+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
39730+ }
39731 kiocb->ki_nr_segs = kiocb->ki_nbytes;
39732 kiocb->ki_cur_seg = 0;
39733 /* ki_nbytes/left now reflect bytes instead of segs */
39734diff --git a/fs/attr.c b/fs/attr.c
39735index 7ee7ba4..0c61a60 100644
39736--- a/fs/attr.c
39737+++ b/fs/attr.c
39738@@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
39739 unsigned long limit;
39740
39741 limit = rlimit(RLIMIT_FSIZE);
39742+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
39743 if (limit != RLIM_INFINITY && offset > limit)
39744 goto out_sig;
39745 if (offset > inode->i_sb->s_maxbytes)
39746diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
39747index e1fbdee..cd5ea56 100644
39748--- a/fs/autofs4/waitq.c
39749+++ b/fs/autofs4/waitq.c
39750@@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
39751 {
39752 unsigned long sigpipe, flags;
39753 mm_segment_t fs;
39754- const char *data = (const char *)addr;
39755+ const char __user *data = (const char __force_user *)addr;
39756 ssize_t wr = 0;
39757
39758 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
39759diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
39760index 8342ca6..82fd192 100644
39761--- a/fs/befs/linuxvfs.c
39762+++ b/fs/befs/linuxvfs.c
39763@@ -503,7 +503,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39764 {
39765 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
39766 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
39767- char *link = nd_get_link(nd);
39768+ const char *link = nd_get_link(nd);
39769 if (!IS_ERR(link))
39770 kfree(link);
39771 }
39772diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
39773index a6395bd..a5b24c4 100644
39774--- a/fs/binfmt_aout.c
39775+++ b/fs/binfmt_aout.c
39776@@ -16,6 +16,7 @@
39777 #include <linux/string.h>
39778 #include <linux/fs.h>
39779 #include <linux/file.h>
39780+#include <linux/security.h>
39781 #include <linux/stat.h>
39782 #include <linux/fcntl.h>
39783 #include <linux/ptrace.h>
39784@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
39785 #endif
39786 # define START_STACK(u) ((void __user *)u.start_stack)
39787
39788+ memset(&dump, 0, sizeof(dump));
39789+
39790 fs = get_fs();
39791 set_fs(KERNEL_DS);
39792 has_dumped = 1;
39793@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
39794
39795 /* If the size of the dump file exceeds the rlimit, then see what would happen
39796 if we wrote the stack, but not the data area. */
39797+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
39798 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
39799 dump.u_dsize = 0;
39800
39801 /* Make sure we have enough room to write the stack and data areas. */
39802+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
39803 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
39804 dump.u_ssize = 0;
39805
39806@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39807 rlim = rlimit(RLIMIT_DATA);
39808 if (rlim >= RLIM_INFINITY)
39809 rlim = ~0;
39810+
39811+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
39812 if (ex.a_data + ex.a_bss > rlim)
39813 return -ENOMEM;
39814
39815@@ -262,6 +269,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39816 install_exec_creds(bprm);
39817 current->flags &= ~PF_FORKNOEXEC;
39818
39819+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39820+ current->mm->pax_flags = 0UL;
39821+#endif
39822+
39823+#ifdef CONFIG_PAX_PAGEEXEC
39824+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
39825+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
39826+
39827+#ifdef CONFIG_PAX_EMUTRAMP
39828+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
39829+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
39830+#endif
39831+
39832+#ifdef CONFIG_PAX_MPROTECT
39833+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
39834+ current->mm->pax_flags |= MF_PAX_MPROTECT;
39835+#endif
39836+
39837+ }
39838+#endif
39839+
39840 if (N_MAGIC(ex) == OMAGIC) {
39841 unsigned long text_addr, map_size;
39842 loff_t pos;
39843@@ -334,7 +362,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39844
39845 down_write(&current->mm->mmap_sem);
39846 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
39847- PROT_READ | PROT_WRITE | PROT_EXEC,
39848+ PROT_READ | PROT_WRITE,
39849 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
39850 fd_offset + ex.a_text);
39851 up_write(&current->mm->mmap_sem);
39852diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
39853index 21ac5ee..60c413a 100644
39854--- a/fs/binfmt_elf.c
39855+++ b/fs/binfmt_elf.c
39856@@ -32,6 +32,7 @@
39857 #include <linux/elf.h>
39858 #include <linux/utsname.h>
39859 #include <linux/coredump.h>
39860+#include <linux/xattr.h>
39861 #include <asm/uaccess.h>
39862 #include <asm/param.h>
39863 #include <asm/page.h>
39864@@ -51,6 +52,10 @@ static int elf_core_dump(struct coredump_params *cprm);
39865 #define elf_core_dump NULL
39866 #endif
39867
39868+#ifdef CONFIG_PAX_MPROTECT
39869+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
39870+#endif
39871+
39872 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
39873 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
39874 #else
39875@@ -70,6 +75,11 @@ static struct linux_binfmt elf_format = {
39876 .load_binary = load_elf_binary,
39877 .load_shlib = load_elf_library,
39878 .core_dump = elf_core_dump,
39879+
39880+#ifdef CONFIG_PAX_MPROTECT
39881+ .handle_mprotect= elf_handle_mprotect,
39882+#endif
39883+
39884 .min_coredump = ELF_EXEC_PAGESIZE,
39885 };
39886
39887@@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
39888
39889 static int set_brk(unsigned long start, unsigned long end)
39890 {
39891+ unsigned long e = end;
39892+
39893 start = ELF_PAGEALIGN(start);
39894 end = ELF_PAGEALIGN(end);
39895 if (end > start) {
39896@@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
39897 if (BAD_ADDR(addr))
39898 return addr;
39899 }
39900- current->mm->start_brk = current->mm->brk = end;
39901+ current->mm->start_brk = current->mm->brk = e;
39902 return 0;
39903 }
39904
39905@@ -148,12 +160,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39906 elf_addr_t __user *u_rand_bytes;
39907 const char *k_platform = ELF_PLATFORM;
39908 const char *k_base_platform = ELF_BASE_PLATFORM;
39909- unsigned char k_rand_bytes[16];
39910+ u32 k_rand_bytes[4];
39911 int items;
39912 elf_addr_t *elf_info;
39913 int ei_index = 0;
39914 const struct cred *cred = current_cred();
39915 struct vm_area_struct *vma;
39916+ unsigned long saved_auxv[AT_VECTOR_SIZE];
39917
39918 /*
39919 * In some cases (e.g. Hyper-Threading), we want to avoid L1
39920@@ -195,8 +208,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39921 * Generate 16 random bytes for userspace PRNG seeding.
39922 */
39923 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
39924- u_rand_bytes = (elf_addr_t __user *)
39925- STACK_ALLOC(p, sizeof(k_rand_bytes));
39926+ srandom32(k_rand_bytes[0] ^ random32());
39927+ srandom32(k_rand_bytes[1] ^ random32());
39928+ srandom32(k_rand_bytes[2] ^ random32());
39929+ srandom32(k_rand_bytes[3] ^ random32());
39930+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
39931+ u_rand_bytes = (elf_addr_t __user *) p;
39932 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
39933 return -EFAULT;
39934
39935@@ -308,9 +325,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39936 return -EFAULT;
39937 current->mm->env_end = p;
39938
39939+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
39940+
39941 /* Put the elf_info on the stack in the right place. */
39942 sp = (elf_addr_t __user *)envp + 1;
39943- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
39944+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
39945 return -EFAULT;
39946 return 0;
39947 }
39948@@ -381,10 +400,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39949 {
39950 struct elf_phdr *elf_phdata;
39951 struct elf_phdr *eppnt;
39952- unsigned long load_addr = 0;
39953+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
39954 int load_addr_set = 0;
39955 unsigned long last_bss = 0, elf_bss = 0;
39956- unsigned long error = ~0UL;
39957+ unsigned long error = -EINVAL;
39958 unsigned long total_size;
39959 int retval, i, size;
39960
39961@@ -430,6 +449,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39962 goto out_close;
39963 }
39964
39965+#ifdef CONFIG_PAX_SEGMEXEC
39966+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
39967+ pax_task_size = SEGMEXEC_TASK_SIZE;
39968+#endif
39969+
39970 eppnt = elf_phdata;
39971 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
39972 if (eppnt->p_type == PT_LOAD) {
39973@@ -473,8 +497,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39974 k = load_addr + eppnt->p_vaddr;
39975 if (BAD_ADDR(k) ||
39976 eppnt->p_filesz > eppnt->p_memsz ||
39977- eppnt->p_memsz > TASK_SIZE ||
39978- TASK_SIZE - eppnt->p_memsz < k) {
39979+ eppnt->p_memsz > pax_task_size ||
39980+ pax_task_size - eppnt->p_memsz < k) {
39981 error = -ENOMEM;
39982 goto out_close;
39983 }
39984@@ -528,6 +552,351 @@ out:
39985 return error;
39986 }
39987
39988+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
39989+{
39990+ unsigned long pax_flags = 0UL;
39991+
39992+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39993+
39994+#ifdef CONFIG_PAX_PAGEEXEC
39995+ if (elf_phdata->p_flags & PF_PAGEEXEC)
39996+ pax_flags |= MF_PAX_PAGEEXEC;
39997+#endif
39998+
39999+#ifdef CONFIG_PAX_SEGMEXEC
40000+ if (elf_phdata->p_flags & PF_SEGMEXEC)
40001+ pax_flags |= MF_PAX_SEGMEXEC;
40002+#endif
40003+
40004+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40005+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40006+ if ((__supported_pte_mask & _PAGE_NX))
40007+ pax_flags &= ~MF_PAX_SEGMEXEC;
40008+ else
40009+ pax_flags &= ~MF_PAX_PAGEEXEC;
40010+ }
40011+#endif
40012+
40013+#ifdef CONFIG_PAX_EMUTRAMP
40014+ if (elf_phdata->p_flags & PF_EMUTRAMP)
40015+ pax_flags |= MF_PAX_EMUTRAMP;
40016+#endif
40017+
40018+#ifdef CONFIG_PAX_MPROTECT
40019+ if (elf_phdata->p_flags & PF_MPROTECT)
40020+ pax_flags |= MF_PAX_MPROTECT;
40021+#endif
40022+
40023+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40024+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
40025+ pax_flags |= MF_PAX_RANDMMAP;
40026+#endif
40027+
40028+#endif
40029+
40030+ return pax_flags;
40031+}
40032+
40033+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
40034+{
40035+ unsigned long pax_flags = 0UL;
40036+
40037+#ifdef CONFIG_PAX_PT_PAX_FLAGS
40038+
40039+#ifdef CONFIG_PAX_PAGEEXEC
40040+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
40041+ pax_flags |= MF_PAX_PAGEEXEC;
40042+#endif
40043+
40044+#ifdef CONFIG_PAX_SEGMEXEC
40045+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
40046+ pax_flags |= MF_PAX_SEGMEXEC;
40047+#endif
40048+
40049+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40050+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40051+ if ((__supported_pte_mask & _PAGE_NX))
40052+ pax_flags &= ~MF_PAX_SEGMEXEC;
40053+ else
40054+ pax_flags &= ~MF_PAX_PAGEEXEC;
40055+ }
40056+#endif
40057+
40058+#ifdef CONFIG_PAX_EMUTRAMP
40059+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
40060+ pax_flags |= MF_PAX_EMUTRAMP;
40061+#endif
40062+
40063+#ifdef CONFIG_PAX_MPROTECT
40064+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
40065+ pax_flags |= MF_PAX_MPROTECT;
40066+#endif
40067+
40068+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40069+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
40070+ pax_flags |= MF_PAX_RANDMMAP;
40071+#endif
40072+
40073+#endif
40074+
40075+ return pax_flags;
40076+}
40077+
40078+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
40079+{
40080+ unsigned long pax_flags = 0UL;
40081+
40082+#ifdef CONFIG_PAX_EI_PAX
40083+
40084+#ifdef CONFIG_PAX_PAGEEXEC
40085+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
40086+ pax_flags |= MF_PAX_PAGEEXEC;
40087+#endif
40088+
40089+#ifdef CONFIG_PAX_SEGMEXEC
40090+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
40091+ pax_flags |= MF_PAX_SEGMEXEC;
40092+#endif
40093+
40094+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40095+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40096+ if ((__supported_pte_mask & _PAGE_NX))
40097+ pax_flags &= ~MF_PAX_SEGMEXEC;
40098+ else
40099+ pax_flags &= ~MF_PAX_PAGEEXEC;
40100+ }
40101+#endif
40102+
40103+#ifdef CONFIG_PAX_EMUTRAMP
40104+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
40105+ pax_flags |= MF_PAX_EMUTRAMP;
40106+#endif
40107+
40108+#ifdef CONFIG_PAX_MPROTECT
40109+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
40110+ pax_flags |= MF_PAX_MPROTECT;
40111+#endif
40112+
40113+#ifdef CONFIG_PAX_ASLR
40114+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
40115+ pax_flags |= MF_PAX_RANDMMAP;
40116+#endif
40117+
40118+#else
40119+
40120+#ifdef CONFIG_PAX_PAGEEXEC
40121+ pax_flags |= MF_PAX_PAGEEXEC;
40122+#endif
40123+
40124+#ifdef CONFIG_PAX_MPROTECT
40125+ pax_flags |= MF_PAX_MPROTECT;
40126+#endif
40127+
40128+#ifdef CONFIG_PAX_RANDMMAP
40129+ pax_flags |= MF_PAX_RANDMMAP;
40130+#endif
40131+
40132+#ifdef CONFIG_PAX_SEGMEXEC
40133+ if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
40134+ pax_flags &= ~MF_PAX_PAGEEXEC;
40135+ pax_flags |= MF_PAX_SEGMEXEC;
40136+ }
40137+#endif
40138+
40139+#endif
40140+
40141+ return pax_flags;
40142+}
40143+
40144+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
40145+{
40146+
40147+#ifdef CONFIG_PAX_PT_PAX_FLAGS
40148+ unsigned long i;
40149+
40150+ for (i = 0UL; i < elf_ex->e_phnum; i++)
40151+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
40152+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
40153+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
40154+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
40155+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
40156+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
40157+ return ~0UL;
40158+
40159+#ifdef CONFIG_PAX_SOFTMODE
40160+ if (pax_softmode)
40161+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
40162+ else
40163+#endif
40164+
40165+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
40166+ break;
40167+ }
40168+#endif
40169+
40170+ return ~0UL;
40171+}
40172+
40173+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
40174+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
40175+{
40176+ unsigned long pax_flags = 0UL;
40177+
40178+#ifdef CONFIG_PAX_PAGEEXEC
40179+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
40180+ pax_flags |= MF_PAX_PAGEEXEC;
40181+#endif
40182+
40183+#ifdef CONFIG_PAX_SEGMEXEC
40184+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
40185+ pax_flags |= MF_PAX_SEGMEXEC;
40186+#endif
40187+
40188+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40189+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40190+ if ((__supported_pte_mask & _PAGE_NX))
40191+ pax_flags &= ~MF_PAX_SEGMEXEC;
40192+ else
40193+ pax_flags &= ~MF_PAX_PAGEEXEC;
40194+ }
40195+#endif
40196+
40197+#ifdef CONFIG_PAX_EMUTRAMP
40198+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
40199+ pax_flags |= MF_PAX_EMUTRAMP;
40200+#endif
40201+
40202+#ifdef CONFIG_PAX_MPROTECT
40203+ if (pax_flags_softmode & MF_PAX_MPROTECT)
40204+ pax_flags |= MF_PAX_MPROTECT;
40205+#endif
40206+
40207+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40208+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
40209+ pax_flags |= MF_PAX_RANDMMAP;
40210+#endif
40211+
40212+ return pax_flags;
40213+}
40214+
40215+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
40216+{
40217+ unsigned long pax_flags = 0UL;
40218+
40219+#ifdef CONFIG_PAX_PAGEEXEC
40220+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
40221+ pax_flags |= MF_PAX_PAGEEXEC;
40222+#endif
40223+
40224+#ifdef CONFIG_PAX_SEGMEXEC
40225+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
40226+ pax_flags |= MF_PAX_SEGMEXEC;
40227+#endif
40228+
40229+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40230+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40231+ if ((__supported_pte_mask & _PAGE_NX))
40232+ pax_flags &= ~MF_PAX_SEGMEXEC;
40233+ else
40234+ pax_flags &= ~MF_PAX_PAGEEXEC;
40235+ }
40236+#endif
40237+
40238+#ifdef CONFIG_PAX_EMUTRAMP
40239+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
40240+ pax_flags |= MF_PAX_EMUTRAMP;
40241+#endif
40242+
40243+#ifdef CONFIG_PAX_MPROTECT
40244+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
40245+ pax_flags |= MF_PAX_MPROTECT;
40246+#endif
40247+
40248+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40249+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
40250+ pax_flags |= MF_PAX_RANDMMAP;
40251+#endif
40252+
40253+ return pax_flags;
40254+}
40255+#endif
40256+
40257+static unsigned long pax_parse_xattr_pax(struct file * const file)
40258+{
40259+
40260+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
40261+ ssize_t xattr_size, i;
40262+ unsigned char xattr_value[5];
40263+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
40264+
40265+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
40266+ if (xattr_size <= 0)
40267+ return ~0UL;
40268+
40269+ for (i = 0; i < xattr_size; i++)
40270+ switch (xattr_value[i]) {
40271+ default:
40272+ return ~0UL;
40273+
40274+#define parse_flag(option1, option2, flag) \
40275+ case option1: \
40276+ pax_flags_hardmode |= MF_PAX_##flag; \
40277+ break; \
40278+ case option2: \
40279+ pax_flags_softmode |= MF_PAX_##flag; \
40280+ break;
40281+
40282+ parse_flag('p', 'P', PAGEEXEC);
40283+ parse_flag('e', 'E', EMUTRAMP);
40284+ parse_flag('m', 'M', MPROTECT);
40285+ parse_flag('r', 'R', RANDMMAP);
40286+ parse_flag('s', 'S', SEGMEXEC);
40287+
40288+#undef parse_flag
40289+ }
40290+
40291+ if (pax_flags_hardmode & pax_flags_softmode)
40292+ return ~0UL;
40293+
40294+#ifdef CONFIG_PAX_SOFTMODE
40295+ if (pax_softmode)
40296+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
40297+ else
40298+#endif
40299+
40300+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
40301+#else
40302+ return ~0UL;
40303+#endif
40304+
40305+}
40306+
40307+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
40308+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
40309+{
40310+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
40311+
40312+ pax_flags = pax_parse_ei_pax(elf_ex);
40313+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
40314+ xattr_pax_flags = pax_parse_xattr_pax(file);
40315+
40316+ if (pt_pax_flags == ~0UL)
40317+ pt_pax_flags = xattr_pax_flags;
40318+ else if (xattr_pax_flags == ~0UL)
40319+ xattr_pax_flags = pt_pax_flags;
40320+ if (pt_pax_flags != xattr_pax_flags)
40321+ return -EINVAL;
40322+ if (pt_pax_flags != ~0UL)
40323+ pax_flags = pt_pax_flags;
40324+
40325+ if (0 > pax_check_flags(&pax_flags))
40326+ return -EINVAL;
40327+
40328+ current->mm->pax_flags = pax_flags;
40329+ return 0;
40330+}
40331+#endif
40332+
40333 /*
40334 * These are the functions used to load ELF style executables and shared
40335 * libraries. There is no binary dependent code anywhere else.
40336@@ -544,6 +913,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
40337 {
40338 unsigned int random_variable = 0;
40339
40340+#ifdef CONFIG_PAX_RANDUSTACK
40341+ if (randomize_va_space)
40342+ return stack_top - current->mm->delta_stack;
40343+#endif
40344+
40345 if ((current->flags & PF_RANDOMIZE) &&
40346 !(current->personality & ADDR_NO_RANDOMIZE)) {
40347 random_variable = get_random_int() & STACK_RND_MASK;
40348@@ -562,7 +936,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40349 unsigned long load_addr = 0, load_bias = 0;
40350 int load_addr_set = 0;
40351 char * elf_interpreter = NULL;
40352- unsigned long error;
40353+ unsigned long error = 0;
40354 struct elf_phdr *elf_ppnt, *elf_phdata;
40355 unsigned long elf_bss, elf_brk;
40356 int retval, i;
40357@@ -572,11 +946,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40358 unsigned long start_code, end_code, start_data, end_data;
40359 unsigned long reloc_func_desc __maybe_unused = 0;
40360 int executable_stack = EXSTACK_DEFAULT;
40361- unsigned long def_flags = 0;
40362 struct {
40363 struct elfhdr elf_ex;
40364 struct elfhdr interp_elf_ex;
40365 } *loc;
40366+ unsigned long pax_task_size = TASK_SIZE;
40367
40368 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
40369 if (!loc) {
40370@@ -713,11 +1087,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40371
40372 /* OK, This is the point of no return */
40373 current->flags &= ~PF_FORKNOEXEC;
40374- current->mm->def_flags = def_flags;
40375+
40376+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
40377+ current->mm->pax_flags = 0UL;
40378+#endif
40379+
40380+#ifdef CONFIG_PAX_DLRESOLVE
40381+ current->mm->call_dl_resolve = 0UL;
40382+#endif
40383+
40384+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
40385+ current->mm->call_syscall = 0UL;
40386+#endif
40387+
40388+#ifdef CONFIG_PAX_ASLR
40389+ current->mm->delta_mmap = 0UL;
40390+ current->mm->delta_stack = 0UL;
40391+#endif
40392+
40393+ current->mm->def_flags = 0;
40394+
40395+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
40396+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
40397+ send_sig(SIGKILL, current, 0);
40398+ goto out_free_dentry;
40399+ }
40400+#endif
40401+
40402+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
40403+ pax_set_initial_flags(bprm);
40404+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
40405+ if (pax_set_initial_flags_func)
40406+ (pax_set_initial_flags_func)(bprm);
40407+#endif
40408+
40409+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40410+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
40411+ current->mm->context.user_cs_limit = PAGE_SIZE;
40412+ current->mm->def_flags |= VM_PAGEEXEC;
40413+ }
40414+#endif
40415+
40416+#ifdef CONFIG_PAX_SEGMEXEC
40417+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
40418+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
40419+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
40420+ pax_task_size = SEGMEXEC_TASK_SIZE;
40421+ current->mm->def_flags |= VM_NOHUGEPAGE;
40422+ }
40423+#endif
40424+
40425+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
40426+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40427+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
40428+ put_cpu();
40429+ }
40430+#endif
40431
40432 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
40433 may depend on the personality. */
40434 SET_PERSONALITY(loc->elf_ex);
40435+
40436+#ifdef CONFIG_PAX_ASLR
40437+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
40438+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
40439+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
40440+ }
40441+#endif
40442+
40443+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40444+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40445+ executable_stack = EXSTACK_DISABLE_X;
40446+ current->personality &= ~READ_IMPLIES_EXEC;
40447+ } else
40448+#endif
40449+
40450 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
40451 current->personality |= READ_IMPLIES_EXEC;
40452
40453@@ -808,6 +1252,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40454 #else
40455 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
40456 #endif
40457+
40458+#ifdef CONFIG_PAX_RANDMMAP
40459+ /* PaX: randomize base address at the default exe base if requested */
40460+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
40461+#ifdef CONFIG_SPARC64
40462+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
40463+#else
40464+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
40465+#endif
40466+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
40467+ elf_flags |= MAP_FIXED;
40468+ }
40469+#endif
40470+
40471 }
40472
40473 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
40474@@ -840,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40475 * allowed task size. Note that p_filesz must always be
40476 * <= p_memsz so it is only necessary to check p_memsz.
40477 */
40478- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40479- elf_ppnt->p_memsz > TASK_SIZE ||
40480- TASK_SIZE - elf_ppnt->p_memsz < k) {
40481+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40482+ elf_ppnt->p_memsz > pax_task_size ||
40483+ pax_task_size - elf_ppnt->p_memsz < k) {
40484 /* set_brk can never work. Avoid overflows. */
40485 send_sig(SIGKILL, current, 0);
40486 retval = -EINVAL;
40487@@ -870,6 +1328,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40488 start_data += load_bias;
40489 end_data += load_bias;
40490
40491+#ifdef CONFIG_PAX_RANDMMAP
40492+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
40493+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
40494+#endif
40495+
40496 /* Calling set_brk effectively mmaps the pages that we need
40497 * for the bss and break sections. We must do this before
40498 * mapping in the interpreter, to make sure it doesn't wind
40499@@ -881,9 +1344,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40500 goto out_free_dentry;
40501 }
40502 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
40503- send_sig(SIGSEGV, current, 0);
40504- retval = -EFAULT; /* Nobody gets to see this, but.. */
40505- goto out_free_dentry;
40506+ /*
40507+ * This bss-zeroing can fail if the ELF
40508+ * file specifies odd protections. So
40509+ * we don't check the return value
40510+ */
40511 }
40512
40513 if (elf_interpreter) {
40514@@ -1098,7 +1563,7 @@ out:
40515 * Decide what to dump of a segment, part, all or none.
40516 */
40517 static unsigned long vma_dump_size(struct vm_area_struct *vma,
40518- unsigned long mm_flags)
40519+ unsigned long mm_flags, long signr)
40520 {
40521 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
40522
40523@@ -1132,7 +1597,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
40524 if (vma->vm_file == NULL)
40525 return 0;
40526
40527- if (FILTER(MAPPED_PRIVATE))
40528+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
40529 goto whole;
40530
40531 /*
40532@@ -1354,9 +1819,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
40533 {
40534 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
40535 int i = 0;
40536- do
40537+ do {
40538 i += 2;
40539- while (auxv[i - 2] != AT_NULL);
40540+ } while (auxv[i - 2] != AT_NULL);
40541 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
40542 }
40543
40544@@ -1862,14 +2327,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
40545 }
40546
40547 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
40548- unsigned long mm_flags)
40549+ struct coredump_params *cprm)
40550 {
40551 struct vm_area_struct *vma;
40552 size_t size = 0;
40553
40554 for (vma = first_vma(current, gate_vma); vma != NULL;
40555 vma = next_vma(vma, gate_vma))
40556- size += vma_dump_size(vma, mm_flags);
40557+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40558 return size;
40559 }
40560
40561@@ -1963,7 +2428,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40562
40563 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
40564
40565- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
40566+ offset += elf_core_vma_data_size(gate_vma, cprm);
40567 offset += elf_core_extra_data_size();
40568 e_shoff = offset;
40569
40570@@ -1977,10 +2442,12 @@ static int elf_core_dump(struct coredump_params *cprm)
40571 offset = dataoff;
40572
40573 size += sizeof(*elf);
40574+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40575 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
40576 goto end_coredump;
40577
40578 size += sizeof(*phdr4note);
40579+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40580 if (size > cprm->limit
40581 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
40582 goto end_coredump;
40583@@ -1994,7 +2461,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40584 phdr.p_offset = offset;
40585 phdr.p_vaddr = vma->vm_start;
40586 phdr.p_paddr = 0;
40587- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
40588+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40589 phdr.p_memsz = vma->vm_end - vma->vm_start;
40590 offset += phdr.p_filesz;
40591 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
40592@@ -2005,6 +2472,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40593 phdr.p_align = ELF_EXEC_PAGESIZE;
40594
40595 size += sizeof(phdr);
40596+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40597 if (size > cprm->limit
40598 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
40599 goto end_coredump;
40600@@ -2029,7 +2497,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40601 unsigned long addr;
40602 unsigned long end;
40603
40604- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
40605+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40606
40607 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
40608 struct page *page;
40609@@ -2038,6 +2506,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40610 page = get_dump_page(addr);
40611 if (page) {
40612 void *kaddr = kmap(page);
40613+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
40614 stop = ((size += PAGE_SIZE) > cprm->limit) ||
40615 !dump_write(cprm->file, kaddr,
40616 PAGE_SIZE);
40617@@ -2055,6 +2524,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40618
40619 if (e_phnum == PN_XNUM) {
40620 size += sizeof(*shdr4extnum);
40621+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40622 if (size > cprm->limit
40623 || !dump_write(cprm->file, shdr4extnum,
40624 sizeof(*shdr4extnum)))
40625@@ -2075,6 +2545,97 @@ out:
40626
40627 #endif /* CONFIG_ELF_CORE */
40628
40629+#ifdef CONFIG_PAX_MPROTECT
40630+/* PaX: non-PIC ELF libraries need relocations on their executable segments
40631+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
40632+ * we'll remove VM_MAYWRITE for good on RELRO segments.
40633+ *
40634+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
40635+ * basis because we want to allow the common case and not the special ones.
40636+ */
40637+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
40638+{
40639+ struct elfhdr elf_h;
40640+ struct elf_phdr elf_p;
40641+ unsigned long i;
40642+ unsigned long oldflags;
40643+ bool is_textrel_rw, is_textrel_rx, is_relro;
40644+
40645+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
40646+ return;
40647+
40648+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
40649+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
40650+
40651+#ifdef CONFIG_PAX_ELFRELOCS
40652+ /* possible TEXTREL */
40653+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
40654+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
40655+#else
40656+ is_textrel_rw = false;
40657+ is_textrel_rx = false;
40658+#endif
40659+
40660+ /* possible RELRO */
40661+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
40662+
40663+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
40664+ return;
40665+
40666+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
40667+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
40668+
40669+#ifdef CONFIG_PAX_ETEXECRELOCS
40670+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40671+#else
40672+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
40673+#endif
40674+
40675+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40676+ !elf_check_arch(&elf_h) ||
40677+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
40678+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
40679+ return;
40680+
40681+ for (i = 0UL; i < elf_h.e_phnum; i++) {
40682+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
40683+ return;
40684+ switch (elf_p.p_type) {
40685+ case PT_DYNAMIC:
40686+ if (!is_textrel_rw && !is_textrel_rx)
40687+ continue;
40688+ i = 0UL;
40689+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
40690+ elf_dyn dyn;
40691+
40692+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
40693+ return;
40694+ if (dyn.d_tag == DT_NULL)
40695+ return;
40696+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
40697+ gr_log_textrel(vma);
40698+ if (is_textrel_rw)
40699+ vma->vm_flags |= VM_MAYWRITE;
40700+ else
40701+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
40702+ vma->vm_flags &= ~VM_MAYWRITE;
40703+ return;
40704+ }
40705+ i++;
40706+ }
40707+ return;
40708+
40709+ case PT_GNU_RELRO:
40710+ if (!is_relro)
40711+ continue;
40712+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
40713+ vma->vm_flags &= ~VM_MAYWRITE;
40714+ return;
40715+ }
40716+ }
40717+}
40718+#endif
40719+
40720 static int __init init_elf_binfmt(void)
40721 {
40722 return register_binfmt(&elf_format);
40723diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
40724index 1bffbe0..c8c283e 100644
40725--- a/fs/binfmt_flat.c
40726+++ b/fs/binfmt_flat.c
40727@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
40728 realdatastart = (unsigned long) -ENOMEM;
40729 printk("Unable to allocate RAM for process data, errno %d\n",
40730 (int)-realdatastart);
40731+ down_write(&current->mm->mmap_sem);
40732 do_munmap(current->mm, textpos, text_len);
40733+ up_write(&current->mm->mmap_sem);
40734 ret = realdatastart;
40735 goto err;
40736 }
40737@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
40738 }
40739 if (IS_ERR_VALUE(result)) {
40740 printk("Unable to read data+bss, errno %d\n", (int)-result);
40741+ down_write(&current->mm->mmap_sem);
40742 do_munmap(current->mm, textpos, text_len);
40743 do_munmap(current->mm, realdatastart, len);
40744+ up_write(&current->mm->mmap_sem);
40745 ret = result;
40746 goto err;
40747 }
40748@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
40749 }
40750 if (IS_ERR_VALUE(result)) {
40751 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
40752+ down_write(&current->mm->mmap_sem);
40753 do_munmap(current->mm, textpos, text_len + data_len + extra +
40754 MAX_SHARED_LIBS * sizeof(unsigned long));
40755+ up_write(&current->mm->mmap_sem);
40756 ret = result;
40757 goto err;
40758 }
40759diff --git a/fs/bio.c b/fs/bio.c
40760index b1fe82c..84da0a9 100644
40761--- a/fs/bio.c
40762+++ b/fs/bio.c
40763@@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
40764 const int read = bio_data_dir(bio) == READ;
40765 struct bio_map_data *bmd = bio->bi_private;
40766 int i;
40767- char *p = bmd->sgvecs[0].iov_base;
40768+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
40769
40770 __bio_for_each_segment(bvec, bio, i, 0) {
40771 char *addr = page_address(bvec->bv_page);
40772diff --git a/fs/block_dev.c b/fs/block_dev.c
40773index b07f1da..9efcb92 100644
40774--- a/fs/block_dev.c
40775+++ b/fs/block_dev.c
40776@@ -681,7 +681,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
40777 else if (bdev->bd_contains == bdev)
40778 return true; /* is a whole device which isn't held */
40779
40780- else if (whole->bd_holder == bd_may_claim)
40781+ else if (whole->bd_holder == (void *)bd_may_claim)
40782 return true; /* is a partition of a device that is being partitioned */
40783 else if (whole->bd_holder != NULL)
40784 return false; /* is a partition of a held device */
40785diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
40786index dede441..f2a2507 100644
40787--- a/fs/btrfs/ctree.c
40788+++ b/fs/btrfs/ctree.c
40789@@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
40790 free_extent_buffer(buf);
40791 add_root_to_dirty_list(root);
40792 } else {
40793- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
40794- parent_start = parent->start;
40795- else
40796+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
40797+ if (parent)
40798+ parent_start = parent->start;
40799+ else
40800+ parent_start = 0;
40801+ } else
40802 parent_start = 0;
40803
40804 WARN_ON(trans->transid != btrfs_header_generation(parent));
40805diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
40806index fd1a06d..6e9033d 100644
40807--- a/fs/btrfs/inode.c
40808+++ b/fs/btrfs/inode.c
40809@@ -6895,7 +6895,7 @@ fail:
40810 return -ENOMEM;
40811 }
40812
40813-static int btrfs_getattr(struct vfsmount *mnt,
40814+int btrfs_getattr(struct vfsmount *mnt,
40815 struct dentry *dentry, struct kstat *stat)
40816 {
40817 struct inode *inode = dentry->d_inode;
40818@@ -6909,6 +6909,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
40819 return 0;
40820 }
40821
40822+EXPORT_SYMBOL(btrfs_getattr);
40823+
40824+dev_t get_btrfs_dev_from_inode(struct inode *inode)
40825+{
40826+ return BTRFS_I(inode)->root->anon_dev;
40827+}
40828+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
40829+
40830 /*
40831 * If a file is moved, it will inherit the cow and compression flags of the new
40832 * directory.
40833diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
40834index c04f02c..f5c9e2e 100644
40835--- a/fs/btrfs/ioctl.c
40836+++ b/fs/btrfs/ioctl.c
40837@@ -2733,9 +2733,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
40838 for (i = 0; i < num_types; i++) {
40839 struct btrfs_space_info *tmp;
40840
40841+ /* Don't copy in more than we allocated */
40842 if (!slot_count)
40843 break;
40844
40845+ slot_count--;
40846+
40847 info = NULL;
40848 rcu_read_lock();
40849 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
40850@@ -2757,15 +2760,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
40851 memcpy(dest, &space, sizeof(space));
40852 dest++;
40853 space_args.total_spaces++;
40854- slot_count--;
40855 }
40856- if (!slot_count)
40857- break;
40858 }
40859 up_read(&info->groups_sem);
40860 }
40861
40862- user_dest = (struct btrfs_ioctl_space_info *)
40863+ user_dest = (struct btrfs_ioctl_space_info __user *)
40864 (arg + sizeof(struct btrfs_ioctl_space_args));
40865
40866 if (copy_to_user(user_dest, dest_orig, alloc_size))
40867diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
40868index cfb5543..1ae7347 100644
40869--- a/fs/btrfs/relocation.c
40870+++ b/fs/btrfs/relocation.c
40871@@ -1244,7 +1244,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
40872 }
40873 spin_unlock(&rc->reloc_root_tree.lock);
40874
40875- BUG_ON((struct btrfs_root *)node->data != root);
40876+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
40877
40878 if (!del) {
40879 spin_lock(&rc->reloc_root_tree.lock);
40880diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
40881index 622f469..e8d2d55 100644
40882--- a/fs/cachefiles/bind.c
40883+++ b/fs/cachefiles/bind.c
40884@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
40885 args);
40886
40887 /* start by checking things over */
40888- ASSERT(cache->fstop_percent >= 0 &&
40889- cache->fstop_percent < cache->fcull_percent &&
40890+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
40891 cache->fcull_percent < cache->frun_percent &&
40892 cache->frun_percent < 100);
40893
40894- ASSERT(cache->bstop_percent >= 0 &&
40895- cache->bstop_percent < cache->bcull_percent &&
40896+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
40897 cache->bcull_percent < cache->brun_percent &&
40898 cache->brun_percent < 100);
40899
40900diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
40901index 0a1467b..6a53245 100644
40902--- a/fs/cachefiles/daemon.c
40903+++ b/fs/cachefiles/daemon.c
40904@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
40905 if (n > buflen)
40906 return -EMSGSIZE;
40907
40908- if (copy_to_user(_buffer, buffer, n) != 0)
40909+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
40910 return -EFAULT;
40911
40912 return n;
40913@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
40914 if (test_bit(CACHEFILES_DEAD, &cache->flags))
40915 return -EIO;
40916
40917- if (datalen < 0 || datalen > PAGE_SIZE - 1)
40918+ if (datalen > PAGE_SIZE - 1)
40919 return -EOPNOTSUPP;
40920
40921 /* drag the command string into the kernel so we can parse it */
40922@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
40923 if (args[0] != '%' || args[1] != '\0')
40924 return -EINVAL;
40925
40926- if (fstop < 0 || fstop >= cache->fcull_percent)
40927+ if (fstop >= cache->fcull_percent)
40928 return cachefiles_daemon_range_error(cache, args);
40929
40930 cache->fstop_percent = fstop;
40931@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
40932 if (args[0] != '%' || args[1] != '\0')
40933 return -EINVAL;
40934
40935- if (bstop < 0 || bstop >= cache->bcull_percent)
40936+ if (bstop >= cache->bcull_percent)
40937 return cachefiles_daemon_range_error(cache, args);
40938
40939 cache->bstop_percent = bstop;
40940diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
40941index bd6bc1b..b627b53 100644
40942--- a/fs/cachefiles/internal.h
40943+++ b/fs/cachefiles/internal.h
40944@@ -57,7 +57,7 @@ struct cachefiles_cache {
40945 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
40946 struct rb_root active_nodes; /* active nodes (can't be culled) */
40947 rwlock_t active_lock; /* lock for active_nodes */
40948- atomic_t gravecounter; /* graveyard uniquifier */
40949+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
40950 unsigned frun_percent; /* when to stop culling (% files) */
40951 unsigned fcull_percent; /* when to start culling (% files) */
40952 unsigned fstop_percent; /* when to stop allocating (% files) */
40953@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
40954 * proc.c
40955 */
40956 #ifdef CONFIG_CACHEFILES_HISTOGRAM
40957-extern atomic_t cachefiles_lookup_histogram[HZ];
40958-extern atomic_t cachefiles_mkdir_histogram[HZ];
40959-extern atomic_t cachefiles_create_histogram[HZ];
40960+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40961+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40962+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
40963
40964 extern int __init cachefiles_proc_init(void);
40965 extern void cachefiles_proc_cleanup(void);
40966 static inline
40967-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
40968+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
40969 {
40970 unsigned long jif = jiffies - start_jif;
40971 if (jif >= HZ)
40972 jif = HZ - 1;
40973- atomic_inc(&histogram[jif]);
40974+ atomic_inc_unchecked(&histogram[jif]);
40975 }
40976
40977 #else
40978diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
40979index a0358c2..d6137f2 100644
40980--- a/fs/cachefiles/namei.c
40981+++ b/fs/cachefiles/namei.c
40982@@ -318,7 +318,7 @@ try_again:
40983 /* first step is to make up a grave dentry in the graveyard */
40984 sprintf(nbuffer, "%08x%08x",
40985 (uint32_t) get_seconds(),
40986- (uint32_t) atomic_inc_return(&cache->gravecounter));
40987+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
40988
40989 /* do the multiway lock magic */
40990 trap = lock_rename(cache->graveyard, dir);
40991diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
40992index eccd339..4c1d995 100644
40993--- a/fs/cachefiles/proc.c
40994+++ b/fs/cachefiles/proc.c
40995@@ -14,9 +14,9 @@
40996 #include <linux/seq_file.h>
40997 #include "internal.h"
40998
40999-atomic_t cachefiles_lookup_histogram[HZ];
41000-atomic_t cachefiles_mkdir_histogram[HZ];
41001-atomic_t cachefiles_create_histogram[HZ];
41002+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
41003+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
41004+atomic_unchecked_t cachefiles_create_histogram[HZ];
41005
41006 /*
41007 * display the latency histogram
41008@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
41009 return 0;
41010 default:
41011 index = (unsigned long) v - 3;
41012- x = atomic_read(&cachefiles_lookup_histogram[index]);
41013- y = atomic_read(&cachefiles_mkdir_histogram[index]);
41014- z = atomic_read(&cachefiles_create_histogram[index]);
41015+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
41016+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
41017+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
41018 if (x == 0 && y == 0 && z == 0)
41019 return 0;
41020
41021diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
41022index 0e3c092..818480e 100644
41023--- a/fs/cachefiles/rdwr.c
41024+++ b/fs/cachefiles/rdwr.c
41025@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
41026 old_fs = get_fs();
41027 set_fs(KERNEL_DS);
41028 ret = file->f_op->write(
41029- file, (const void __user *) data, len, &pos);
41030+ file, (const void __force_user *) data, len, &pos);
41031 set_fs(old_fs);
41032 kunmap(page);
41033 if (ret != len)
41034diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
41035index 9895400..fa40a7d 100644
41036--- a/fs/ceph/dir.c
41037+++ b/fs/ceph/dir.c
41038@@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
41039 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
41040 struct ceph_mds_client *mdsc = fsc->mdsc;
41041 unsigned frag = fpos_frag(filp->f_pos);
41042- int off = fpos_off(filp->f_pos);
41043+ unsigned int off = fpos_off(filp->f_pos);
41044 int err;
41045 u32 ftype;
41046 struct ceph_mds_reply_info_parsed *rinfo;
41047diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
41048index 84e8c07..6170d31 100644
41049--- a/fs/cifs/cifs_debug.c
41050+++ b/fs/cifs/cifs_debug.c
41051@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
41052
41053 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
41054 #ifdef CONFIG_CIFS_STATS2
41055- atomic_set(&totBufAllocCount, 0);
41056- atomic_set(&totSmBufAllocCount, 0);
41057+ atomic_set_unchecked(&totBufAllocCount, 0);
41058+ atomic_set_unchecked(&totSmBufAllocCount, 0);
41059 #endif /* CONFIG_CIFS_STATS2 */
41060 spin_lock(&cifs_tcp_ses_lock);
41061 list_for_each(tmp1, &cifs_tcp_ses_list) {
41062@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
41063 tcon = list_entry(tmp3,
41064 struct cifs_tcon,
41065 tcon_list);
41066- atomic_set(&tcon->num_smbs_sent, 0);
41067- atomic_set(&tcon->num_writes, 0);
41068- atomic_set(&tcon->num_reads, 0);
41069- atomic_set(&tcon->num_oplock_brks, 0);
41070- atomic_set(&tcon->num_opens, 0);
41071- atomic_set(&tcon->num_posixopens, 0);
41072- atomic_set(&tcon->num_posixmkdirs, 0);
41073- atomic_set(&tcon->num_closes, 0);
41074- atomic_set(&tcon->num_deletes, 0);
41075- atomic_set(&tcon->num_mkdirs, 0);
41076- atomic_set(&tcon->num_rmdirs, 0);
41077- atomic_set(&tcon->num_renames, 0);
41078- atomic_set(&tcon->num_t2renames, 0);
41079- atomic_set(&tcon->num_ffirst, 0);
41080- atomic_set(&tcon->num_fnext, 0);
41081- atomic_set(&tcon->num_fclose, 0);
41082- atomic_set(&tcon->num_hardlinks, 0);
41083- atomic_set(&tcon->num_symlinks, 0);
41084- atomic_set(&tcon->num_locks, 0);
41085+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
41086+ atomic_set_unchecked(&tcon->num_writes, 0);
41087+ atomic_set_unchecked(&tcon->num_reads, 0);
41088+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
41089+ atomic_set_unchecked(&tcon->num_opens, 0);
41090+ atomic_set_unchecked(&tcon->num_posixopens, 0);
41091+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
41092+ atomic_set_unchecked(&tcon->num_closes, 0);
41093+ atomic_set_unchecked(&tcon->num_deletes, 0);
41094+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
41095+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
41096+ atomic_set_unchecked(&tcon->num_renames, 0);
41097+ atomic_set_unchecked(&tcon->num_t2renames, 0);
41098+ atomic_set_unchecked(&tcon->num_ffirst, 0);
41099+ atomic_set_unchecked(&tcon->num_fnext, 0);
41100+ atomic_set_unchecked(&tcon->num_fclose, 0);
41101+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
41102+ atomic_set_unchecked(&tcon->num_symlinks, 0);
41103+ atomic_set_unchecked(&tcon->num_locks, 0);
41104 }
41105 }
41106 }
41107@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
41108 smBufAllocCount.counter, cifs_min_small);
41109 #ifdef CONFIG_CIFS_STATS2
41110 seq_printf(m, "Total Large %d Small %d Allocations\n",
41111- atomic_read(&totBufAllocCount),
41112- atomic_read(&totSmBufAllocCount));
41113+ atomic_read_unchecked(&totBufAllocCount),
41114+ atomic_read_unchecked(&totSmBufAllocCount));
41115 #endif /* CONFIG_CIFS_STATS2 */
41116
41117 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
41118@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
41119 if (tcon->need_reconnect)
41120 seq_puts(m, "\tDISCONNECTED ");
41121 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
41122- atomic_read(&tcon->num_smbs_sent),
41123- atomic_read(&tcon->num_oplock_brks));
41124+ atomic_read_unchecked(&tcon->num_smbs_sent),
41125+ atomic_read_unchecked(&tcon->num_oplock_brks));
41126 seq_printf(m, "\nReads: %d Bytes: %lld",
41127- atomic_read(&tcon->num_reads),
41128+ atomic_read_unchecked(&tcon->num_reads),
41129 (long long)(tcon->bytes_read));
41130 seq_printf(m, "\nWrites: %d Bytes: %lld",
41131- atomic_read(&tcon->num_writes),
41132+ atomic_read_unchecked(&tcon->num_writes),
41133 (long long)(tcon->bytes_written));
41134 seq_printf(m, "\nFlushes: %d",
41135- atomic_read(&tcon->num_flushes));
41136+ atomic_read_unchecked(&tcon->num_flushes));
41137 seq_printf(m, "\nLocks: %d HardLinks: %d "
41138 "Symlinks: %d",
41139- atomic_read(&tcon->num_locks),
41140- atomic_read(&tcon->num_hardlinks),
41141- atomic_read(&tcon->num_symlinks));
41142+ atomic_read_unchecked(&tcon->num_locks),
41143+ atomic_read_unchecked(&tcon->num_hardlinks),
41144+ atomic_read_unchecked(&tcon->num_symlinks));
41145 seq_printf(m, "\nOpens: %d Closes: %d "
41146 "Deletes: %d",
41147- atomic_read(&tcon->num_opens),
41148- atomic_read(&tcon->num_closes),
41149- atomic_read(&tcon->num_deletes));
41150+ atomic_read_unchecked(&tcon->num_opens),
41151+ atomic_read_unchecked(&tcon->num_closes),
41152+ atomic_read_unchecked(&tcon->num_deletes));
41153 seq_printf(m, "\nPosix Opens: %d "
41154 "Posix Mkdirs: %d",
41155- atomic_read(&tcon->num_posixopens),
41156- atomic_read(&tcon->num_posixmkdirs));
41157+ atomic_read_unchecked(&tcon->num_posixopens),
41158+ atomic_read_unchecked(&tcon->num_posixmkdirs));
41159 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
41160- atomic_read(&tcon->num_mkdirs),
41161- atomic_read(&tcon->num_rmdirs));
41162+ atomic_read_unchecked(&tcon->num_mkdirs),
41163+ atomic_read_unchecked(&tcon->num_rmdirs));
41164 seq_printf(m, "\nRenames: %d T2 Renames %d",
41165- atomic_read(&tcon->num_renames),
41166- atomic_read(&tcon->num_t2renames));
41167+ atomic_read_unchecked(&tcon->num_renames),
41168+ atomic_read_unchecked(&tcon->num_t2renames));
41169 seq_printf(m, "\nFindFirst: %d FNext %d "
41170 "FClose %d",
41171- atomic_read(&tcon->num_ffirst),
41172- atomic_read(&tcon->num_fnext),
41173- atomic_read(&tcon->num_fclose));
41174+ atomic_read_unchecked(&tcon->num_ffirst),
41175+ atomic_read_unchecked(&tcon->num_fnext),
41176+ atomic_read_unchecked(&tcon->num_fclose));
41177 }
41178 }
41179 }
41180diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
41181index 8f1fe32..38f9e27 100644
41182--- a/fs/cifs/cifsfs.c
41183+++ b/fs/cifs/cifsfs.c
41184@@ -989,7 +989,7 @@ cifs_init_request_bufs(void)
41185 cifs_req_cachep = kmem_cache_create("cifs_request",
41186 CIFSMaxBufSize +
41187 MAX_CIFS_HDR_SIZE, 0,
41188- SLAB_HWCACHE_ALIGN, NULL);
41189+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
41190 if (cifs_req_cachep == NULL)
41191 return -ENOMEM;
41192
41193@@ -1016,7 +1016,7 @@ cifs_init_request_bufs(void)
41194 efficient to alloc 1 per page off the slab compared to 17K (5page)
41195 alloc of large cifs buffers even when page debugging is on */
41196 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
41197- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
41198+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
41199 NULL);
41200 if (cifs_sm_req_cachep == NULL) {
41201 mempool_destroy(cifs_req_poolp);
41202@@ -1101,8 +1101,8 @@ init_cifs(void)
41203 atomic_set(&bufAllocCount, 0);
41204 atomic_set(&smBufAllocCount, 0);
41205 #ifdef CONFIG_CIFS_STATS2
41206- atomic_set(&totBufAllocCount, 0);
41207- atomic_set(&totSmBufAllocCount, 0);
41208+ atomic_set_unchecked(&totBufAllocCount, 0);
41209+ atomic_set_unchecked(&totSmBufAllocCount, 0);
41210 #endif /* CONFIG_CIFS_STATS2 */
41211
41212 atomic_set(&midCount, 0);
41213diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
41214index 8238aa1..0347196 100644
41215--- a/fs/cifs/cifsglob.h
41216+++ b/fs/cifs/cifsglob.h
41217@@ -392,28 +392,28 @@ struct cifs_tcon {
41218 __u16 Flags; /* optional support bits */
41219 enum statusEnum tidStatus;
41220 #ifdef CONFIG_CIFS_STATS
41221- atomic_t num_smbs_sent;
41222- atomic_t num_writes;
41223- atomic_t num_reads;
41224- atomic_t num_flushes;
41225- atomic_t num_oplock_brks;
41226- atomic_t num_opens;
41227- atomic_t num_closes;
41228- atomic_t num_deletes;
41229- atomic_t num_mkdirs;
41230- atomic_t num_posixopens;
41231- atomic_t num_posixmkdirs;
41232- atomic_t num_rmdirs;
41233- atomic_t num_renames;
41234- atomic_t num_t2renames;
41235- atomic_t num_ffirst;
41236- atomic_t num_fnext;
41237- atomic_t num_fclose;
41238- atomic_t num_hardlinks;
41239- atomic_t num_symlinks;
41240- atomic_t num_locks;
41241- atomic_t num_acl_get;
41242- atomic_t num_acl_set;
41243+ atomic_unchecked_t num_smbs_sent;
41244+ atomic_unchecked_t num_writes;
41245+ atomic_unchecked_t num_reads;
41246+ atomic_unchecked_t num_flushes;
41247+ atomic_unchecked_t num_oplock_brks;
41248+ atomic_unchecked_t num_opens;
41249+ atomic_unchecked_t num_closes;
41250+ atomic_unchecked_t num_deletes;
41251+ atomic_unchecked_t num_mkdirs;
41252+ atomic_unchecked_t num_posixopens;
41253+ atomic_unchecked_t num_posixmkdirs;
41254+ atomic_unchecked_t num_rmdirs;
41255+ atomic_unchecked_t num_renames;
41256+ atomic_unchecked_t num_t2renames;
41257+ atomic_unchecked_t num_ffirst;
41258+ atomic_unchecked_t num_fnext;
41259+ atomic_unchecked_t num_fclose;
41260+ atomic_unchecked_t num_hardlinks;
41261+ atomic_unchecked_t num_symlinks;
41262+ atomic_unchecked_t num_locks;
41263+ atomic_unchecked_t num_acl_get;
41264+ atomic_unchecked_t num_acl_set;
41265 #ifdef CONFIG_CIFS_STATS2
41266 unsigned long long time_writes;
41267 unsigned long long time_reads;
41268@@ -628,7 +628,7 @@ convert_delimiter(char *path, char delim)
41269 }
41270
41271 #ifdef CONFIG_CIFS_STATS
41272-#define cifs_stats_inc atomic_inc
41273+#define cifs_stats_inc atomic_inc_unchecked
41274
41275 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
41276 unsigned int bytes)
41277@@ -985,8 +985,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
41278 /* Various Debug counters */
41279 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
41280 #ifdef CONFIG_CIFS_STATS2
41281-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
41282-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
41283+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
41284+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
41285 #endif
41286 GLOBAL_EXTERN atomic_t smBufAllocCount;
41287 GLOBAL_EXTERN atomic_t midCount;
41288diff --git a/fs/cifs/link.c b/fs/cifs/link.c
41289index 6b0e064..94e6c3c 100644
41290--- a/fs/cifs/link.c
41291+++ b/fs/cifs/link.c
41292@@ -600,7 +600,7 @@ symlink_exit:
41293
41294 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
41295 {
41296- char *p = nd_get_link(nd);
41297+ const char *p = nd_get_link(nd);
41298 if (!IS_ERR(p))
41299 kfree(p);
41300 }
41301diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
41302index 703ef5c..2a44ed5 100644
41303--- a/fs/cifs/misc.c
41304+++ b/fs/cifs/misc.c
41305@@ -156,7 +156,7 @@ cifs_buf_get(void)
41306 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
41307 atomic_inc(&bufAllocCount);
41308 #ifdef CONFIG_CIFS_STATS2
41309- atomic_inc(&totBufAllocCount);
41310+ atomic_inc_unchecked(&totBufAllocCount);
41311 #endif /* CONFIG_CIFS_STATS2 */
41312 }
41313
41314@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
41315 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
41316 atomic_inc(&smBufAllocCount);
41317 #ifdef CONFIG_CIFS_STATS2
41318- atomic_inc(&totSmBufAllocCount);
41319+ atomic_inc_unchecked(&totSmBufAllocCount);
41320 #endif /* CONFIG_CIFS_STATS2 */
41321
41322 }
41323diff --git a/fs/coda/cache.c b/fs/coda/cache.c
41324index 6901578..d402eb5 100644
41325--- a/fs/coda/cache.c
41326+++ b/fs/coda/cache.c
41327@@ -24,7 +24,7 @@
41328 #include "coda_linux.h"
41329 #include "coda_cache.h"
41330
41331-static atomic_t permission_epoch = ATOMIC_INIT(0);
41332+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
41333
41334 /* replace or extend an acl cache hit */
41335 void coda_cache_enter(struct inode *inode, int mask)
41336@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
41337 struct coda_inode_info *cii = ITOC(inode);
41338
41339 spin_lock(&cii->c_lock);
41340- cii->c_cached_epoch = atomic_read(&permission_epoch);
41341+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
41342 if (cii->c_uid != current_fsuid()) {
41343 cii->c_uid = current_fsuid();
41344 cii->c_cached_perm = mask;
41345@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
41346 {
41347 struct coda_inode_info *cii = ITOC(inode);
41348 spin_lock(&cii->c_lock);
41349- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
41350+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
41351 spin_unlock(&cii->c_lock);
41352 }
41353
41354 /* remove all acl caches */
41355 void coda_cache_clear_all(struct super_block *sb)
41356 {
41357- atomic_inc(&permission_epoch);
41358+ atomic_inc_unchecked(&permission_epoch);
41359 }
41360
41361
41362@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
41363 spin_lock(&cii->c_lock);
41364 hit = (mask & cii->c_cached_perm) == mask &&
41365 cii->c_uid == current_fsuid() &&
41366- cii->c_cached_epoch == atomic_read(&permission_epoch);
41367+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
41368 spin_unlock(&cii->c_lock);
41369
41370 return hit;
41371diff --git a/fs/compat.c b/fs/compat.c
41372index c987875..08771ca 100644
41373--- a/fs/compat.c
41374+++ b/fs/compat.c
41375@@ -132,8 +132,8 @@ asmlinkage long compat_sys_utimes(const char __user *filename, struct compat_tim
41376 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
41377 {
41378 compat_ino_t ino = stat->ino;
41379- typeof(ubuf->st_uid) uid = 0;
41380- typeof(ubuf->st_gid) gid = 0;
41381+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
41382+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
41383 int err;
41384
41385 SET_UID(uid, stat->uid);
41386@@ -504,7 +504,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
41387
41388 set_fs(KERNEL_DS);
41389 /* The __user pointer cast is valid because of the set_fs() */
41390- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
41391+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
41392 set_fs(oldfs);
41393 /* truncating is ok because it's a user address */
41394 if (!ret)
41395@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
41396 goto out;
41397
41398 ret = -EINVAL;
41399- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
41400+ if (nr_segs > UIO_MAXIOV)
41401 goto out;
41402 if (nr_segs > fast_segs) {
41403 ret = -ENOMEM;
41404@@ -845,6 +845,7 @@ struct compat_old_linux_dirent {
41405
41406 struct compat_readdir_callback {
41407 struct compat_old_linux_dirent __user *dirent;
41408+ struct file * file;
41409 int result;
41410 };
41411
41412@@ -862,6 +863,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
41413 buf->result = -EOVERFLOW;
41414 return -EOVERFLOW;
41415 }
41416+
41417+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41418+ return 0;
41419+
41420 buf->result++;
41421 dirent = buf->dirent;
41422 if (!access_ok(VERIFY_WRITE, dirent,
41423@@ -894,6 +899,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
41424
41425 buf.result = 0;
41426 buf.dirent = dirent;
41427+ buf.file = file;
41428
41429 error = vfs_readdir(file, compat_fillonedir, &buf);
41430 if (buf.result)
41431@@ -914,6 +920,7 @@ struct compat_linux_dirent {
41432 struct compat_getdents_callback {
41433 struct compat_linux_dirent __user *current_dir;
41434 struct compat_linux_dirent __user *previous;
41435+ struct file * file;
41436 int count;
41437 int error;
41438 };
41439@@ -935,6 +942,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
41440 buf->error = -EOVERFLOW;
41441 return -EOVERFLOW;
41442 }
41443+
41444+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41445+ return 0;
41446+
41447 dirent = buf->previous;
41448 if (dirent) {
41449 if (__put_user(offset, &dirent->d_off))
41450@@ -982,6 +993,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
41451 buf.previous = NULL;
41452 buf.count = count;
41453 buf.error = 0;
41454+ buf.file = file;
41455
41456 error = vfs_readdir(file, compat_filldir, &buf);
41457 if (error >= 0)
41458@@ -1003,6 +1015,7 @@ out:
41459 struct compat_getdents_callback64 {
41460 struct linux_dirent64 __user *current_dir;
41461 struct linux_dirent64 __user *previous;
41462+ struct file * file;
41463 int count;
41464 int error;
41465 };
41466@@ -1019,6 +1032,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
41467 buf->error = -EINVAL; /* only used if we fail.. */
41468 if (reclen > buf->count)
41469 return -EINVAL;
41470+
41471+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41472+ return 0;
41473+
41474 dirent = buf->previous;
41475
41476 if (dirent) {
41477@@ -1070,13 +1087,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
41478 buf.previous = NULL;
41479 buf.count = count;
41480 buf.error = 0;
41481+ buf.file = file;
41482
41483 error = vfs_readdir(file, compat_filldir64, &buf);
41484 if (error >= 0)
41485 error = buf.error;
41486 lastdirent = buf.previous;
41487 if (lastdirent) {
41488- typeof(lastdirent->d_off) d_off = file->f_pos;
41489+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
41490 if (__put_user_unaligned(d_off, &lastdirent->d_off))
41491 error = -EFAULT;
41492 else
41493diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
41494index 112e45a..b59845b 100644
41495--- a/fs/compat_binfmt_elf.c
41496+++ b/fs/compat_binfmt_elf.c
41497@@ -30,11 +30,13 @@
41498 #undef elf_phdr
41499 #undef elf_shdr
41500 #undef elf_note
41501+#undef elf_dyn
41502 #undef elf_addr_t
41503 #define elfhdr elf32_hdr
41504 #define elf_phdr elf32_phdr
41505 #define elf_shdr elf32_shdr
41506 #define elf_note elf32_note
41507+#define elf_dyn Elf32_Dyn
41508 #define elf_addr_t Elf32_Addr
41509
41510 /*
41511diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
41512index 51352de..93292ff 100644
41513--- a/fs/compat_ioctl.c
41514+++ b/fs/compat_ioctl.c
41515@@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
41516
41517 err = get_user(palp, &up->palette);
41518 err |= get_user(length, &up->length);
41519+ if (err)
41520+ return -EFAULT;
41521
41522 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
41523 err = put_user(compat_ptr(palp), &up_native->palette);
41524@@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
41525 return -EFAULT;
41526 if (__get_user(udata, &ss32->iomem_base))
41527 return -EFAULT;
41528- ss.iomem_base = compat_ptr(udata);
41529+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
41530 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
41531 __get_user(ss.port_high, &ss32->port_high))
41532 return -EFAULT;
41533@@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
41534 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
41535 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
41536 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
41537- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41538+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41539 return -EFAULT;
41540
41541 return ioctl_preallocate(file, p);
41542@@ -1644,8 +1646,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
41543 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
41544 {
41545 unsigned int a, b;
41546- a = *(unsigned int *)p;
41547- b = *(unsigned int *)q;
41548+ a = *(const unsigned int *)p;
41549+ b = *(const unsigned int *)q;
41550 if (a > b)
41551 return 1;
41552 if (a < b)
41553diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
41554index 9a37a9b..35792b6 100644
41555--- a/fs/configfs/dir.c
41556+++ b/fs/configfs/dir.c
41557@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
41558 }
41559 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
41560 struct configfs_dirent *next;
41561- const char * name;
41562+ const unsigned char * name;
41563+ char d_name[sizeof(next->s_dentry->d_iname)];
41564 int len;
41565 struct inode *inode = NULL;
41566
41567@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
41568 continue;
41569
41570 name = configfs_get_name(next);
41571- len = strlen(name);
41572+ if (next->s_dentry && name == next->s_dentry->d_iname) {
41573+ len = next->s_dentry->d_name.len;
41574+ memcpy(d_name, name, len);
41575+ name = d_name;
41576+ } else
41577+ len = strlen(name);
41578
41579 /*
41580 * We'll have a dentry and an inode for
41581diff --git a/fs/dcache.c b/fs/dcache.c
41582index f7908ae..920a680 100644
41583--- a/fs/dcache.c
41584+++ b/fs/dcache.c
41585@@ -3042,7 +3042,7 @@ void __init vfs_caches_init(unsigned long mempages)
41586 mempages -= reserve;
41587
41588 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
41589- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
41590+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
41591
41592 dcache_init();
41593 inode_init();
41594diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
41595index f3a257d..715ac0f 100644
41596--- a/fs/debugfs/inode.c
41597+++ b/fs/debugfs/inode.c
41598@@ -261,7 +261,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
41599 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
41600 {
41601 return debugfs_create_file(name,
41602+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
41603+ S_IFDIR | S_IRWXU,
41604+#else
41605 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
41606+#endif
41607 parent, NULL, NULL);
41608 }
41609 EXPORT_SYMBOL_GPL(debugfs_create_dir);
41610diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
41611index af11098..81e3bbe 100644
41612--- a/fs/ecryptfs/inode.c
41613+++ b/fs/ecryptfs/inode.c
41614@@ -691,7 +691,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
41615 old_fs = get_fs();
41616 set_fs(get_ds());
41617 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
41618- (char __user *)lower_buf,
41619+ (char __force_user *)lower_buf,
41620 lower_bufsiz);
41621 set_fs(old_fs);
41622 if (rc < 0)
41623@@ -737,7 +737,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
41624 }
41625 old_fs = get_fs();
41626 set_fs(get_ds());
41627- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
41628+ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
41629 set_fs(old_fs);
41630 if (rc < 0) {
41631 kfree(buf);
41632@@ -752,7 +752,7 @@ out:
41633 static void
41634 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
41635 {
41636- char *buf = nd_get_link(nd);
41637+ const char *buf = nd_get_link(nd);
41638 if (!IS_ERR(buf)) {
41639 /* Free the char* */
41640 kfree(buf);
41641diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
41642index 0dc5a3d..d3cdeea 100644
41643--- a/fs/ecryptfs/miscdev.c
41644+++ b/fs/ecryptfs/miscdev.c
41645@@ -328,7 +328,7 @@ check_list:
41646 goto out_unlock_msg_ctx;
41647 i = 5;
41648 if (msg_ctx->msg) {
41649- if (copy_to_user(&buf[i], packet_length, packet_length_size))
41650+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
41651 goto out_unlock_msg_ctx;
41652 i += packet_length_size;
41653 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
41654diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
41655index 608c1c3..7d040a8 100644
41656--- a/fs/ecryptfs/read_write.c
41657+++ b/fs/ecryptfs/read_write.c
41658@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
41659 return -EIO;
41660 fs_save = get_fs();
41661 set_fs(get_ds());
41662- rc = vfs_write(lower_file, data, size, &offset);
41663+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
41664 set_fs(fs_save);
41665 mark_inode_dirty_sync(ecryptfs_inode);
41666 return rc;
41667@@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
41668 return -EIO;
41669 fs_save = get_fs();
41670 set_fs(get_ds());
41671- rc = vfs_read(lower_file, data, size, &offset);
41672+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
41673 set_fs(fs_save);
41674 return rc;
41675 }
41676diff --git a/fs/exec.c b/fs/exec.c
41677index 3625464..04855f9 100644
41678--- a/fs/exec.c
41679+++ b/fs/exec.c
41680@@ -55,12 +55,28 @@
41681 #include <linux/pipe_fs_i.h>
41682 #include <linux/oom.h>
41683 #include <linux/compat.h>
41684+#include <linux/random.h>
41685+#include <linux/seq_file.h>
41686+
41687+#ifdef CONFIG_PAX_REFCOUNT
41688+#include <linux/kallsyms.h>
41689+#include <linux/kdebug.h>
41690+#endif
41691
41692 #include <asm/uaccess.h>
41693 #include <asm/mmu_context.h>
41694 #include <asm/tlb.h>
41695 #include "internal.h"
41696
41697+#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
41698+void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
41699+#endif
41700+
41701+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
41702+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
41703+EXPORT_SYMBOL(pax_set_initial_flags_func);
41704+#endif
41705+
41706 int core_uses_pid;
41707 char core_pattern[CORENAME_MAX_SIZE] = "core";
41708 unsigned int core_pipe_limit;
41709@@ -70,7 +86,7 @@ struct core_name {
41710 char *corename;
41711 int used, size;
41712 };
41713-static atomic_t call_count = ATOMIC_INIT(1);
41714+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
41715
41716 /* The maximal length of core_pattern is also specified in sysctl.c */
41717
41718@@ -188,18 +204,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
41719 int write)
41720 {
41721 struct page *page;
41722- int ret;
41723
41724-#ifdef CONFIG_STACK_GROWSUP
41725- if (write) {
41726- ret = expand_downwards(bprm->vma, pos);
41727- if (ret < 0)
41728- return NULL;
41729- }
41730-#endif
41731- ret = get_user_pages(current, bprm->mm, pos,
41732- 1, write, 1, &page, NULL);
41733- if (ret <= 0)
41734+ if (0 > expand_downwards(bprm->vma, pos))
41735+ return NULL;
41736+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
41737 return NULL;
41738
41739 if (write) {
41740@@ -215,6 +223,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
41741 if (size <= ARG_MAX)
41742 return page;
41743
41744+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41745+ // only allow 1MB for argv+env on suid/sgid binaries
41746+ // to prevent easy ASLR exhaustion
41747+ if (((bprm->cred->euid != current_euid()) ||
41748+ (bprm->cred->egid != current_egid())) &&
41749+ (size > (1024 * 1024))) {
41750+ put_page(page);
41751+ return NULL;
41752+ }
41753+#endif
41754+
41755 /*
41756 * Limit to 1/4-th the stack size for the argv+env strings.
41757 * This ensures that:
41758@@ -274,6 +293,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
41759 vma->vm_end = STACK_TOP_MAX;
41760 vma->vm_start = vma->vm_end - PAGE_SIZE;
41761 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
41762+
41763+#ifdef CONFIG_PAX_SEGMEXEC
41764+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
41765+#endif
41766+
41767 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
41768 INIT_LIST_HEAD(&vma->anon_vma_chain);
41769
41770@@ -288,6 +312,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
41771 mm->stack_vm = mm->total_vm = 1;
41772 up_write(&mm->mmap_sem);
41773 bprm->p = vma->vm_end - sizeof(void *);
41774+
41775+#ifdef CONFIG_PAX_RANDUSTACK
41776+ if (randomize_va_space)
41777+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
41778+#endif
41779+
41780 return 0;
41781 err:
41782 up_write(&mm->mmap_sem);
41783@@ -396,19 +426,7 @@ err:
41784 return err;
41785 }
41786
41787-struct user_arg_ptr {
41788-#ifdef CONFIG_COMPAT
41789- bool is_compat;
41790-#endif
41791- union {
41792- const char __user *const __user *native;
41793-#ifdef CONFIG_COMPAT
41794- compat_uptr_t __user *compat;
41795-#endif
41796- } ptr;
41797-};
41798-
41799-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41800+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41801 {
41802 const char __user *native;
41803
41804@@ -417,14 +435,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41805 compat_uptr_t compat;
41806
41807 if (get_user(compat, argv.ptr.compat + nr))
41808- return ERR_PTR(-EFAULT);
41809+ return (const char __force_user *)ERR_PTR(-EFAULT);
41810
41811 return compat_ptr(compat);
41812 }
41813 #endif
41814
41815 if (get_user(native, argv.ptr.native + nr))
41816- return ERR_PTR(-EFAULT);
41817+ return (const char __force_user *)ERR_PTR(-EFAULT);
41818
41819 return native;
41820 }
41821@@ -443,7 +461,7 @@ static int count(struct user_arg_ptr argv, int max)
41822 if (!p)
41823 break;
41824
41825- if (IS_ERR(p))
41826+ if (IS_ERR((const char __force_kernel *)p))
41827 return -EFAULT;
41828
41829 if (i++ >= max)
41830@@ -477,7 +495,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
41831
41832 ret = -EFAULT;
41833 str = get_user_arg_ptr(argv, argc);
41834- if (IS_ERR(str))
41835+ if (IS_ERR((const char __force_kernel *)str))
41836 goto out;
41837
41838 len = strnlen_user(str, MAX_ARG_STRLEN);
41839@@ -559,7 +577,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
41840 int r;
41841 mm_segment_t oldfs = get_fs();
41842 struct user_arg_ptr argv = {
41843- .ptr.native = (const char __user *const __user *)__argv,
41844+ .ptr.native = (const char __force_user *const __force_user *)__argv,
41845 };
41846
41847 set_fs(KERNEL_DS);
41848@@ -594,7 +612,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
41849 unsigned long new_end = old_end - shift;
41850 struct mmu_gather tlb;
41851
41852- BUG_ON(new_start > new_end);
41853+ if (new_start >= new_end || new_start < mmap_min_addr)
41854+ return -ENOMEM;
41855
41856 /*
41857 * ensure there are no vmas between where we want to go
41858@@ -603,6 +622,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
41859 if (vma != find_vma(mm, new_start))
41860 return -EFAULT;
41861
41862+#ifdef CONFIG_PAX_SEGMEXEC
41863+ BUG_ON(pax_find_mirror_vma(vma));
41864+#endif
41865+
41866 /*
41867 * cover the whole range: [new_start, old_end)
41868 */
41869@@ -683,10 +706,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
41870 stack_top = arch_align_stack(stack_top);
41871 stack_top = PAGE_ALIGN(stack_top);
41872
41873- if (unlikely(stack_top < mmap_min_addr) ||
41874- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
41875- return -ENOMEM;
41876-
41877 stack_shift = vma->vm_end - stack_top;
41878
41879 bprm->p -= stack_shift;
41880@@ -698,8 +717,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
41881 bprm->exec -= stack_shift;
41882
41883 down_write(&mm->mmap_sem);
41884+
41885+ /* Move stack pages down in memory. */
41886+ if (stack_shift) {
41887+ ret = shift_arg_pages(vma, stack_shift);
41888+ if (ret)
41889+ goto out_unlock;
41890+ }
41891+
41892 vm_flags = VM_STACK_FLAGS;
41893
41894+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41895+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41896+ vm_flags &= ~VM_EXEC;
41897+
41898+#ifdef CONFIG_PAX_MPROTECT
41899+ if (mm->pax_flags & MF_PAX_MPROTECT)
41900+ vm_flags &= ~VM_MAYEXEC;
41901+#endif
41902+
41903+ }
41904+#endif
41905+
41906 /*
41907 * Adjust stack execute permissions; explicitly enable for
41908 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
41909@@ -718,13 +757,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
41910 goto out_unlock;
41911 BUG_ON(prev != vma);
41912
41913- /* Move stack pages down in memory. */
41914- if (stack_shift) {
41915- ret = shift_arg_pages(vma, stack_shift);
41916- if (ret)
41917- goto out_unlock;
41918- }
41919-
41920 /* mprotect_fixup is overkill to remove the temporary stack flags */
41921 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
41922
41923@@ -805,7 +837,7 @@ int kernel_read(struct file *file, loff_t offset,
41924 old_fs = get_fs();
41925 set_fs(get_ds());
41926 /* The cast to a user pointer is valid due to the set_fs() */
41927- result = vfs_read(file, (void __user *)addr, count, &pos);
41928+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
41929 set_fs(old_fs);
41930 return result;
41931 }
41932@@ -1067,6 +1099,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
41933 perf_event_comm(tsk);
41934 }
41935
41936+static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
41937+{
41938+ int i, ch;
41939+
41940+ /* Copies the binary name from after last slash */
41941+ for (i = 0; (ch = *(fn++)) != '\0';) {
41942+ if (ch == '/')
41943+ i = 0; /* overwrite what we wrote */
41944+ else
41945+ if (i < len - 1)
41946+ tcomm[i++] = ch;
41947+ }
41948+ tcomm[i] = '\0';
41949+}
41950+
41951 int flush_old_exec(struct linux_binprm * bprm)
41952 {
41953 int retval;
41954@@ -1081,6 +1128,7 @@ int flush_old_exec(struct linux_binprm * bprm)
41955
41956 set_mm_exe_file(bprm->mm, bprm->file);
41957
41958+ filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
41959 /*
41960 * Release all of the old mmap stuff
41961 */
41962@@ -1112,10 +1160,6 @@ EXPORT_SYMBOL(would_dump);
41963
41964 void setup_new_exec(struct linux_binprm * bprm)
41965 {
41966- int i, ch;
41967- const char *name;
41968- char tcomm[sizeof(current->comm)];
41969-
41970 arch_pick_mmap_layout(current->mm);
41971
41972 /* This is the point of no return */
41973@@ -1126,18 +1170,7 @@ void setup_new_exec(struct linux_binprm * bprm)
41974 else
41975 set_dumpable(current->mm, suid_dumpable);
41976
41977- name = bprm->filename;
41978-
41979- /* Copies the binary name from after last slash */
41980- for (i=0; (ch = *(name++)) != '\0';) {
41981- if (ch == '/')
41982- i = 0; /* overwrite what we wrote */
41983- else
41984- if (i < (sizeof(tcomm) - 1))
41985- tcomm[i++] = ch;
41986- }
41987- tcomm[i] = '\0';
41988- set_task_comm(current, tcomm);
41989+ set_task_comm(current, bprm->tcomm);
41990
41991 /* Set the new mm task size. We have to do that late because it may
41992 * depend on TIF_32BIT which is only updated in flush_thread() on
41993@@ -1247,7 +1280,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
41994 }
41995 rcu_read_unlock();
41996
41997- if (p->fs->users > n_fs) {
41998+ if (atomic_read(&p->fs->users) > n_fs) {
41999 bprm->unsafe |= LSM_UNSAFE_SHARE;
42000 } else {
42001 res = -EAGAIN;
42002@@ -1442,6 +1475,10 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
42003
42004 EXPORT_SYMBOL(search_binary_handler);
42005
42006+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42007+static atomic64_unchecked_t global_exec_counter = ATOMIC64_INIT(0);
42008+#endif
42009+
42010 /*
42011 * sys_execve() executes a new program.
42012 */
42013@@ -1450,6 +1487,11 @@ static int do_execve_common(const char *filename,
42014 struct user_arg_ptr envp,
42015 struct pt_regs *regs)
42016 {
42017+#ifdef CONFIG_GRKERNSEC
42018+ struct file *old_exec_file;
42019+ struct acl_subject_label *old_acl;
42020+ struct rlimit old_rlim[RLIM_NLIMITS];
42021+#endif
42022 struct linux_binprm *bprm;
42023 struct file *file;
42024 struct files_struct *displaced;
42025@@ -1457,6 +1499,8 @@ static int do_execve_common(const char *filename,
42026 int retval;
42027 const struct cred *cred = current_cred();
42028
42029+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
42030+
42031 /*
42032 * We move the actual failure in case of RLIMIT_NPROC excess from
42033 * set*uid() to execve() because too many poorly written programs
42034@@ -1497,12 +1541,27 @@ static int do_execve_common(const char *filename,
42035 if (IS_ERR(file))
42036 goto out_unmark;
42037
42038+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
42039+ retval = -EPERM;
42040+ goto out_file;
42041+ }
42042+
42043 sched_exec();
42044
42045 bprm->file = file;
42046 bprm->filename = filename;
42047 bprm->interp = filename;
42048
42049+ if (gr_process_user_ban()) {
42050+ retval = -EPERM;
42051+ goto out_file;
42052+ }
42053+
42054+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
42055+ retval = -EACCES;
42056+ goto out_file;
42057+ }
42058+
42059 retval = bprm_mm_init(bprm);
42060 if (retval)
42061 goto out_file;
42062@@ -1532,11 +1591,46 @@ static int do_execve_common(const char *filename,
42063 if (retval < 0)
42064 goto out;
42065
42066+ if (!gr_tpe_allow(file)) {
42067+ retval = -EACCES;
42068+ goto out;
42069+ }
42070+
42071+ if (gr_check_crash_exec(file)) {
42072+ retval = -EACCES;
42073+ goto out;
42074+ }
42075+
42076+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
42077+
42078+ gr_handle_exec_args(bprm, argv);
42079+
42080+#ifdef CONFIG_GRKERNSEC
42081+ old_acl = current->acl;
42082+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
42083+ old_exec_file = current->exec_file;
42084+ get_file(file);
42085+ current->exec_file = file;
42086+#endif
42087+
42088+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
42089+ bprm->unsafe);
42090+ if (retval < 0)
42091+ goto out_fail;
42092+
42093 retval = search_binary_handler(bprm,regs);
42094 if (retval < 0)
42095- goto out;
42096+ goto out_fail;
42097+#ifdef CONFIG_GRKERNSEC
42098+ if (old_exec_file)
42099+ fput(old_exec_file);
42100+#endif
42101
42102 /* execve succeeded */
42103+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42104+ current->exec_id = atomic64_inc_return_unchecked(&global_exec_counter);
42105+#endif
42106+
42107 current->fs->in_exec = 0;
42108 current->in_execve = 0;
42109 acct_update_integrals(current);
42110@@ -1545,6 +1639,14 @@ static int do_execve_common(const char *filename,
42111 put_files_struct(displaced);
42112 return retval;
42113
42114+out_fail:
42115+#ifdef CONFIG_GRKERNSEC
42116+ current->acl = old_acl;
42117+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
42118+ fput(current->exec_file);
42119+ current->exec_file = old_exec_file;
42120+#endif
42121+
42122 out:
42123 if (bprm->mm) {
42124 acct_arg_size(bprm, 0);
42125@@ -1618,7 +1720,7 @@ static int expand_corename(struct core_name *cn)
42126 {
42127 char *old_corename = cn->corename;
42128
42129- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
42130+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
42131 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
42132
42133 if (!cn->corename) {
42134@@ -1715,7 +1817,7 @@ static int format_corename(struct core_name *cn, long signr)
42135 int pid_in_pattern = 0;
42136 int err = 0;
42137
42138- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
42139+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
42140 cn->corename = kmalloc(cn->size, GFP_KERNEL);
42141 cn->used = 0;
42142
42143@@ -1812,6 +1914,218 @@ out:
42144 return ispipe;
42145 }
42146
42147+int pax_check_flags(unsigned long *flags)
42148+{
42149+ int retval = 0;
42150+
42151+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
42152+ if (*flags & MF_PAX_SEGMEXEC)
42153+ {
42154+ *flags &= ~MF_PAX_SEGMEXEC;
42155+ retval = -EINVAL;
42156+ }
42157+#endif
42158+
42159+ if ((*flags & MF_PAX_PAGEEXEC)
42160+
42161+#ifdef CONFIG_PAX_PAGEEXEC
42162+ && (*flags & MF_PAX_SEGMEXEC)
42163+#endif
42164+
42165+ )
42166+ {
42167+ *flags &= ~MF_PAX_PAGEEXEC;
42168+ retval = -EINVAL;
42169+ }
42170+
42171+ if ((*flags & MF_PAX_MPROTECT)
42172+
42173+#ifdef CONFIG_PAX_MPROTECT
42174+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
42175+#endif
42176+
42177+ )
42178+ {
42179+ *flags &= ~MF_PAX_MPROTECT;
42180+ retval = -EINVAL;
42181+ }
42182+
42183+ if ((*flags & MF_PAX_EMUTRAMP)
42184+
42185+#ifdef CONFIG_PAX_EMUTRAMP
42186+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
42187+#endif
42188+
42189+ )
42190+ {
42191+ *flags &= ~MF_PAX_EMUTRAMP;
42192+ retval = -EINVAL;
42193+ }
42194+
42195+ return retval;
42196+}
42197+
42198+EXPORT_SYMBOL(pax_check_flags);
42199+
42200+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
42201+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
42202+{
42203+ struct task_struct *tsk = current;
42204+ struct mm_struct *mm = current->mm;
42205+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
42206+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
42207+ char *path_exec = NULL;
42208+ char *path_fault = NULL;
42209+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
42210+
42211+ if (buffer_exec && buffer_fault) {
42212+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
42213+
42214+ down_read(&mm->mmap_sem);
42215+ vma = mm->mmap;
42216+ while (vma && (!vma_exec || !vma_fault)) {
42217+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
42218+ vma_exec = vma;
42219+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
42220+ vma_fault = vma;
42221+ vma = vma->vm_next;
42222+ }
42223+ if (vma_exec) {
42224+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
42225+ if (IS_ERR(path_exec))
42226+ path_exec = "<path too long>";
42227+ else {
42228+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
42229+ if (path_exec) {
42230+ *path_exec = 0;
42231+ path_exec = buffer_exec;
42232+ } else
42233+ path_exec = "<path too long>";
42234+ }
42235+ }
42236+ if (vma_fault) {
42237+ start = vma_fault->vm_start;
42238+ end = vma_fault->vm_end;
42239+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
42240+ if (vma_fault->vm_file) {
42241+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
42242+ if (IS_ERR(path_fault))
42243+ path_fault = "<path too long>";
42244+ else {
42245+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
42246+ if (path_fault) {
42247+ *path_fault = 0;
42248+ path_fault = buffer_fault;
42249+ } else
42250+ path_fault = "<path too long>";
42251+ }
42252+ } else
42253+ path_fault = "<anonymous mapping>";
42254+ }
42255+ up_read(&mm->mmap_sem);
42256+ }
42257+ if (tsk->signal->curr_ip)
42258+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
42259+ else
42260+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
42261+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
42262+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
42263+ task_uid(tsk), task_euid(tsk), pc, sp);
42264+ free_page((unsigned long)buffer_exec);
42265+ free_page((unsigned long)buffer_fault);
42266+ pax_report_insns(regs, pc, sp);
42267+ do_coredump(SIGKILL, SIGKILL, regs);
42268+}
42269+#endif
42270+
42271+#ifdef CONFIG_PAX_REFCOUNT
42272+void pax_report_refcount_overflow(struct pt_regs *regs)
42273+{
42274+ if (current->signal->curr_ip)
42275+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
42276+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
42277+ else
42278+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
42279+ current->comm, task_pid_nr(current), current_uid(), current_euid());
42280+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
42281+ show_regs(regs);
42282+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
42283+}
42284+#endif
42285+
42286+#ifdef CONFIG_PAX_USERCOPY
42287+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
42288+int object_is_on_stack(const void *obj, unsigned long len)
42289+{
42290+ const void * const stack = task_stack_page(current);
42291+ const void * const stackend = stack + THREAD_SIZE;
42292+
42293+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
42294+ const void *frame = NULL;
42295+ const void *oldframe;
42296+#endif
42297+
42298+ if (obj + len < obj)
42299+ return -1;
42300+
42301+ if (obj + len <= stack || stackend <= obj)
42302+ return 0;
42303+
42304+ if (obj < stack || stackend < obj + len)
42305+ return -1;
42306+
42307+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
42308+ oldframe = __builtin_frame_address(1);
42309+ if (oldframe)
42310+ frame = __builtin_frame_address(2);
42311+ /*
42312+ low ----------------------------------------------> high
42313+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
42314+ ^----------------^
42315+ allow copies only within here
42316+ */
42317+ while (stack <= frame && frame < stackend) {
42318+ /* if obj + len extends past the last frame, this
42319+ check won't pass and the next frame will be 0,
42320+ causing us to bail out and correctly report
42321+ the copy as invalid
42322+ */
42323+ if (obj + len <= frame)
42324+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
42325+ oldframe = frame;
42326+ frame = *(const void * const *)frame;
42327+ }
42328+ return -1;
42329+#else
42330+ return 1;
42331+#endif
42332+}
42333+
42334+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
42335+{
42336+ if (current->signal->curr_ip)
42337+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
42338+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
42339+ else
42340+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
42341+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
42342+ dump_stack();
42343+ gr_handle_kernel_exploit();
42344+ do_group_exit(SIGKILL);
42345+}
42346+#endif
42347+
42348+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
42349+void pax_track_stack(void)
42350+{
42351+ unsigned long sp = (unsigned long)&sp;
42352+ if (sp < current_thread_info()->lowest_stack &&
42353+ sp > (unsigned long)task_stack_page(current))
42354+ current_thread_info()->lowest_stack = sp;
42355+}
42356+EXPORT_SYMBOL(pax_track_stack);
42357+#endif
42358+
42359 static int zap_process(struct task_struct *start, int exit_code)
42360 {
42361 struct task_struct *t;
42362@@ -2023,17 +2337,17 @@ static void wait_for_dump_helpers(struct file *file)
42363 pipe = file->f_path.dentry->d_inode->i_pipe;
42364
42365 pipe_lock(pipe);
42366- pipe->readers++;
42367- pipe->writers--;
42368+ atomic_inc(&pipe->readers);
42369+ atomic_dec(&pipe->writers);
42370
42371- while ((pipe->readers > 1) && (!signal_pending(current))) {
42372+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
42373 wake_up_interruptible_sync(&pipe->wait);
42374 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
42375 pipe_wait(pipe);
42376 }
42377
42378- pipe->readers--;
42379- pipe->writers++;
42380+ atomic_dec(&pipe->readers);
42381+ atomic_inc(&pipe->writers);
42382 pipe_unlock(pipe);
42383
42384 }
42385@@ -2094,7 +2408,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42386 int retval = 0;
42387 int flag = 0;
42388 int ispipe;
42389- static atomic_t core_dump_count = ATOMIC_INIT(0);
42390+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
42391 struct coredump_params cprm = {
42392 .signr = signr,
42393 .regs = regs,
42394@@ -2109,6 +2423,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42395
42396 audit_core_dumps(signr);
42397
42398+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
42399+ gr_handle_brute_attach(current, cprm.mm_flags);
42400+
42401 binfmt = mm->binfmt;
42402 if (!binfmt || !binfmt->core_dump)
42403 goto fail;
42404@@ -2176,7 +2493,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42405 }
42406 cprm.limit = RLIM_INFINITY;
42407
42408- dump_count = atomic_inc_return(&core_dump_count);
42409+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
42410 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
42411 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
42412 task_tgid_vnr(current), current->comm);
42413@@ -2203,6 +2520,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42414 } else {
42415 struct inode *inode;
42416
42417+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
42418+
42419 if (cprm.limit < binfmt->min_coredump)
42420 goto fail_unlock;
42421
42422@@ -2246,7 +2565,7 @@ close_fail:
42423 filp_close(cprm.file, NULL);
42424 fail_dropcount:
42425 if (ispipe)
42426- atomic_dec(&core_dump_count);
42427+ atomic_dec_unchecked(&core_dump_count);
42428 fail_unlock:
42429 kfree(cn.corename);
42430 fail_corename:
42431@@ -2265,7 +2584,7 @@ fail:
42432 */
42433 int dump_write(struct file *file, const void *addr, int nr)
42434 {
42435- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
42436+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
42437 }
42438 EXPORT_SYMBOL(dump_write);
42439
42440diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
42441index a8cbe1b..fed04cb 100644
42442--- a/fs/ext2/balloc.c
42443+++ b/fs/ext2/balloc.c
42444@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
42445
42446 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42447 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42448- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42449+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
42450 sbi->s_resuid != current_fsuid() &&
42451 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42452 return 0;
42453diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
42454index a203892..4e64db5 100644
42455--- a/fs/ext3/balloc.c
42456+++ b/fs/ext3/balloc.c
42457@@ -1446,9 +1446,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
42458
42459 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42460 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42461- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42462+ if (free_blocks < root_blocks + 1 &&
42463 !use_reservation && sbi->s_resuid != current_fsuid() &&
42464- (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42465+ (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
42466+ !capable_nolog(CAP_SYS_RESOURCE)) {
42467 return 0;
42468 }
42469 return 1;
42470diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
42471index 12ccacd..a6035fce0 100644
42472--- a/fs/ext4/balloc.c
42473+++ b/fs/ext4/balloc.c
42474@@ -436,8 +436,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
42475 /* Hm, nope. Are (enough) root reserved clusters available? */
42476 if (sbi->s_resuid == current_fsuid() ||
42477 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
42478- capable(CAP_SYS_RESOURCE) ||
42479- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
42480+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
42481+ capable_nolog(CAP_SYS_RESOURCE)) {
42482
42483 if (free_clusters >= (nclusters + dirty_clusters))
42484 return 1;
42485diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
42486index 5b0e26a..0aa002d 100644
42487--- a/fs/ext4/ext4.h
42488+++ b/fs/ext4/ext4.h
42489@@ -1208,19 +1208,19 @@ struct ext4_sb_info {
42490 unsigned long s_mb_last_start;
42491
42492 /* stats for buddy allocator */
42493- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
42494- atomic_t s_bal_success; /* we found long enough chunks */
42495- atomic_t s_bal_allocated; /* in blocks */
42496- atomic_t s_bal_ex_scanned; /* total extents scanned */
42497- atomic_t s_bal_goals; /* goal hits */
42498- atomic_t s_bal_breaks; /* too long searches */
42499- atomic_t s_bal_2orders; /* 2^order hits */
42500+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
42501+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
42502+ atomic_unchecked_t s_bal_allocated; /* in blocks */
42503+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
42504+ atomic_unchecked_t s_bal_goals; /* goal hits */
42505+ atomic_unchecked_t s_bal_breaks; /* too long searches */
42506+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
42507 spinlock_t s_bal_lock;
42508 unsigned long s_mb_buddies_generated;
42509 unsigned long long s_mb_generation_time;
42510- atomic_t s_mb_lost_chunks;
42511- atomic_t s_mb_preallocated;
42512- atomic_t s_mb_discarded;
42513+ atomic_unchecked_t s_mb_lost_chunks;
42514+ atomic_unchecked_t s_mb_preallocated;
42515+ atomic_unchecked_t s_mb_discarded;
42516 atomic_t s_lock_busy;
42517
42518 /* locality groups */
42519diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
42520index e2d8be8..c7f0ce9 100644
42521--- a/fs/ext4/mballoc.c
42522+++ b/fs/ext4/mballoc.c
42523@@ -1794,7 +1794,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
42524 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
42525
42526 if (EXT4_SB(sb)->s_mb_stats)
42527- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
42528+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
42529
42530 break;
42531 }
42532@@ -2088,7 +2088,7 @@ repeat:
42533 ac->ac_status = AC_STATUS_CONTINUE;
42534 ac->ac_flags |= EXT4_MB_HINT_FIRST;
42535 cr = 3;
42536- atomic_inc(&sbi->s_mb_lost_chunks);
42537+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
42538 goto repeat;
42539 }
42540 }
42541@@ -2592,25 +2592,25 @@ int ext4_mb_release(struct super_block *sb)
42542 if (sbi->s_mb_stats) {
42543 ext4_msg(sb, KERN_INFO,
42544 "mballoc: %u blocks %u reqs (%u success)",
42545- atomic_read(&sbi->s_bal_allocated),
42546- atomic_read(&sbi->s_bal_reqs),
42547- atomic_read(&sbi->s_bal_success));
42548+ atomic_read_unchecked(&sbi->s_bal_allocated),
42549+ atomic_read_unchecked(&sbi->s_bal_reqs),
42550+ atomic_read_unchecked(&sbi->s_bal_success));
42551 ext4_msg(sb, KERN_INFO,
42552 "mballoc: %u extents scanned, %u goal hits, "
42553 "%u 2^N hits, %u breaks, %u lost",
42554- atomic_read(&sbi->s_bal_ex_scanned),
42555- atomic_read(&sbi->s_bal_goals),
42556- atomic_read(&sbi->s_bal_2orders),
42557- atomic_read(&sbi->s_bal_breaks),
42558- atomic_read(&sbi->s_mb_lost_chunks));
42559+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
42560+ atomic_read_unchecked(&sbi->s_bal_goals),
42561+ atomic_read_unchecked(&sbi->s_bal_2orders),
42562+ atomic_read_unchecked(&sbi->s_bal_breaks),
42563+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
42564 ext4_msg(sb, KERN_INFO,
42565 "mballoc: %lu generated and it took %Lu",
42566 sbi->s_mb_buddies_generated,
42567 sbi->s_mb_generation_time);
42568 ext4_msg(sb, KERN_INFO,
42569 "mballoc: %u preallocated, %u discarded",
42570- atomic_read(&sbi->s_mb_preallocated),
42571- atomic_read(&sbi->s_mb_discarded));
42572+ atomic_read_unchecked(&sbi->s_mb_preallocated),
42573+ atomic_read_unchecked(&sbi->s_mb_discarded));
42574 }
42575
42576 free_percpu(sbi->s_locality_groups);
42577@@ -3096,16 +3096,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
42578 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
42579
42580 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
42581- atomic_inc(&sbi->s_bal_reqs);
42582- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42583+ atomic_inc_unchecked(&sbi->s_bal_reqs);
42584+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42585 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
42586- atomic_inc(&sbi->s_bal_success);
42587- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
42588+ atomic_inc_unchecked(&sbi->s_bal_success);
42589+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
42590 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
42591 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
42592- atomic_inc(&sbi->s_bal_goals);
42593+ atomic_inc_unchecked(&sbi->s_bal_goals);
42594 if (ac->ac_found > sbi->s_mb_max_to_scan)
42595- atomic_inc(&sbi->s_bal_breaks);
42596+ atomic_inc_unchecked(&sbi->s_bal_breaks);
42597 }
42598
42599 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
42600@@ -3509,7 +3509,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
42601 trace_ext4_mb_new_inode_pa(ac, pa);
42602
42603 ext4_mb_use_inode_pa(ac, pa);
42604- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
42605+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
42606
42607 ei = EXT4_I(ac->ac_inode);
42608 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42609@@ -3569,7 +3569,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
42610 trace_ext4_mb_new_group_pa(ac, pa);
42611
42612 ext4_mb_use_group_pa(ac, pa);
42613- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42614+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42615
42616 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42617 lg = ac->ac_lg;
42618@@ -3658,7 +3658,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
42619 * from the bitmap and continue.
42620 */
42621 }
42622- atomic_add(free, &sbi->s_mb_discarded);
42623+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
42624
42625 return err;
42626 }
42627@@ -3676,7 +3676,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
42628 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
42629 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
42630 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
42631- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42632+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42633 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
42634
42635 return 0;
42636diff --git a/fs/fcntl.c b/fs/fcntl.c
42637index 22764c7..86372c9 100644
42638--- a/fs/fcntl.c
42639+++ b/fs/fcntl.c
42640@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
42641 if (err)
42642 return err;
42643
42644+ if (gr_handle_chroot_fowner(pid, type))
42645+ return -ENOENT;
42646+ if (gr_check_protected_task_fowner(pid, type))
42647+ return -EACCES;
42648+
42649 f_modown(filp, pid, type, force);
42650 return 0;
42651 }
42652@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
42653
42654 static int f_setown_ex(struct file *filp, unsigned long arg)
42655 {
42656- struct f_owner_ex * __user owner_p = (void * __user)arg;
42657+ struct f_owner_ex __user *owner_p = (void __user *)arg;
42658 struct f_owner_ex owner;
42659 struct pid *pid;
42660 int type;
42661@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
42662
42663 static int f_getown_ex(struct file *filp, unsigned long arg)
42664 {
42665- struct f_owner_ex * __user owner_p = (void * __user)arg;
42666+ struct f_owner_ex __user *owner_p = (void __user *)arg;
42667 struct f_owner_ex owner;
42668 int ret = 0;
42669
42670@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
42671 switch (cmd) {
42672 case F_DUPFD:
42673 case F_DUPFD_CLOEXEC:
42674+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
42675 if (arg >= rlimit(RLIMIT_NOFILE))
42676 break;
42677 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
42678diff --git a/fs/fifo.c b/fs/fifo.c
42679index b1a524d..4ee270e 100644
42680--- a/fs/fifo.c
42681+++ b/fs/fifo.c
42682@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
42683 */
42684 filp->f_op = &read_pipefifo_fops;
42685 pipe->r_counter++;
42686- if (pipe->readers++ == 0)
42687+ if (atomic_inc_return(&pipe->readers) == 1)
42688 wake_up_partner(inode);
42689
42690- if (!pipe->writers) {
42691+ if (!atomic_read(&pipe->writers)) {
42692 if ((filp->f_flags & O_NONBLOCK)) {
42693 /* suppress POLLHUP until we have
42694 * seen a writer */
42695@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
42696 * errno=ENXIO when there is no process reading the FIFO.
42697 */
42698 ret = -ENXIO;
42699- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
42700+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
42701 goto err;
42702
42703 filp->f_op = &write_pipefifo_fops;
42704 pipe->w_counter++;
42705- if (!pipe->writers++)
42706+ if (atomic_inc_return(&pipe->writers) == 1)
42707 wake_up_partner(inode);
42708
42709- if (!pipe->readers) {
42710+ if (!atomic_read(&pipe->readers)) {
42711 wait_for_partner(inode, &pipe->r_counter);
42712 if (signal_pending(current))
42713 goto err_wr;
42714@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
42715 */
42716 filp->f_op = &rdwr_pipefifo_fops;
42717
42718- pipe->readers++;
42719- pipe->writers++;
42720+ atomic_inc(&pipe->readers);
42721+ atomic_inc(&pipe->writers);
42722 pipe->r_counter++;
42723 pipe->w_counter++;
42724- if (pipe->readers == 1 || pipe->writers == 1)
42725+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
42726 wake_up_partner(inode);
42727 break;
42728
42729@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
42730 return 0;
42731
42732 err_rd:
42733- if (!--pipe->readers)
42734+ if (atomic_dec_and_test(&pipe->readers))
42735 wake_up_interruptible(&pipe->wait);
42736 ret = -ERESTARTSYS;
42737 goto err;
42738
42739 err_wr:
42740- if (!--pipe->writers)
42741+ if (atomic_dec_and_test(&pipe->writers))
42742 wake_up_interruptible(&pipe->wait);
42743 ret = -ERESTARTSYS;
42744 goto err;
42745
42746 err:
42747- if (!pipe->readers && !pipe->writers)
42748+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
42749 free_pipe_info(inode);
42750
42751 err_nocleanup:
42752diff --git a/fs/file.c b/fs/file.c
42753index 4c6992d..104cdea 100644
42754--- a/fs/file.c
42755+++ b/fs/file.c
42756@@ -15,6 +15,7 @@
42757 #include <linux/slab.h>
42758 #include <linux/vmalloc.h>
42759 #include <linux/file.h>
42760+#include <linux/security.h>
42761 #include <linux/fdtable.h>
42762 #include <linux/bitops.h>
42763 #include <linux/interrupt.h>
42764@@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
42765 * N.B. For clone tasks sharing a files structure, this test
42766 * will limit the total number of files that can be opened.
42767 */
42768+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
42769 if (nr >= rlimit(RLIMIT_NOFILE))
42770 return -EMFILE;
42771
42772diff --git a/fs/filesystems.c b/fs/filesystems.c
42773index 0845f84..7b4ebef 100644
42774--- a/fs/filesystems.c
42775+++ b/fs/filesystems.c
42776@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(const char *name)
42777 int len = dot ? dot - name : strlen(name);
42778
42779 fs = __get_fs_type(name, len);
42780+
42781+#ifdef CONFIG_GRKERNSEC_MODHARDEN
42782+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
42783+#else
42784 if (!fs && (request_module("%.*s", len, name) == 0))
42785+#endif
42786 fs = __get_fs_type(name, len);
42787
42788 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
42789diff --git a/fs/fs_struct.c b/fs/fs_struct.c
42790index 78b519c..a8b4979 100644
42791--- a/fs/fs_struct.c
42792+++ b/fs/fs_struct.c
42793@@ -4,6 +4,7 @@
42794 #include <linux/path.h>
42795 #include <linux/slab.h>
42796 #include <linux/fs_struct.h>
42797+#include <linux/grsecurity.h>
42798 #include "internal.h"
42799
42800 static inline void path_get_longterm(struct path *path)
42801@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
42802 old_root = fs->root;
42803 fs->root = *path;
42804 path_get_longterm(path);
42805+ gr_set_chroot_entries(current, path);
42806 write_seqcount_end(&fs->seq);
42807 spin_unlock(&fs->lock);
42808 if (old_root.dentry)
42809@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
42810 && fs->root.mnt == old_root->mnt) {
42811 path_get_longterm(new_root);
42812 fs->root = *new_root;
42813+ gr_set_chroot_entries(p, new_root);
42814 count++;
42815 }
42816 if (fs->pwd.dentry == old_root->dentry
42817@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
42818 spin_lock(&fs->lock);
42819 write_seqcount_begin(&fs->seq);
42820 tsk->fs = NULL;
42821- kill = !--fs->users;
42822+ gr_clear_chroot_entries(tsk);
42823+ kill = !atomic_dec_return(&fs->users);
42824 write_seqcount_end(&fs->seq);
42825 spin_unlock(&fs->lock);
42826 task_unlock(tsk);
42827@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
42828 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
42829 /* We don't need to lock fs - think why ;-) */
42830 if (fs) {
42831- fs->users = 1;
42832+ atomic_set(&fs->users, 1);
42833 fs->in_exec = 0;
42834 spin_lock_init(&fs->lock);
42835 seqcount_init(&fs->seq);
42836@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
42837 spin_lock(&old->lock);
42838 fs->root = old->root;
42839 path_get_longterm(&fs->root);
42840+ /* instead of calling gr_set_chroot_entries here,
42841+ we call it from every caller of this function
42842+ */
42843 fs->pwd = old->pwd;
42844 path_get_longterm(&fs->pwd);
42845 spin_unlock(&old->lock);
42846@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
42847
42848 task_lock(current);
42849 spin_lock(&fs->lock);
42850- kill = !--fs->users;
42851+ kill = !atomic_dec_return(&fs->users);
42852 current->fs = new_fs;
42853+ gr_set_chroot_entries(current, &new_fs->root);
42854 spin_unlock(&fs->lock);
42855 task_unlock(current);
42856
42857@@ -164,13 +172,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
42858
42859 int current_umask(void)
42860 {
42861- return current->fs->umask;
42862+ return current->fs->umask | gr_acl_umask();
42863 }
42864 EXPORT_SYMBOL(current_umask);
42865
42866 /* to be mentioned only in INIT_TASK */
42867 struct fs_struct init_fs = {
42868- .users = 1,
42869+ .users = ATOMIC_INIT(1),
42870 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
42871 .seq = SEQCNT_ZERO,
42872 .umask = 0022,
42873@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
42874 task_lock(current);
42875
42876 spin_lock(&init_fs.lock);
42877- init_fs.users++;
42878+ atomic_inc(&init_fs.users);
42879 spin_unlock(&init_fs.lock);
42880
42881 spin_lock(&fs->lock);
42882 current->fs = &init_fs;
42883- kill = !--fs->users;
42884+ gr_set_chroot_entries(current, &current->fs->root);
42885+ kill = !atomic_dec_return(&fs->users);
42886 spin_unlock(&fs->lock);
42887
42888 task_unlock(current);
42889diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
42890index 9905350..02eaec4 100644
42891--- a/fs/fscache/cookie.c
42892+++ b/fs/fscache/cookie.c
42893@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
42894 parent ? (char *) parent->def->name : "<no-parent>",
42895 def->name, netfs_data);
42896
42897- fscache_stat(&fscache_n_acquires);
42898+ fscache_stat_unchecked(&fscache_n_acquires);
42899
42900 /* if there's no parent cookie, then we don't create one here either */
42901 if (!parent) {
42902- fscache_stat(&fscache_n_acquires_null);
42903+ fscache_stat_unchecked(&fscache_n_acquires_null);
42904 _leave(" [no parent]");
42905 return NULL;
42906 }
42907@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
42908 /* allocate and initialise a cookie */
42909 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
42910 if (!cookie) {
42911- fscache_stat(&fscache_n_acquires_oom);
42912+ fscache_stat_unchecked(&fscache_n_acquires_oom);
42913 _leave(" [ENOMEM]");
42914 return NULL;
42915 }
42916@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
42917
42918 switch (cookie->def->type) {
42919 case FSCACHE_COOKIE_TYPE_INDEX:
42920- fscache_stat(&fscache_n_cookie_index);
42921+ fscache_stat_unchecked(&fscache_n_cookie_index);
42922 break;
42923 case FSCACHE_COOKIE_TYPE_DATAFILE:
42924- fscache_stat(&fscache_n_cookie_data);
42925+ fscache_stat_unchecked(&fscache_n_cookie_data);
42926 break;
42927 default:
42928- fscache_stat(&fscache_n_cookie_special);
42929+ fscache_stat_unchecked(&fscache_n_cookie_special);
42930 break;
42931 }
42932
42933@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
42934 if (fscache_acquire_non_index_cookie(cookie) < 0) {
42935 atomic_dec(&parent->n_children);
42936 __fscache_cookie_put(cookie);
42937- fscache_stat(&fscache_n_acquires_nobufs);
42938+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
42939 _leave(" = NULL");
42940 return NULL;
42941 }
42942 }
42943
42944- fscache_stat(&fscache_n_acquires_ok);
42945+ fscache_stat_unchecked(&fscache_n_acquires_ok);
42946 _leave(" = %p", cookie);
42947 return cookie;
42948 }
42949@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
42950 cache = fscache_select_cache_for_object(cookie->parent);
42951 if (!cache) {
42952 up_read(&fscache_addremove_sem);
42953- fscache_stat(&fscache_n_acquires_no_cache);
42954+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
42955 _leave(" = -ENOMEDIUM [no cache]");
42956 return -ENOMEDIUM;
42957 }
42958@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
42959 object = cache->ops->alloc_object(cache, cookie);
42960 fscache_stat_d(&fscache_n_cop_alloc_object);
42961 if (IS_ERR(object)) {
42962- fscache_stat(&fscache_n_object_no_alloc);
42963+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
42964 ret = PTR_ERR(object);
42965 goto error;
42966 }
42967
42968- fscache_stat(&fscache_n_object_alloc);
42969+ fscache_stat_unchecked(&fscache_n_object_alloc);
42970
42971 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
42972
42973@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
42974 struct fscache_object *object;
42975 struct hlist_node *_p;
42976
42977- fscache_stat(&fscache_n_updates);
42978+ fscache_stat_unchecked(&fscache_n_updates);
42979
42980 if (!cookie) {
42981- fscache_stat(&fscache_n_updates_null);
42982+ fscache_stat_unchecked(&fscache_n_updates_null);
42983 _leave(" [no cookie]");
42984 return;
42985 }
42986@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
42987 struct fscache_object *object;
42988 unsigned long event;
42989
42990- fscache_stat(&fscache_n_relinquishes);
42991+ fscache_stat_unchecked(&fscache_n_relinquishes);
42992 if (retire)
42993- fscache_stat(&fscache_n_relinquishes_retire);
42994+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
42995
42996 if (!cookie) {
42997- fscache_stat(&fscache_n_relinquishes_null);
42998+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
42999 _leave(" [no cookie]");
43000 return;
43001 }
43002@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
43003
43004 /* wait for the cookie to finish being instantiated (or to fail) */
43005 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
43006- fscache_stat(&fscache_n_relinquishes_waitcrt);
43007+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
43008 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
43009 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
43010 }
43011diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
43012index f6aad48..88dcf26 100644
43013--- a/fs/fscache/internal.h
43014+++ b/fs/fscache/internal.h
43015@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
43016 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
43017 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
43018
43019-extern atomic_t fscache_n_op_pend;
43020-extern atomic_t fscache_n_op_run;
43021-extern atomic_t fscache_n_op_enqueue;
43022-extern atomic_t fscache_n_op_deferred_release;
43023-extern atomic_t fscache_n_op_release;
43024-extern atomic_t fscache_n_op_gc;
43025-extern atomic_t fscache_n_op_cancelled;
43026-extern atomic_t fscache_n_op_rejected;
43027+extern atomic_unchecked_t fscache_n_op_pend;
43028+extern atomic_unchecked_t fscache_n_op_run;
43029+extern atomic_unchecked_t fscache_n_op_enqueue;
43030+extern atomic_unchecked_t fscache_n_op_deferred_release;
43031+extern atomic_unchecked_t fscache_n_op_release;
43032+extern atomic_unchecked_t fscache_n_op_gc;
43033+extern atomic_unchecked_t fscache_n_op_cancelled;
43034+extern atomic_unchecked_t fscache_n_op_rejected;
43035
43036-extern atomic_t fscache_n_attr_changed;
43037-extern atomic_t fscache_n_attr_changed_ok;
43038-extern atomic_t fscache_n_attr_changed_nobufs;
43039-extern atomic_t fscache_n_attr_changed_nomem;
43040-extern atomic_t fscache_n_attr_changed_calls;
43041+extern atomic_unchecked_t fscache_n_attr_changed;
43042+extern atomic_unchecked_t fscache_n_attr_changed_ok;
43043+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
43044+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
43045+extern atomic_unchecked_t fscache_n_attr_changed_calls;
43046
43047-extern atomic_t fscache_n_allocs;
43048-extern atomic_t fscache_n_allocs_ok;
43049-extern atomic_t fscache_n_allocs_wait;
43050-extern atomic_t fscache_n_allocs_nobufs;
43051-extern atomic_t fscache_n_allocs_intr;
43052-extern atomic_t fscache_n_allocs_object_dead;
43053-extern atomic_t fscache_n_alloc_ops;
43054-extern atomic_t fscache_n_alloc_op_waits;
43055+extern atomic_unchecked_t fscache_n_allocs;
43056+extern atomic_unchecked_t fscache_n_allocs_ok;
43057+extern atomic_unchecked_t fscache_n_allocs_wait;
43058+extern atomic_unchecked_t fscache_n_allocs_nobufs;
43059+extern atomic_unchecked_t fscache_n_allocs_intr;
43060+extern atomic_unchecked_t fscache_n_allocs_object_dead;
43061+extern atomic_unchecked_t fscache_n_alloc_ops;
43062+extern atomic_unchecked_t fscache_n_alloc_op_waits;
43063
43064-extern atomic_t fscache_n_retrievals;
43065-extern atomic_t fscache_n_retrievals_ok;
43066-extern atomic_t fscache_n_retrievals_wait;
43067-extern atomic_t fscache_n_retrievals_nodata;
43068-extern atomic_t fscache_n_retrievals_nobufs;
43069-extern atomic_t fscache_n_retrievals_intr;
43070-extern atomic_t fscache_n_retrievals_nomem;
43071-extern atomic_t fscache_n_retrievals_object_dead;
43072-extern atomic_t fscache_n_retrieval_ops;
43073-extern atomic_t fscache_n_retrieval_op_waits;
43074+extern atomic_unchecked_t fscache_n_retrievals;
43075+extern atomic_unchecked_t fscache_n_retrievals_ok;
43076+extern atomic_unchecked_t fscache_n_retrievals_wait;
43077+extern atomic_unchecked_t fscache_n_retrievals_nodata;
43078+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
43079+extern atomic_unchecked_t fscache_n_retrievals_intr;
43080+extern atomic_unchecked_t fscache_n_retrievals_nomem;
43081+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
43082+extern atomic_unchecked_t fscache_n_retrieval_ops;
43083+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
43084
43085-extern atomic_t fscache_n_stores;
43086-extern atomic_t fscache_n_stores_ok;
43087-extern atomic_t fscache_n_stores_again;
43088-extern atomic_t fscache_n_stores_nobufs;
43089-extern atomic_t fscache_n_stores_oom;
43090-extern atomic_t fscache_n_store_ops;
43091-extern atomic_t fscache_n_store_calls;
43092-extern atomic_t fscache_n_store_pages;
43093-extern atomic_t fscache_n_store_radix_deletes;
43094-extern atomic_t fscache_n_store_pages_over_limit;
43095+extern atomic_unchecked_t fscache_n_stores;
43096+extern atomic_unchecked_t fscache_n_stores_ok;
43097+extern atomic_unchecked_t fscache_n_stores_again;
43098+extern atomic_unchecked_t fscache_n_stores_nobufs;
43099+extern atomic_unchecked_t fscache_n_stores_oom;
43100+extern atomic_unchecked_t fscache_n_store_ops;
43101+extern atomic_unchecked_t fscache_n_store_calls;
43102+extern atomic_unchecked_t fscache_n_store_pages;
43103+extern atomic_unchecked_t fscache_n_store_radix_deletes;
43104+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
43105
43106-extern atomic_t fscache_n_store_vmscan_not_storing;
43107-extern atomic_t fscache_n_store_vmscan_gone;
43108-extern atomic_t fscache_n_store_vmscan_busy;
43109-extern atomic_t fscache_n_store_vmscan_cancelled;
43110+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43111+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
43112+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
43113+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43114
43115-extern atomic_t fscache_n_marks;
43116-extern atomic_t fscache_n_uncaches;
43117+extern atomic_unchecked_t fscache_n_marks;
43118+extern atomic_unchecked_t fscache_n_uncaches;
43119
43120-extern atomic_t fscache_n_acquires;
43121-extern atomic_t fscache_n_acquires_null;
43122-extern atomic_t fscache_n_acquires_no_cache;
43123-extern atomic_t fscache_n_acquires_ok;
43124-extern atomic_t fscache_n_acquires_nobufs;
43125-extern atomic_t fscache_n_acquires_oom;
43126+extern atomic_unchecked_t fscache_n_acquires;
43127+extern atomic_unchecked_t fscache_n_acquires_null;
43128+extern atomic_unchecked_t fscache_n_acquires_no_cache;
43129+extern atomic_unchecked_t fscache_n_acquires_ok;
43130+extern atomic_unchecked_t fscache_n_acquires_nobufs;
43131+extern atomic_unchecked_t fscache_n_acquires_oom;
43132
43133-extern atomic_t fscache_n_updates;
43134-extern atomic_t fscache_n_updates_null;
43135-extern atomic_t fscache_n_updates_run;
43136+extern atomic_unchecked_t fscache_n_updates;
43137+extern atomic_unchecked_t fscache_n_updates_null;
43138+extern atomic_unchecked_t fscache_n_updates_run;
43139
43140-extern atomic_t fscache_n_relinquishes;
43141-extern atomic_t fscache_n_relinquishes_null;
43142-extern atomic_t fscache_n_relinquishes_waitcrt;
43143-extern atomic_t fscache_n_relinquishes_retire;
43144+extern atomic_unchecked_t fscache_n_relinquishes;
43145+extern atomic_unchecked_t fscache_n_relinquishes_null;
43146+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43147+extern atomic_unchecked_t fscache_n_relinquishes_retire;
43148
43149-extern atomic_t fscache_n_cookie_index;
43150-extern atomic_t fscache_n_cookie_data;
43151-extern atomic_t fscache_n_cookie_special;
43152+extern atomic_unchecked_t fscache_n_cookie_index;
43153+extern atomic_unchecked_t fscache_n_cookie_data;
43154+extern atomic_unchecked_t fscache_n_cookie_special;
43155
43156-extern atomic_t fscache_n_object_alloc;
43157-extern atomic_t fscache_n_object_no_alloc;
43158-extern atomic_t fscache_n_object_lookups;
43159-extern atomic_t fscache_n_object_lookups_negative;
43160-extern atomic_t fscache_n_object_lookups_positive;
43161-extern atomic_t fscache_n_object_lookups_timed_out;
43162-extern atomic_t fscache_n_object_created;
43163-extern atomic_t fscache_n_object_avail;
43164-extern atomic_t fscache_n_object_dead;
43165+extern atomic_unchecked_t fscache_n_object_alloc;
43166+extern atomic_unchecked_t fscache_n_object_no_alloc;
43167+extern atomic_unchecked_t fscache_n_object_lookups;
43168+extern atomic_unchecked_t fscache_n_object_lookups_negative;
43169+extern atomic_unchecked_t fscache_n_object_lookups_positive;
43170+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
43171+extern atomic_unchecked_t fscache_n_object_created;
43172+extern atomic_unchecked_t fscache_n_object_avail;
43173+extern atomic_unchecked_t fscache_n_object_dead;
43174
43175-extern atomic_t fscache_n_checkaux_none;
43176-extern atomic_t fscache_n_checkaux_okay;
43177-extern atomic_t fscache_n_checkaux_update;
43178-extern atomic_t fscache_n_checkaux_obsolete;
43179+extern atomic_unchecked_t fscache_n_checkaux_none;
43180+extern atomic_unchecked_t fscache_n_checkaux_okay;
43181+extern atomic_unchecked_t fscache_n_checkaux_update;
43182+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
43183
43184 extern atomic_t fscache_n_cop_alloc_object;
43185 extern atomic_t fscache_n_cop_lookup_object;
43186@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
43187 atomic_inc(stat);
43188 }
43189
43190+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
43191+{
43192+ atomic_inc_unchecked(stat);
43193+}
43194+
43195 static inline void fscache_stat_d(atomic_t *stat)
43196 {
43197 atomic_dec(stat);
43198@@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
43199
43200 #define __fscache_stat(stat) (NULL)
43201 #define fscache_stat(stat) do {} while (0)
43202+#define fscache_stat_unchecked(stat) do {} while (0)
43203 #define fscache_stat_d(stat) do {} while (0)
43204 #endif
43205
43206diff --git a/fs/fscache/object.c b/fs/fscache/object.c
43207index b6b897c..0ffff9c 100644
43208--- a/fs/fscache/object.c
43209+++ b/fs/fscache/object.c
43210@@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
43211 /* update the object metadata on disk */
43212 case FSCACHE_OBJECT_UPDATING:
43213 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
43214- fscache_stat(&fscache_n_updates_run);
43215+ fscache_stat_unchecked(&fscache_n_updates_run);
43216 fscache_stat(&fscache_n_cop_update_object);
43217 object->cache->ops->update_object(object);
43218 fscache_stat_d(&fscache_n_cop_update_object);
43219@@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
43220 spin_lock(&object->lock);
43221 object->state = FSCACHE_OBJECT_DEAD;
43222 spin_unlock(&object->lock);
43223- fscache_stat(&fscache_n_object_dead);
43224+ fscache_stat_unchecked(&fscache_n_object_dead);
43225 goto terminal_transit;
43226
43227 /* handle the parent cache of this object being withdrawn from
43228@@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
43229 spin_lock(&object->lock);
43230 object->state = FSCACHE_OBJECT_DEAD;
43231 spin_unlock(&object->lock);
43232- fscache_stat(&fscache_n_object_dead);
43233+ fscache_stat_unchecked(&fscache_n_object_dead);
43234 goto terminal_transit;
43235
43236 /* complain about the object being woken up once it is
43237@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
43238 parent->cookie->def->name, cookie->def->name,
43239 object->cache->tag->name);
43240
43241- fscache_stat(&fscache_n_object_lookups);
43242+ fscache_stat_unchecked(&fscache_n_object_lookups);
43243 fscache_stat(&fscache_n_cop_lookup_object);
43244 ret = object->cache->ops->lookup_object(object);
43245 fscache_stat_d(&fscache_n_cop_lookup_object);
43246@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
43247 if (ret == -ETIMEDOUT) {
43248 /* probably stuck behind another object, so move this one to
43249 * the back of the queue */
43250- fscache_stat(&fscache_n_object_lookups_timed_out);
43251+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
43252 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
43253 }
43254
43255@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
43256
43257 spin_lock(&object->lock);
43258 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
43259- fscache_stat(&fscache_n_object_lookups_negative);
43260+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
43261
43262 /* transit here to allow write requests to begin stacking up
43263 * and read requests to begin returning ENODATA */
43264@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
43265 * result, in which case there may be data available */
43266 spin_lock(&object->lock);
43267 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
43268- fscache_stat(&fscache_n_object_lookups_positive);
43269+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
43270
43271 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
43272
43273@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
43274 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
43275 } else {
43276 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
43277- fscache_stat(&fscache_n_object_created);
43278+ fscache_stat_unchecked(&fscache_n_object_created);
43279
43280 object->state = FSCACHE_OBJECT_AVAILABLE;
43281 spin_unlock(&object->lock);
43282@@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
43283 fscache_enqueue_dependents(object);
43284
43285 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
43286- fscache_stat(&fscache_n_object_avail);
43287+ fscache_stat_unchecked(&fscache_n_object_avail);
43288
43289 _leave("");
43290 }
43291@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
43292 enum fscache_checkaux result;
43293
43294 if (!object->cookie->def->check_aux) {
43295- fscache_stat(&fscache_n_checkaux_none);
43296+ fscache_stat_unchecked(&fscache_n_checkaux_none);
43297 return FSCACHE_CHECKAUX_OKAY;
43298 }
43299
43300@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
43301 switch (result) {
43302 /* entry okay as is */
43303 case FSCACHE_CHECKAUX_OKAY:
43304- fscache_stat(&fscache_n_checkaux_okay);
43305+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
43306 break;
43307
43308 /* entry requires update */
43309 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
43310- fscache_stat(&fscache_n_checkaux_update);
43311+ fscache_stat_unchecked(&fscache_n_checkaux_update);
43312 break;
43313
43314 /* entry requires deletion */
43315 case FSCACHE_CHECKAUX_OBSOLETE:
43316- fscache_stat(&fscache_n_checkaux_obsolete);
43317+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
43318 break;
43319
43320 default:
43321diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
43322index 30afdfa..2256596 100644
43323--- a/fs/fscache/operation.c
43324+++ b/fs/fscache/operation.c
43325@@ -17,7 +17,7 @@
43326 #include <linux/slab.h>
43327 #include "internal.h"
43328
43329-atomic_t fscache_op_debug_id;
43330+atomic_unchecked_t fscache_op_debug_id;
43331 EXPORT_SYMBOL(fscache_op_debug_id);
43332
43333 /**
43334@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
43335 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
43336 ASSERTCMP(atomic_read(&op->usage), >, 0);
43337
43338- fscache_stat(&fscache_n_op_enqueue);
43339+ fscache_stat_unchecked(&fscache_n_op_enqueue);
43340 switch (op->flags & FSCACHE_OP_TYPE) {
43341 case FSCACHE_OP_ASYNC:
43342 _debug("queue async");
43343@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
43344 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
43345 if (op->processor)
43346 fscache_enqueue_operation(op);
43347- fscache_stat(&fscache_n_op_run);
43348+ fscache_stat_unchecked(&fscache_n_op_run);
43349 }
43350
43351 /*
43352@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
43353 if (object->n_ops > 1) {
43354 atomic_inc(&op->usage);
43355 list_add_tail(&op->pend_link, &object->pending_ops);
43356- fscache_stat(&fscache_n_op_pend);
43357+ fscache_stat_unchecked(&fscache_n_op_pend);
43358 } else if (!list_empty(&object->pending_ops)) {
43359 atomic_inc(&op->usage);
43360 list_add_tail(&op->pend_link, &object->pending_ops);
43361- fscache_stat(&fscache_n_op_pend);
43362+ fscache_stat_unchecked(&fscache_n_op_pend);
43363 fscache_start_operations(object);
43364 } else {
43365 ASSERTCMP(object->n_in_progress, ==, 0);
43366@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
43367 object->n_exclusive++; /* reads and writes must wait */
43368 atomic_inc(&op->usage);
43369 list_add_tail(&op->pend_link, &object->pending_ops);
43370- fscache_stat(&fscache_n_op_pend);
43371+ fscache_stat_unchecked(&fscache_n_op_pend);
43372 ret = 0;
43373 } else {
43374 /* not allowed to submit ops in any other state */
43375@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
43376 if (object->n_exclusive > 0) {
43377 atomic_inc(&op->usage);
43378 list_add_tail(&op->pend_link, &object->pending_ops);
43379- fscache_stat(&fscache_n_op_pend);
43380+ fscache_stat_unchecked(&fscache_n_op_pend);
43381 } else if (!list_empty(&object->pending_ops)) {
43382 atomic_inc(&op->usage);
43383 list_add_tail(&op->pend_link, &object->pending_ops);
43384- fscache_stat(&fscache_n_op_pend);
43385+ fscache_stat_unchecked(&fscache_n_op_pend);
43386 fscache_start_operations(object);
43387 } else {
43388 ASSERTCMP(object->n_exclusive, ==, 0);
43389@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
43390 object->n_ops++;
43391 atomic_inc(&op->usage);
43392 list_add_tail(&op->pend_link, &object->pending_ops);
43393- fscache_stat(&fscache_n_op_pend);
43394+ fscache_stat_unchecked(&fscache_n_op_pend);
43395 ret = 0;
43396 } else if (object->state == FSCACHE_OBJECT_DYING ||
43397 object->state == FSCACHE_OBJECT_LC_DYING ||
43398 object->state == FSCACHE_OBJECT_WITHDRAWING) {
43399- fscache_stat(&fscache_n_op_rejected);
43400+ fscache_stat_unchecked(&fscache_n_op_rejected);
43401 ret = -ENOBUFS;
43402 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
43403 fscache_report_unexpected_submission(object, op, ostate);
43404@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
43405
43406 ret = -EBUSY;
43407 if (!list_empty(&op->pend_link)) {
43408- fscache_stat(&fscache_n_op_cancelled);
43409+ fscache_stat_unchecked(&fscache_n_op_cancelled);
43410 list_del_init(&op->pend_link);
43411 object->n_ops--;
43412 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
43413@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
43414 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
43415 BUG();
43416
43417- fscache_stat(&fscache_n_op_release);
43418+ fscache_stat_unchecked(&fscache_n_op_release);
43419
43420 if (op->release) {
43421 op->release(op);
43422@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
43423 * lock, and defer it otherwise */
43424 if (!spin_trylock(&object->lock)) {
43425 _debug("defer put");
43426- fscache_stat(&fscache_n_op_deferred_release);
43427+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
43428
43429 cache = object->cache;
43430 spin_lock(&cache->op_gc_list_lock);
43431@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
43432
43433 _debug("GC DEFERRED REL OBJ%x OP%x",
43434 object->debug_id, op->debug_id);
43435- fscache_stat(&fscache_n_op_gc);
43436+ fscache_stat_unchecked(&fscache_n_op_gc);
43437
43438 ASSERTCMP(atomic_read(&op->usage), ==, 0);
43439
43440diff --git a/fs/fscache/page.c b/fs/fscache/page.c
43441index 3f7a59b..cf196cc 100644
43442--- a/fs/fscache/page.c
43443+++ b/fs/fscache/page.c
43444@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
43445 val = radix_tree_lookup(&cookie->stores, page->index);
43446 if (!val) {
43447 rcu_read_unlock();
43448- fscache_stat(&fscache_n_store_vmscan_not_storing);
43449+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
43450 __fscache_uncache_page(cookie, page);
43451 return true;
43452 }
43453@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
43454 spin_unlock(&cookie->stores_lock);
43455
43456 if (xpage) {
43457- fscache_stat(&fscache_n_store_vmscan_cancelled);
43458- fscache_stat(&fscache_n_store_radix_deletes);
43459+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
43460+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43461 ASSERTCMP(xpage, ==, page);
43462 } else {
43463- fscache_stat(&fscache_n_store_vmscan_gone);
43464+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
43465 }
43466
43467 wake_up_bit(&cookie->flags, 0);
43468@@ -107,7 +107,7 @@ page_busy:
43469 /* we might want to wait here, but that could deadlock the allocator as
43470 * the work threads writing to the cache may all end up sleeping
43471 * on memory allocation */
43472- fscache_stat(&fscache_n_store_vmscan_busy);
43473+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
43474 return false;
43475 }
43476 EXPORT_SYMBOL(__fscache_maybe_release_page);
43477@@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
43478 FSCACHE_COOKIE_STORING_TAG);
43479 if (!radix_tree_tag_get(&cookie->stores, page->index,
43480 FSCACHE_COOKIE_PENDING_TAG)) {
43481- fscache_stat(&fscache_n_store_radix_deletes);
43482+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43483 xpage = radix_tree_delete(&cookie->stores, page->index);
43484 }
43485 spin_unlock(&cookie->stores_lock);
43486@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
43487
43488 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
43489
43490- fscache_stat(&fscache_n_attr_changed_calls);
43491+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
43492
43493 if (fscache_object_is_active(object)) {
43494 fscache_stat(&fscache_n_cop_attr_changed);
43495@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43496
43497 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43498
43499- fscache_stat(&fscache_n_attr_changed);
43500+ fscache_stat_unchecked(&fscache_n_attr_changed);
43501
43502 op = kzalloc(sizeof(*op), GFP_KERNEL);
43503 if (!op) {
43504- fscache_stat(&fscache_n_attr_changed_nomem);
43505+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
43506 _leave(" = -ENOMEM");
43507 return -ENOMEM;
43508 }
43509@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43510 if (fscache_submit_exclusive_op(object, op) < 0)
43511 goto nobufs;
43512 spin_unlock(&cookie->lock);
43513- fscache_stat(&fscache_n_attr_changed_ok);
43514+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
43515 fscache_put_operation(op);
43516 _leave(" = 0");
43517 return 0;
43518@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43519 nobufs:
43520 spin_unlock(&cookie->lock);
43521 kfree(op);
43522- fscache_stat(&fscache_n_attr_changed_nobufs);
43523+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
43524 _leave(" = %d", -ENOBUFS);
43525 return -ENOBUFS;
43526 }
43527@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
43528 /* allocate a retrieval operation and attempt to submit it */
43529 op = kzalloc(sizeof(*op), GFP_NOIO);
43530 if (!op) {
43531- fscache_stat(&fscache_n_retrievals_nomem);
43532+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43533 return NULL;
43534 }
43535
43536@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
43537 return 0;
43538 }
43539
43540- fscache_stat(&fscache_n_retrievals_wait);
43541+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
43542
43543 jif = jiffies;
43544 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
43545 fscache_wait_bit_interruptible,
43546 TASK_INTERRUPTIBLE) != 0) {
43547- fscache_stat(&fscache_n_retrievals_intr);
43548+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43549 _leave(" = -ERESTARTSYS");
43550 return -ERESTARTSYS;
43551 }
43552@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
43553 */
43554 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43555 struct fscache_retrieval *op,
43556- atomic_t *stat_op_waits,
43557- atomic_t *stat_object_dead)
43558+ atomic_unchecked_t *stat_op_waits,
43559+ atomic_unchecked_t *stat_object_dead)
43560 {
43561 int ret;
43562
43563@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43564 goto check_if_dead;
43565
43566 _debug(">>> WT");
43567- fscache_stat(stat_op_waits);
43568+ fscache_stat_unchecked(stat_op_waits);
43569 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
43570 fscache_wait_bit_interruptible,
43571 TASK_INTERRUPTIBLE) < 0) {
43572@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43573
43574 check_if_dead:
43575 if (unlikely(fscache_object_is_dead(object))) {
43576- fscache_stat(stat_object_dead);
43577+ fscache_stat_unchecked(stat_object_dead);
43578 return -ENOBUFS;
43579 }
43580 return 0;
43581@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43582
43583 _enter("%p,%p,,,", cookie, page);
43584
43585- fscache_stat(&fscache_n_retrievals);
43586+ fscache_stat_unchecked(&fscache_n_retrievals);
43587
43588 if (hlist_empty(&cookie->backing_objects))
43589 goto nobufs;
43590@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43591 goto nobufs_unlock;
43592 spin_unlock(&cookie->lock);
43593
43594- fscache_stat(&fscache_n_retrieval_ops);
43595+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
43596
43597 /* pin the netfs read context in case we need to do the actual netfs
43598 * read because we've encountered a cache read failure */
43599@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43600
43601 error:
43602 if (ret == -ENOMEM)
43603- fscache_stat(&fscache_n_retrievals_nomem);
43604+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43605 else if (ret == -ERESTARTSYS)
43606- fscache_stat(&fscache_n_retrievals_intr);
43607+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43608 else if (ret == -ENODATA)
43609- fscache_stat(&fscache_n_retrievals_nodata);
43610+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43611 else if (ret < 0)
43612- fscache_stat(&fscache_n_retrievals_nobufs);
43613+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43614 else
43615- fscache_stat(&fscache_n_retrievals_ok);
43616+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
43617
43618 fscache_put_retrieval(op);
43619 _leave(" = %d", ret);
43620@@ -429,7 +429,7 @@ nobufs_unlock:
43621 spin_unlock(&cookie->lock);
43622 kfree(op);
43623 nobufs:
43624- fscache_stat(&fscache_n_retrievals_nobufs);
43625+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43626 _leave(" = -ENOBUFS");
43627 return -ENOBUFS;
43628 }
43629@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43630
43631 _enter("%p,,%d,,,", cookie, *nr_pages);
43632
43633- fscache_stat(&fscache_n_retrievals);
43634+ fscache_stat_unchecked(&fscache_n_retrievals);
43635
43636 if (hlist_empty(&cookie->backing_objects))
43637 goto nobufs;
43638@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43639 goto nobufs_unlock;
43640 spin_unlock(&cookie->lock);
43641
43642- fscache_stat(&fscache_n_retrieval_ops);
43643+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
43644
43645 /* pin the netfs read context in case we need to do the actual netfs
43646 * read because we've encountered a cache read failure */
43647@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43648
43649 error:
43650 if (ret == -ENOMEM)
43651- fscache_stat(&fscache_n_retrievals_nomem);
43652+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43653 else if (ret == -ERESTARTSYS)
43654- fscache_stat(&fscache_n_retrievals_intr);
43655+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43656 else if (ret == -ENODATA)
43657- fscache_stat(&fscache_n_retrievals_nodata);
43658+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43659 else if (ret < 0)
43660- fscache_stat(&fscache_n_retrievals_nobufs);
43661+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43662 else
43663- fscache_stat(&fscache_n_retrievals_ok);
43664+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
43665
43666 fscache_put_retrieval(op);
43667 _leave(" = %d", ret);
43668@@ -545,7 +545,7 @@ nobufs_unlock:
43669 spin_unlock(&cookie->lock);
43670 kfree(op);
43671 nobufs:
43672- fscache_stat(&fscache_n_retrievals_nobufs);
43673+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43674 _leave(" = -ENOBUFS");
43675 return -ENOBUFS;
43676 }
43677@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43678
43679 _enter("%p,%p,,,", cookie, page);
43680
43681- fscache_stat(&fscache_n_allocs);
43682+ fscache_stat_unchecked(&fscache_n_allocs);
43683
43684 if (hlist_empty(&cookie->backing_objects))
43685 goto nobufs;
43686@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43687 goto nobufs_unlock;
43688 spin_unlock(&cookie->lock);
43689
43690- fscache_stat(&fscache_n_alloc_ops);
43691+ fscache_stat_unchecked(&fscache_n_alloc_ops);
43692
43693 ret = fscache_wait_for_retrieval_activation(
43694 object, op,
43695@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43696
43697 error:
43698 if (ret == -ERESTARTSYS)
43699- fscache_stat(&fscache_n_allocs_intr);
43700+ fscache_stat_unchecked(&fscache_n_allocs_intr);
43701 else if (ret < 0)
43702- fscache_stat(&fscache_n_allocs_nobufs);
43703+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43704 else
43705- fscache_stat(&fscache_n_allocs_ok);
43706+ fscache_stat_unchecked(&fscache_n_allocs_ok);
43707
43708 fscache_put_retrieval(op);
43709 _leave(" = %d", ret);
43710@@ -625,7 +625,7 @@ nobufs_unlock:
43711 spin_unlock(&cookie->lock);
43712 kfree(op);
43713 nobufs:
43714- fscache_stat(&fscache_n_allocs_nobufs);
43715+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43716 _leave(" = -ENOBUFS");
43717 return -ENOBUFS;
43718 }
43719@@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43720
43721 spin_lock(&cookie->stores_lock);
43722
43723- fscache_stat(&fscache_n_store_calls);
43724+ fscache_stat_unchecked(&fscache_n_store_calls);
43725
43726 /* find a page to store */
43727 page = NULL;
43728@@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43729 page = results[0];
43730 _debug("gang %d [%lx]", n, page->index);
43731 if (page->index > op->store_limit) {
43732- fscache_stat(&fscache_n_store_pages_over_limit);
43733+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
43734 goto superseded;
43735 }
43736
43737@@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43738 spin_unlock(&cookie->stores_lock);
43739 spin_unlock(&object->lock);
43740
43741- fscache_stat(&fscache_n_store_pages);
43742+ fscache_stat_unchecked(&fscache_n_store_pages);
43743 fscache_stat(&fscache_n_cop_write_page);
43744 ret = object->cache->ops->write_page(op, page);
43745 fscache_stat_d(&fscache_n_cop_write_page);
43746@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43747 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43748 ASSERT(PageFsCache(page));
43749
43750- fscache_stat(&fscache_n_stores);
43751+ fscache_stat_unchecked(&fscache_n_stores);
43752
43753 op = kzalloc(sizeof(*op), GFP_NOIO);
43754 if (!op)
43755@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43756 spin_unlock(&cookie->stores_lock);
43757 spin_unlock(&object->lock);
43758
43759- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
43760+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
43761 op->store_limit = object->store_limit;
43762
43763 if (fscache_submit_op(object, &op->op) < 0)
43764@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43765
43766 spin_unlock(&cookie->lock);
43767 radix_tree_preload_end();
43768- fscache_stat(&fscache_n_store_ops);
43769- fscache_stat(&fscache_n_stores_ok);
43770+ fscache_stat_unchecked(&fscache_n_store_ops);
43771+ fscache_stat_unchecked(&fscache_n_stores_ok);
43772
43773 /* the work queue now carries its own ref on the object */
43774 fscache_put_operation(&op->op);
43775@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43776 return 0;
43777
43778 already_queued:
43779- fscache_stat(&fscache_n_stores_again);
43780+ fscache_stat_unchecked(&fscache_n_stores_again);
43781 already_pending:
43782 spin_unlock(&cookie->stores_lock);
43783 spin_unlock(&object->lock);
43784 spin_unlock(&cookie->lock);
43785 radix_tree_preload_end();
43786 kfree(op);
43787- fscache_stat(&fscache_n_stores_ok);
43788+ fscache_stat_unchecked(&fscache_n_stores_ok);
43789 _leave(" = 0");
43790 return 0;
43791
43792@@ -851,14 +851,14 @@ nobufs:
43793 spin_unlock(&cookie->lock);
43794 radix_tree_preload_end();
43795 kfree(op);
43796- fscache_stat(&fscache_n_stores_nobufs);
43797+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
43798 _leave(" = -ENOBUFS");
43799 return -ENOBUFS;
43800
43801 nomem_free:
43802 kfree(op);
43803 nomem:
43804- fscache_stat(&fscache_n_stores_oom);
43805+ fscache_stat_unchecked(&fscache_n_stores_oom);
43806 _leave(" = -ENOMEM");
43807 return -ENOMEM;
43808 }
43809@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
43810 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43811 ASSERTCMP(page, !=, NULL);
43812
43813- fscache_stat(&fscache_n_uncaches);
43814+ fscache_stat_unchecked(&fscache_n_uncaches);
43815
43816 /* cache withdrawal may beat us to it */
43817 if (!PageFsCache(page))
43818@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
43819 unsigned long loop;
43820
43821 #ifdef CONFIG_FSCACHE_STATS
43822- atomic_add(pagevec->nr, &fscache_n_marks);
43823+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
43824 #endif
43825
43826 for (loop = 0; loop < pagevec->nr; loop++) {
43827diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
43828index 4765190..2a067f2 100644
43829--- a/fs/fscache/stats.c
43830+++ b/fs/fscache/stats.c
43831@@ -18,95 +18,95 @@
43832 /*
43833 * operation counters
43834 */
43835-atomic_t fscache_n_op_pend;
43836-atomic_t fscache_n_op_run;
43837-atomic_t fscache_n_op_enqueue;
43838-atomic_t fscache_n_op_requeue;
43839-atomic_t fscache_n_op_deferred_release;
43840-atomic_t fscache_n_op_release;
43841-atomic_t fscache_n_op_gc;
43842-atomic_t fscache_n_op_cancelled;
43843-atomic_t fscache_n_op_rejected;
43844+atomic_unchecked_t fscache_n_op_pend;
43845+atomic_unchecked_t fscache_n_op_run;
43846+atomic_unchecked_t fscache_n_op_enqueue;
43847+atomic_unchecked_t fscache_n_op_requeue;
43848+atomic_unchecked_t fscache_n_op_deferred_release;
43849+atomic_unchecked_t fscache_n_op_release;
43850+atomic_unchecked_t fscache_n_op_gc;
43851+atomic_unchecked_t fscache_n_op_cancelled;
43852+atomic_unchecked_t fscache_n_op_rejected;
43853
43854-atomic_t fscache_n_attr_changed;
43855-atomic_t fscache_n_attr_changed_ok;
43856-atomic_t fscache_n_attr_changed_nobufs;
43857-atomic_t fscache_n_attr_changed_nomem;
43858-atomic_t fscache_n_attr_changed_calls;
43859+atomic_unchecked_t fscache_n_attr_changed;
43860+atomic_unchecked_t fscache_n_attr_changed_ok;
43861+atomic_unchecked_t fscache_n_attr_changed_nobufs;
43862+atomic_unchecked_t fscache_n_attr_changed_nomem;
43863+atomic_unchecked_t fscache_n_attr_changed_calls;
43864
43865-atomic_t fscache_n_allocs;
43866-atomic_t fscache_n_allocs_ok;
43867-atomic_t fscache_n_allocs_wait;
43868-atomic_t fscache_n_allocs_nobufs;
43869-atomic_t fscache_n_allocs_intr;
43870-atomic_t fscache_n_allocs_object_dead;
43871-atomic_t fscache_n_alloc_ops;
43872-atomic_t fscache_n_alloc_op_waits;
43873+atomic_unchecked_t fscache_n_allocs;
43874+atomic_unchecked_t fscache_n_allocs_ok;
43875+atomic_unchecked_t fscache_n_allocs_wait;
43876+atomic_unchecked_t fscache_n_allocs_nobufs;
43877+atomic_unchecked_t fscache_n_allocs_intr;
43878+atomic_unchecked_t fscache_n_allocs_object_dead;
43879+atomic_unchecked_t fscache_n_alloc_ops;
43880+atomic_unchecked_t fscache_n_alloc_op_waits;
43881
43882-atomic_t fscache_n_retrievals;
43883-atomic_t fscache_n_retrievals_ok;
43884-atomic_t fscache_n_retrievals_wait;
43885-atomic_t fscache_n_retrievals_nodata;
43886-atomic_t fscache_n_retrievals_nobufs;
43887-atomic_t fscache_n_retrievals_intr;
43888-atomic_t fscache_n_retrievals_nomem;
43889-atomic_t fscache_n_retrievals_object_dead;
43890-atomic_t fscache_n_retrieval_ops;
43891-atomic_t fscache_n_retrieval_op_waits;
43892+atomic_unchecked_t fscache_n_retrievals;
43893+atomic_unchecked_t fscache_n_retrievals_ok;
43894+atomic_unchecked_t fscache_n_retrievals_wait;
43895+atomic_unchecked_t fscache_n_retrievals_nodata;
43896+atomic_unchecked_t fscache_n_retrievals_nobufs;
43897+atomic_unchecked_t fscache_n_retrievals_intr;
43898+atomic_unchecked_t fscache_n_retrievals_nomem;
43899+atomic_unchecked_t fscache_n_retrievals_object_dead;
43900+atomic_unchecked_t fscache_n_retrieval_ops;
43901+atomic_unchecked_t fscache_n_retrieval_op_waits;
43902
43903-atomic_t fscache_n_stores;
43904-atomic_t fscache_n_stores_ok;
43905-atomic_t fscache_n_stores_again;
43906-atomic_t fscache_n_stores_nobufs;
43907-atomic_t fscache_n_stores_oom;
43908-atomic_t fscache_n_store_ops;
43909-atomic_t fscache_n_store_calls;
43910-atomic_t fscache_n_store_pages;
43911-atomic_t fscache_n_store_radix_deletes;
43912-atomic_t fscache_n_store_pages_over_limit;
43913+atomic_unchecked_t fscache_n_stores;
43914+atomic_unchecked_t fscache_n_stores_ok;
43915+atomic_unchecked_t fscache_n_stores_again;
43916+atomic_unchecked_t fscache_n_stores_nobufs;
43917+atomic_unchecked_t fscache_n_stores_oom;
43918+atomic_unchecked_t fscache_n_store_ops;
43919+atomic_unchecked_t fscache_n_store_calls;
43920+atomic_unchecked_t fscache_n_store_pages;
43921+atomic_unchecked_t fscache_n_store_radix_deletes;
43922+atomic_unchecked_t fscache_n_store_pages_over_limit;
43923
43924-atomic_t fscache_n_store_vmscan_not_storing;
43925-atomic_t fscache_n_store_vmscan_gone;
43926-atomic_t fscache_n_store_vmscan_busy;
43927-atomic_t fscache_n_store_vmscan_cancelled;
43928+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43929+atomic_unchecked_t fscache_n_store_vmscan_gone;
43930+atomic_unchecked_t fscache_n_store_vmscan_busy;
43931+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43932
43933-atomic_t fscache_n_marks;
43934-atomic_t fscache_n_uncaches;
43935+atomic_unchecked_t fscache_n_marks;
43936+atomic_unchecked_t fscache_n_uncaches;
43937
43938-atomic_t fscache_n_acquires;
43939-atomic_t fscache_n_acquires_null;
43940-atomic_t fscache_n_acquires_no_cache;
43941-atomic_t fscache_n_acquires_ok;
43942-atomic_t fscache_n_acquires_nobufs;
43943-atomic_t fscache_n_acquires_oom;
43944+atomic_unchecked_t fscache_n_acquires;
43945+atomic_unchecked_t fscache_n_acquires_null;
43946+atomic_unchecked_t fscache_n_acquires_no_cache;
43947+atomic_unchecked_t fscache_n_acquires_ok;
43948+atomic_unchecked_t fscache_n_acquires_nobufs;
43949+atomic_unchecked_t fscache_n_acquires_oom;
43950
43951-atomic_t fscache_n_updates;
43952-atomic_t fscache_n_updates_null;
43953-atomic_t fscache_n_updates_run;
43954+atomic_unchecked_t fscache_n_updates;
43955+atomic_unchecked_t fscache_n_updates_null;
43956+atomic_unchecked_t fscache_n_updates_run;
43957
43958-atomic_t fscache_n_relinquishes;
43959-atomic_t fscache_n_relinquishes_null;
43960-atomic_t fscache_n_relinquishes_waitcrt;
43961-atomic_t fscache_n_relinquishes_retire;
43962+atomic_unchecked_t fscache_n_relinquishes;
43963+atomic_unchecked_t fscache_n_relinquishes_null;
43964+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43965+atomic_unchecked_t fscache_n_relinquishes_retire;
43966
43967-atomic_t fscache_n_cookie_index;
43968-atomic_t fscache_n_cookie_data;
43969-atomic_t fscache_n_cookie_special;
43970+atomic_unchecked_t fscache_n_cookie_index;
43971+atomic_unchecked_t fscache_n_cookie_data;
43972+atomic_unchecked_t fscache_n_cookie_special;
43973
43974-atomic_t fscache_n_object_alloc;
43975-atomic_t fscache_n_object_no_alloc;
43976-atomic_t fscache_n_object_lookups;
43977-atomic_t fscache_n_object_lookups_negative;
43978-atomic_t fscache_n_object_lookups_positive;
43979-atomic_t fscache_n_object_lookups_timed_out;
43980-atomic_t fscache_n_object_created;
43981-atomic_t fscache_n_object_avail;
43982-atomic_t fscache_n_object_dead;
43983+atomic_unchecked_t fscache_n_object_alloc;
43984+atomic_unchecked_t fscache_n_object_no_alloc;
43985+atomic_unchecked_t fscache_n_object_lookups;
43986+atomic_unchecked_t fscache_n_object_lookups_negative;
43987+atomic_unchecked_t fscache_n_object_lookups_positive;
43988+atomic_unchecked_t fscache_n_object_lookups_timed_out;
43989+atomic_unchecked_t fscache_n_object_created;
43990+atomic_unchecked_t fscache_n_object_avail;
43991+atomic_unchecked_t fscache_n_object_dead;
43992
43993-atomic_t fscache_n_checkaux_none;
43994-atomic_t fscache_n_checkaux_okay;
43995-atomic_t fscache_n_checkaux_update;
43996-atomic_t fscache_n_checkaux_obsolete;
43997+atomic_unchecked_t fscache_n_checkaux_none;
43998+atomic_unchecked_t fscache_n_checkaux_okay;
43999+atomic_unchecked_t fscache_n_checkaux_update;
44000+atomic_unchecked_t fscache_n_checkaux_obsolete;
44001
44002 atomic_t fscache_n_cop_alloc_object;
44003 atomic_t fscache_n_cop_lookup_object;
44004@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
44005 seq_puts(m, "FS-Cache statistics\n");
44006
44007 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
44008- atomic_read(&fscache_n_cookie_index),
44009- atomic_read(&fscache_n_cookie_data),
44010- atomic_read(&fscache_n_cookie_special));
44011+ atomic_read_unchecked(&fscache_n_cookie_index),
44012+ atomic_read_unchecked(&fscache_n_cookie_data),
44013+ atomic_read_unchecked(&fscache_n_cookie_special));
44014
44015 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
44016- atomic_read(&fscache_n_object_alloc),
44017- atomic_read(&fscache_n_object_no_alloc),
44018- atomic_read(&fscache_n_object_avail),
44019- atomic_read(&fscache_n_object_dead));
44020+ atomic_read_unchecked(&fscache_n_object_alloc),
44021+ atomic_read_unchecked(&fscache_n_object_no_alloc),
44022+ atomic_read_unchecked(&fscache_n_object_avail),
44023+ atomic_read_unchecked(&fscache_n_object_dead));
44024 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
44025- atomic_read(&fscache_n_checkaux_none),
44026- atomic_read(&fscache_n_checkaux_okay),
44027- atomic_read(&fscache_n_checkaux_update),
44028- atomic_read(&fscache_n_checkaux_obsolete));
44029+ atomic_read_unchecked(&fscache_n_checkaux_none),
44030+ atomic_read_unchecked(&fscache_n_checkaux_okay),
44031+ atomic_read_unchecked(&fscache_n_checkaux_update),
44032+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
44033
44034 seq_printf(m, "Pages : mrk=%u unc=%u\n",
44035- atomic_read(&fscache_n_marks),
44036- atomic_read(&fscache_n_uncaches));
44037+ atomic_read_unchecked(&fscache_n_marks),
44038+ atomic_read_unchecked(&fscache_n_uncaches));
44039
44040 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
44041 " oom=%u\n",
44042- atomic_read(&fscache_n_acquires),
44043- atomic_read(&fscache_n_acquires_null),
44044- atomic_read(&fscache_n_acquires_no_cache),
44045- atomic_read(&fscache_n_acquires_ok),
44046- atomic_read(&fscache_n_acquires_nobufs),
44047- atomic_read(&fscache_n_acquires_oom));
44048+ atomic_read_unchecked(&fscache_n_acquires),
44049+ atomic_read_unchecked(&fscache_n_acquires_null),
44050+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
44051+ atomic_read_unchecked(&fscache_n_acquires_ok),
44052+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
44053+ atomic_read_unchecked(&fscache_n_acquires_oom));
44054
44055 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
44056- atomic_read(&fscache_n_object_lookups),
44057- atomic_read(&fscache_n_object_lookups_negative),
44058- atomic_read(&fscache_n_object_lookups_positive),
44059- atomic_read(&fscache_n_object_created),
44060- atomic_read(&fscache_n_object_lookups_timed_out));
44061+ atomic_read_unchecked(&fscache_n_object_lookups),
44062+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
44063+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
44064+ atomic_read_unchecked(&fscache_n_object_created),
44065+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
44066
44067 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
44068- atomic_read(&fscache_n_updates),
44069- atomic_read(&fscache_n_updates_null),
44070- atomic_read(&fscache_n_updates_run));
44071+ atomic_read_unchecked(&fscache_n_updates),
44072+ atomic_read_unchecked(&fscache_n_updates_null),
44073+ atomic_read_unchecked(&fscache_n_updates_run));
44074
44075 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
44076- atomic_read(&fscache_n_relinquishes),
44077- atomic_read(&fscache_n_relinquishes_null),
44078- atomic_read(&fscache_n_relinquishes_waitcrt),
44079- atomic_read(&fscache_n_relinquishes_retire));
44080+ atomic_read_unchecked(&fscache_n_relinquishes),
44081+ atomic_read_unchecked(&fscache_n_relinquishes_null),
44082+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
44083+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
44084
44085 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
44086- atomic_read(&fscache_n_attr_changed),
44087- atomic_read(&fscache_n_attr_changed_ok),
44088- atomic_read(&fscache_n_attr_changed_nobufs),
44089- atomic_read(&fscache_n_attr_changed_nomem),
44090- atomic_read(&fscache_n_attr_changed_calls));
44091+ atomic_read_unchecked(&fscache_n_attr_changed),
44092+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
44093+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
44094+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
44095+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
44096
44097 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
44098- atomic_read(&fscache_n_allocs),
44099- atomic_read(&fscache_n_allocs_ok),
44100- atomic_read(&fscache_n_allocs_wait),
44101- atomic_read(&fscache_n_allocs_nobufs),
44102- atomic_read(&fscache_n_allocs_intr));
44103+ atomic_read_unchecked(&fscache_n_allocs),
44104+ atomic_read_unchecked(&fscache_n_allocs_ok),
44105+ atomic_read_unchecked(&fscache_n_allocs_wait),
44106+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
44107+ atomic_read_unchecked(&fscache_n_allocs_intr));
44108 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
44109- atomic_read(&fscache_n_alloc_ops),
44110- atomic_read(&fscache_n_alloc_op_waits),
44111- atomic_read(&fscache_n_allocs_object_dead));
44112+ atomic_read_unchecked(&fscache_n_alloc_ops),
44113+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
44114+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
44115
44116 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
44117 " int=%u oom=%u\n",
44118- atomic_read(&fscache_n_retrievals),
44119- atomic_read(&fscache_n_retrievals_ok),
44120- atomic_read(&fscache_n_retrievals_wait),
44121- atomic_read(&fscache_n_retrievals_nodata),
44122- atomic_read(&fscache_n_retrievals_nobufs),
44123- atomic_read(&fscache_n_retrievals_intr),
44124- atomic_read(&fscache_n_retrievals_nomem));
44125+ atomic_read_unchecked(&fscache_n_retrievals),
44126+ atomic_read_unchecked(&fscache_n_retrievals_ok),
44127+ atomic_read_unchecked(&fscache_n_retrievals_wait),
44128+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
44129+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
44130+ atomic_read_unchecked(&fscache_n_retrievals_intr),
44131+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
44132 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
44133- atomic_read(&fscache_n_retrieval_ops),
44134- atomic_read(&fscache_n_retrieval_op_waits),
44135- atomic_read(&fscache_n_retrievals_object_dead));
44136+ atomic_read_unchecked(&fscache_n_retrieval_ops),
44137+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
44138+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
44139
44140 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
44141- atomic_read(&fscache_n_stores),
44142- atomic_read(&fscache_n_stores_ok),
44143- atomic_read(&fscache_n_stores_again),
44144- atomic_read(&fscache_n_stores_nobufs),
44145- atomic_read(&fscache_n_stores_oom));
44146+ atomic_read_unchecked(&fscache_n_stores),
44147+ atomic_read_unchecked(&fscache_n_stores_ok),
44148+ atomic_read_unchecked(&fscache_n_stores_again),
44149+ atomic_read_unchecked(&fscache_n_stores_nobufs),
44150+ atomic_read_unchecked(&fscache_n_stores_oom));
44151 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
44152- atomic_read(&fscache_n_store_ops),
44153- atomic_read(&fscache_n_store_calls),
44154- atomic_read(&fscache_n_store_pages),
44155- atomic_read(&fscache_n_store_radix_deletes),
44156- atomic_read(&fscache_n_store_pages_over_limit));
44157+ atomic_read_unchecked(&fscache_n_store_ops),
44158+ atomic_read_unchecked(&fscache_n_store_calls),
44159+ atomic_read_unchecked(&fscache_n_store_pages),
44160+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
44161+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
44162
44163 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
44164- atomic_read(&fscache_n_store_vmscan_not_storing),
44165- atomic_read(&fscache_n_store_vmscan_gone),
44166- atomic_read(&fscache_n_store_vmscan_busy),
44167- atomic_read(&fscache_n_store_vmscan_cancelled));
44168+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
44169+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
44170+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
44171+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
44172
44173 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
44174- atomic_read(&fscache_n_op_pend),
44175- atomic_read(&fscache_n_op_run),
44176- atomic_read(&fscache_n_op_enqueue),
44177- atomic_read(&fscache_n_op_cancelled),
44178- atomic_read(&fscache_n_op_rejected));
44179+ atomic_read_unchecked(&fscache_n_op_pend),
44180+ atomic_read_unchecked(&fscache_n_op_run),
44181+ atomic_read_unchecked(&fscache_n_op_enqueue),
44182+ atomic_read_unchecked(&fscache_n_op_cancelled),
44183+ atomic_read_unchecked(&fscache_n_op_rejected));
44184 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
44185- atomic_read(&fscache_n_op_deferred_release),
44186- atomic_read(&fscache_n_op_release),
44187- atomic_read(&fscache_n_op_gc));
44188+ atomic_read_unchecked(&fscache_n_op_deferred_release),
44189+ atomic_read_unchecked(&fscache_n_op_release),
44190+ atomic_read_unchecked(&fscache_n_op_gc));
44191
44192 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
44193 atomic_read(&fscache_n_cop_alloc_object),
44194diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
44195index 3426521..3b75162 100644
44196--- a/fs/fuse/cuse.c
44197+++ b/fs/fuse/cuse.c
44198@@ -587,10 +587,12 @@ static int __init cuse_init(void)
44199 INIT_LIST_HEAD(&cuse_conntbl[i]);
44200
44201 /* inherit and extend fuse_dev_operations */
44202- cuse_channel_fops = fuse_dev_operations;
44203- cuse_channel_fops.owner = THIS_MODULE;
44204- cuse_channel_fops.open = cuse_channel_open;
44205- cuse_channel_fops.release = cuse_channel_release;
44206+ pax_open_kernel();
44207+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
44208+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
44209+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
44210+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
44211+ pax_close_kernel();
44212
44213 cuse_class = class_create(THIS_MODULE, "cuse");
44214 if (IS_ERR(cuse_class))
44215diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
44216index 2aaf3ea..8e50863 100644
44217--- a/fs/fuse/dev.c
44218+++ b/fs/fuse/dev.c
44219@@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
44220 ret = 0;
44221 pipe_lock(pipe);
44222
44223- if (!pipe->readers) {
44224+ if (!atomic_read(&pipe->readers)) {
44225 send_sig(SIGPIPE, current, 0);
44226 if (!ret)
44227 ret = -EPIPE;
44228diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
44229index 9f63e49..d8a64c0 100644
44230--- a/fs/fuse/dir.c
44231+++ b/fs/fuse/dir.c
44232@@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *dentry)
44233 return link;
44234 }
44235
44236-static void free_link(char *link)
44237+static void free_link(const char *link)
44238 {
44239 if (!IS_ERR(link))
44240 free_page((unsigned long) link);
44241diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
44242index cfd4959..a780959 100644
44243--- a/fs/gfs2/inode.c
44244+++ b/fs/gfs2/inode.c
44245@@ -1490,7 +1490,7 @@ out:
44246
44247 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
44248 {
44249- char *s = nd_get_link(nd);
44250+ const char *s = nd_get_link(nd);
44251 if (!IS_ERR(s))
44252 kfree(s);
44253 }
44254diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
44255index 0be5a78..9cfb853 100644
44256--- a/fs/hugetlbfs/inode.c
44257+++ b/fs/hugetlbfs/inode.c
44258@@ -915,7 +915,7 @@ static struct file_system_type hugetlbfs_fs_type = {
44259 .kill_sb = kill_litter_super,
44260 };
44261
44262-static struct vfsmount *hugetlbfs_vfsmount;
44263+struct vfsmount *hugetlbfs_vfsmount;
44264
44265 static int can_do_hugetlb_shm(void)
44266 {
44267diff --git a/fs/inode.c b/fs/inode.c
44268index ee4e66b..0451521 100644
44269--- a/fs/inode.c
44270+++ b/fs/inode.c
44271@@ -787,8 +787,8 @@ unsigned int get_next_ino(void)
44272
44273 #ifdef CONFIG_SMP
44274 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
44275- static atomic_t shared_last_ino;
44276- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
44277+ static atomic_unchecked_t shared_last_ino;
44278+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
44279
44280 res = next - LAST_INO_BATCH;
44281 }
44282diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
44283index e513f19..2ab1351 100644
44284--- a/fs/jffs2/erase.c
44285+++ b/fs/jffs2/erase.c
44286@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
44287 struct jffs2_unknown_node marker = {
44288 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
44289 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44290- .totlen = cpu_to_je32(c->cleanmarker_size)
44291+ .totlen = cpu_to_je32(c->cleanmarker_size),
44292+ .hdr_crc = cpu_to_je32(0)
44293 };
44294
44295 jffs2_prealloc_raw_node_refs(c, jeb, 1);
44296diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
44297index b09e51d..e482afa 100644
44298--- a/fs/jffs2/wbuf.c
44299+++ b/fs/jffs2/wbuf.c
44300@@ -1011,7 +1011,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
44301 {
44302 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
44303 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44304- .totlen = constant_cpu_to_je32(8)
44305+ .totlen = constant_cpu_to_je32(8),
44306+ .hdr_crc = constant_cpu_to_je32(0)
44307 };
44308
44309 /*
44310diff --git a/fs/jfs/super.c b/fs/jfs/super.c
44311index a44eff0..462e07d 100644
44312--- a/fs/jfs/super.c
44313+++ b/fs/jfs/super.c
44314@@ -802,7 +802,7 @@ static int __init init_jfs_fs(void)
44315
44316 jfs_inode_cachep =
44317 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
44318- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
44319+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
44320 init_once);
44321 if (jfs_inode_cachep == NULL)
44322 return -ENOMEM;
44323diff --git a/fs/libfs.c b/fs/libfs.c
44324index f6d411e..e82a08d 100644
44325--- a/fs/libfs.c
44326+++ b/fs/libfs.c
44327@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
44328
44329 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
44330 struct dentry *next;
44331+ char d_name[sizeof(next->d_iname)];
44332+ const unsigned char *name;
44333+
44334 next = list_entry(p, struct dentry, d_u.d_child);
44335 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
44336 if (!simple_positive(next)) {
44337@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
44338
44339 spin_unlock(&next->d_lock);
44340 spin_unlock(&dentry->d_lock);
44341- if (filldir(dirent, next->d_name.name,
44342+ name = next->d_name.name;
44343+ if (name == next->d_iname) {
44344+ memcpy(d_name, name, next->d_name.len);
44345+ name = d_name;
44346+ }
44347+ if (filldir(dirent, name,
44348 next->d_name.len, filp->f_pos,
44349 next->d_inode->i_ino,
44350 dt_type(next->d_inode)) < 0)
44351diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
44352index 8392cb8..80d6193 100644
44353--- a/fs/lockd/clntproc.c
44354+++ b/fs/lockd/clntproc.c
44355@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
44356 /*
44357 * Cookie counter for NLM requests
44358 */
44359-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
44360+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
44361
44362 void nlmclnt_next_cookie(struct nlm_cookie *c)
44363 {
44364- u32 cookie = atomic_inc_return(&nlm_cookie);
44365+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
44366
44367 memcpy(c->data, &cookie, 4);
44368 c->len=4;
44369diff --git a/fs/locks.c b/fs/locks.c
44370index 637694b..f84a121 100644
44371--- a/fs/locks.c
44372+++ b/fs/locks.c
44373@@ -2074,16 +2074,16 @@ void locks_remove_flock(struct file *filp)
44374 return;
44375
44376 if (filp->f_op && filp->f_op->flock) {
44377- struct file_lock fl = {
44378+ struct file_lock flock = {
44379 .fl_pid = current->tgid,
44380 .fl_file = filp,
44381 .fl_flags = FL_FLOCK,
44382 .fl_type = F_UNLCK,
44383 .fl_end = OFFSET_MAX,
44384 };
44385- filp->f_op->flock(filp, F_SETLKW, &fl);
44386- if (fl.fl_ops && fl.fl_ops->fl_release_private)
44387- fl.fl_ops->fl_release_private(&fl);
44388+ filp->f_op->flock(filp, F_SETLKW, &flock);
44389+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
44390+ flock.fl_ops->fl_release_private(&flock);
44391 }
44392
44393 lock_flocks();
44394diff --git a/fs/namei.c b/fs/namei.c
44395index 744e942..24ef47f 100644
44396--- a/fs/namei.c
44397+++ b/fs/namei.c
44398@@ -279,16 +279,32 @@ int generic_permission(struct inode *inode, int mask)
44399 if (ret != -EACCES)
44400 return ret;
44401
44402+#ifdef CONFIG_GRKERNSEC
44403+ /* we'll block if we have to log due to a denied capability use */
44404+ if (mask & MAY_NOT_BLOCK)
44405+ return -ECHILD;
44406+#endif
44407+
44408 if (S_ISDIR(inode->i_mode)) {
44409 /* DACs are overridable for directories */
44410- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44411- return 0;
44412 if (!(mask & MAY_WRITE))
44413- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44414+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
44415+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44416 return 0;
44417+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44418+ return 0;
44419 return -EACCES;
44420 }
44421 /*
44422+ * Searching includes executable on directories, else just read.
44423+ */
44424+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44425+ if (mask == MAY_READ)
44426+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
44427+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44428+ return 0;
44429+
44430+ /*
44431 * Read/write DACs are always overridable.
44432 * Executable DACs are overridable when there is
44433 * at least one exec bit set.
44434@@ -297,14 +313,6 @@ int generic_permission(struct inode *inode, int mask)
44435 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44436 return 0;
44437
44438- /*
44439- * Searching includes executable on directories, else just read.
44440- */
44441- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44442- if (mask == MAY_READ)
44443- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44444- return 0;
44445-
44446 return -EACCES;
44447 }
44448
44449@@ -653,11 +661,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
44450 return error;
44451 }
44452
44453+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
44454+ dentry->d_inode, dentry, nd->path.mnt)) {
44455+ error = -EACCES;
44456+ *p = ERR_PTR(error); /* no ->put_link(), please */
44457+ path_put(&nd->path);
44458+ return error;
44459+ }
44460+
44461 nd->last_type = LAST_BIND;
44462 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
44463 error = PTR_ERR(*p);
44464 if (!IS_ERR(*p)) {
44465- char *s = nd_get_link(nd);
44466+ const char *s = nd_get_link(nd);
44467 error = 0;
44468 if (s)
44469 error = __vfs_follow_link(nd, s);
44470@@ -1624,6 +1640,21 @@ static int path_lookupat(int dfd, const char *name,
44471 if (!err)
44472 err = complete_walk(nd);
44473
44474+ if (!(nd->flags & LOOKUP_PARENT)) {
44475+#ifdef CONFIG_GRKERNSEC
44476+ if (flags & LOOKUP_RCU) {
44477+ if (!err)
44478+ path_put(&nd->path);
44479+ err = -ECHILD;
44480+ } else
44481+#endif
44482+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44483+ if (!err)
44484+ path_put(&nd->path);
44485+ err = -ENOENT;
44486+ }
44487+ }
44488+
44489 if (!err && nd->flags & LOOKUP_DIRECTORY) {
44490 if (!nd->inode->i_op->lookup) {
44491 path_put(&nd->path);
44492@@ -1651,6 +1682,15 @@ static int do_path_lookup(int dfd, const char *name,
44493 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
44494
44495 if (likely(!retval)) {
44496+ if (*name != '/' && nd->path.dentry && nd->inode) {
44497+#ifdef CONFIG_GRKERNSEC
44498+ if (flags & LOOKUP_RCU)
44499+ return -ECHILD;
44500+#endif
44501+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
44502+ return -ENOENT;
44503+ }
44504+
44505 if (unlikely(!audit_dummy_context())) {
44506 if (nd->path.dentry && nd->inode)
44507 audit_inode(name, nd->path.dentry);
44508@@ -2048,6 +2088,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
44509 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
44510 return -EPERM;
44511
44512+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
44513+ return -EPERM;
44514+ if (gr_handle_rawio(inode))
44515+ return -EPERM;
44516+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
44517+ return -EACCES;
44518+
44519 return 0;
44520 }
44521
44522@@ -2109,6 +2156,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44523 error = complete_walk(nd);
44524 if (error)
44525 return ERR_PTR(error);
44526+#ifdef CONFIG_GRKERNSEC
44527+ if (nd->flags & LOOKUP_RCU) {
44528+ error = -ECHILD;
44529+ goto exit;
44530+ }
44531+#endif
44532+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44533+ error = -ENOENT;
44534+ goto exit;
44535+ }
44536 audit_inode(pathname, nd->path.dentry);
44537 if (open_flag & O_CREAT) {
44538 error = -EISDIR;
44539@@ -2119,6 +2176,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44540 error = complete_walk(nd);
44541 if (error)
44542 return ERR_PTR(error);
44543+#ifdef CONFIG_GRKERNSEC
44544+ if (nd->flags & LOOKUP_RCU) {
44545+ error = -ECHILD;
44546+ goto exit;
44547+ }
44548+#endif
44549+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
44550+ error = -ENOENT;
44551+ goto exit;
44552+ }
44553 audit_inode(pathname, dir);
44554 goto ok;
44555 }
44556@@ -2140,6 +2207,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44557 error = complete_walk(nd);
44558 if (error)
44559 return ERR_PTR(-ECHILD);
44560+#ifdef CONFIG_GRKERNSEC
44561+ if (nd->flags & LOOKUP_RCU) {
44562+ error = -ECHILD;
44563+ goto exit;
44564+ }
44565+#endif
44566+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44567+ error = -ENOENT;
44568+ goto exit;
44569+ }
44570
44571 error = -ENOTDIR;
44572 if (nd->flags & LOOKUP_DIRECTORY) {
44573@@ -2180,6 +2257,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44574 /* Negative dentry, just create the file */
44575 if (!dentry->d_inode) {
44576 int mode = op->mode;
44577+
44578+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
44579+ error = -EACCES;
44580+ goto exit_mutex_unlock;
44581+ }
44582+
44583 if (!IS_POSIXACL(dir->d_inode))
44584 mode &= ~current_umask();
44585 /*
44586@@ -2203,6 +2286,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44587 error = vfs_create(dir->d_inode, dentry, mode, nd);
44588 if (error)
44589 goto exit_mutex_unlock;
44590+ else
44591+ gr_handle_create(path->dentry, path->mnt);
44592 mutex_unlock(&dir->d_inode->i_mutex);
44593 dput(nd->path.dentry);
44594 nd->path.dentry = dentry;
44595@@ -2212,6 +2297,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44596 /*
44597 * It already exists.
44598 */
44599+
44600+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
44601+ error = -ENOENT;
44602+ goto exit_mutex_unlock;
44603+ }
44604+
44605+ /* only check if O_CREAT is specified, all other checks need to go
44606+ into may_open */
44607+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
44608+ error = -EACCES;
44609+ goto exit_mutex_unlock;
44610+ }
44611+
44612 mutex_unlock(&dir->d_inode->i_mutex);
44613 audit_inode(pathname, path->dentry);
44614
44615@@ -2424,6 +2522,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
44616 *path = nd.path;
44617 return dentry;
44618 eexist:
44619+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
44620+ dput(dentry);
44621+ dentry = ERR_PTR(-ENOENT);
44622+ goto fail;
44623+ }
44624 dput(dentry);
44625 dentry = ERR_PTR(-EEXIST);
44626 fail:
44627@@ -2446,6 +2549,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
44628 }
44629 EXPORT_SYMBOL(user_path_create);
44630
44631+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
44632+{
44633+ char *tmp = getname(pathname);
44634+ struct dentry *res;
44635+ if (IS_ERR(tmp))
44636+ return ERR_CAST(tmp);
44637+ res = kern_path_create(dfd, tmp, path, is_dir);
44638+ if (IS_ERR(res))
44639+ putname(tmp);
44640+ else
44641+ *to = tmp;
44642+ return res;
44643+}
44644+
44645 int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
44646 {
44647 int error = may_create(dir, dentry);
44648@@ -2513,6 +2630,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
44649 error = mnt_want_write(path.mnt);
44650 if (error)
44651 goto out_dput;
44652+
44653+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
44654+ error = -EPERM;
44655+ goto out_drop_write;
44656+ }
44657+
44658+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
44659+ error = -EACCES;
44660+ goto out_drop_write;
44661+ }
44662+
44663 error = security_path_mknod(&path, dentry, mode, dev);
44664 if (error)
44665 goto out_drop_write;
44666@@ -2530,6 +2658,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
44667 }
44668 out_drop_write:
44669 mnt_drop_write(path.mnt);
44670+
44671+ if (!error)
44672+ gr_handle_create(dentry, path.mnt);
44673 out_dput:
44674 dput(dentry);
44675 mutex_unlock(&path.dentry->d_inode->i_mutex);
44676@@ -2579,12 +2710,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
44677 error = mnt_want_write(path.mnt);
44678 if (error)
44679 goto out_dput;
44680+
44681+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
44682+ error = -EACCES;
44683+ goto out_drop_write;
44684+ }
44685+
44686 error = security_path_mkdir(&path, dentry, mode);
44687 if (error)
44688 goto out_drop_write;
44689 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
44690 out_drop_write:
44691 mnt_drop_write(path.mnt);
44692+
44693+ if (!error)
44694+ gr_handle_create(dentry, path.mnt);
44695 out_dput:
44696 dput(dentry);
44697 mutex_unlock(&path.dentry->d_inode->i_mutex);
44698@@ -2664,6 +2804,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
44699 char * name;
44700 struct dentry *dentry;
44701 struct nameidata nd;
44702+ ino_t saved_ino = 0;
44703+ dev_t saved_dev = 0;
44704
44705 error = user_path_parent(dfd, pathname, &nd, &name);
44706 if (error)
44707@@ -2692,6 +2834,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
44708 error = -ENOENT;
44709 goto exit3;
44710 }
44711+
44712+ saved_ino = dentry->d_inode->i_ino;
44713+ saved_dev = gr_get_dev_from_dentry(dentry);
44714+
44715+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
44716+ error = -EACCES;
44717+ goto exit3;
44718+ }
44719+
44720 error = mnt_want_write(nd.path.mnt);
44721 if (error)
44722 goto exit3;
44723@@ -2699,6 +2850,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
44724 if (error)
44725 goto exit4;
44726 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
44727+ if (!error && (saved_dev || saved_ino))
44728+ gr_handle_delete(saved_ino, saved_dev);
44729 exit4:
44730 mnt_drop_write(nd.path.mnt);
44731 exit3:
44732@@ -2761,6 +2914,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44733 struct dentry *dentry;
44734 struct nameidata nd;
44735 struct inode *inode = NULL;
44736+ ino_t saved_ino = 0;
44737+ dev_t saved_dev = 0;
44738
44739 error = user_path_parent(dfd, pathname, &nd, &name);
44740 if (error)
44741@@ -2783,6 +2938,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44742 if (!inode)
44743 goto slashes;
44744 ihold(inode);
44745+
44746+ if (inode->i_nlink <= 1) {
44747+ saved_ino = inode->i_ino;
44748+ saved_dev = gr_get_dev_from_dentry(dentry);
44749+ }
44750+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
44751+ error = -EACCES;
44752+ goto exit2;
44753+ }
44754+
44755 error = mnt_want_write(nd.path.mnt);
44756 if (error)
44757 goto exit2;
44758@@ -2790,6 +2955,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44759 if (error)
44760 goto exit3;
44761 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
44762+ if (!error && (saved_ino || saved_dev))
44763+ gr_handle_delete(saved_ino, saved_dev);
44764 exit3:
44765 mnt_drop_write(nd.path.mnt);
44766 exit2:
44767@@ -2865,10 +3032,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
44768 error = mnt_want_write(path.mnt);
44769 if (error)
44770 goto out_dput;
44771+
44772+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
44773+ error = -EACCES;
44774+ goto out_drop_write;
44775+ }
44776+
44777 error = security_path_symlink(&path, dentry, from);
44778 if (error)
44779 goto out_drop_write;
44780 error = vfs_symlink(path.dentry->d_inode, dentry, from);
44781+ if (!error)
44782+ gr_handle_create(dentry, path.mnt);
44783 out_drop_write:
44784 mnt_drop_write(path.mnt);
44785 out_dput:
44786@@ -2940,6 +3115,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44787 {
44788 struct dentry *new_dentry;
44789 struct path old_path, new_path;
44790+ char *to = NULL;
44791 int how = 0;
44792 int error;
44793
44794@@ -2963,7 +3139,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44795 if (error)
44796 return error;
44797
44798- new_dentry = user_path_create(newdfd, newname, &new_path, 0);
44799+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
44800 error = PTR_ERR(new_dentry);
44801 if (IS_ERR(new_dentry))
44802 goto out;
44803@@ -2974,13 +3150,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44804 error = mnt_want_write(new_path.mnt);
44805 if (error)
44806 goto out_dput;
44807+
44808+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
44809+ old_path.dentry->d_inode,
44810+ old_path.dentry->d_inode->i_mode, to)) {
44811+ error = -EACCES;
44812+ goto out_drop_write;
44813+ }
44814+
44815+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
44816+ old_path.dentry, old_path.mnt, to)) {
44817+ error = -EACCES;
44818+ goto out_drop_write;
44819+ }
44820+
44821 error = security_path_link(old_path.dentry, &new_path, new_dentry);
44822 if (error)
44823 goto out_drop_write;
44824 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
44825+ if (!error)
44826+ gr_handle_create(new_dentry, new_path.mnt);
44827 out_drop_write:
44828 mnt_drop_write(new_path.mnt);
44829 out_dput:
44830+ putname(to);
44831 dput(new_dentry);
44832 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
44833 path_put(&new_path);
44834@@ -3208,6 +3401,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
44835 if (new_dentry == trap)
44836 goto exit5;
44837
44838+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
44839+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
44840+ to);
44841+ if (error)
44842+ goto exit5;
44843+
44844 error = mnt_want_write(oldnd.path.mnt);
44845 if (error)
44846 goto exit5;
44847@@ -3217,6 +3416,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
44848 goto exit6;
44849 error = vfs_rename(old_dir->d_inode, old_dentry,
44850 new_dir->d_inode, new_dentry);
44851+ if (!error)
44852+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
44853+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
44854 exit6:
44855 mnt_drop_write(oldnd.path.mnt);
44856 exit5:
44857@@ -3242,6 +3444,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
44858
44859 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
44860 {
44861+ char tmpbuf[64];
44862+ const char *newlink;
44863 int len;
44864
44865 len = PTR_ERR(link);
44866@@ -3251,7 +3455,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
44867 len = strlen(link);
44868 if (len > (unsigned) buflen)
44869 len = buflen;
44870- if (copy_to_user(buffer, link, len))
44871+
44872+ if (len < sizeof(tmpbuf)) {
44873+ memcpy(tmpbuf, link, len);
44874+ newlink = tmpbuf;
44875+ } else
44876+ newlink = link;
44877+
44878+ if (copy_to_user(buffer, newlink, len))
44879 len = -EFAULT;
44880 out:
44881 return len;
44882diff --git a/fs/namespace.c b/fs/namespace.c
44883index cfc6d44..b4632a5 100644
44884--- a/fs/namespace.c
44885+++ b/fs/namespace.c
44886@@ -1326,6 +1326,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
44887 if (!(sb->s_flags & MS_RDONLY))
44888 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
44889 up_write(&sb->s_umount);
44890+
44891+ gr_log_remount(mnt->mnt_devname, retval);
44892+
44893 return retval;
44894 }
44895
44896@@ -1345,6 +1348,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
44897 br_write_unlock(vfsmount_lock);
44898 up_write(&namespace_sem);
44899 release_mounts(&umount_list);
44900+
44901+ gr_log_unmount(mnt->mnt_devname, retval);
44902+
44903 return retval;
44904 }
44905
44906@@ -2336,6 +2342,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
44907 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
44908 MS_STRICTATIME);
44909
44910+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
44911+ retval = -EPERM;
44912+ goto dput_out;
44913+ }
44914+
44915+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
44916+ retval = -EPERM;
44917+ goto dput_out;
44918+ }
44919+
44920 if (flags & MS_REMOUNT)
44921 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
44922 data_page);
44923@@ -2350,6 +2366,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
44924 dev_name, data_page);
44925 dput_out:
44926 path_put(&path);
44927+
44928+ gr_log_mount(dev_name, dir_name, retval);
44929+
44930 return retval;
44931 }
44932
44933@@ -2605,6 +2624,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
44934 if (error)
44935 goto out2;
44936
44937+ if (gr_handle_chroot_pivot()) {
44938+ error = -EPERM;
44939+ goto out2;
44940+ }
44941+
44942 get_fs_root(current->fs, &root);
44943 error = lock_mount(&old);
44944 if (error)
44945diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
44946index 3db6b82..a57597e 100644
44947--- a/fs/nfs/blocklayout/blocklayout.c
44948+++ b/fs/nfs/blocklayout/blocklayout.c
44949@@ -90,7 +90,7 @@ static int is_writable(struct pnfs_block_extent *be, sector_t isect)
44950 */
44951 struct parallel_io {
44952 struct kref refcnt;
44953- struct rpc_call_ops call_ops;
44954+ rpc_call_ops_no_const call_ops;
44955 void (*pnfs_callback) (void *data);
44956 void *data;
44957 };
44958diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
44959index 50a15fa..ca113f9 100644
44960--- a/fs/nfs/inode.c
44961+++ b/fs/nfs/inode.c
44962@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
44963 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
44964 nfsi->attrtimeo_timestamp = jiffies;
44965
44966- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
44967+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
44968 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
44969 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
44970 else
44971@@ -1002,16 +1002,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
44972 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
44973 }
44974
44975-static atomic_long_t nfs_attr_generation_counter;
44976+static atomic_long_unchecked_t nfs_attr_generation_counter;
44977
44978 static unsigned long nfs_read_attr_generation_counter(void)
44979 {
44980- return atomic_long_read(&nfs_attr_generation_counter);
44981+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
44982 }
44983
44984 unsigned long nfs_inc_attr_generation_counter(void)
44985 {
44986- return atomic_long_inc_return(&nfs_attr_generation_counter);
44987+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
44988 }
44989
44990 void nfs_fattr_init(struct nfs_fattr *fattr)
44991diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
44992index 7a2e442..8e544cc 100644
44993--- a/fs/nfsd/vfs.c
44994+++ b/fs/nfsd/vfs.c
44995@@ -914,7 +914,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
44996 } else {
44997 oldfs = get_fs();
44998 set_fs(KERNEL_DS);
44999- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
45000+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
45001 set_fs(oldfs);
45002 }
45003
45004@@ -1018,7 +1018,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
45005
45006 /* Write the data. */
45007 oldfs = get_fs(); set_fs(KERNEL_DS);
45008- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
45009+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
45010 set_fs(oldfs);
45011 if (host_err < 0)
45012 goto out_nfserr;
45013@@ -1553,7 +1553,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
45014 */
45015
45016 oldfs = get_fs(); set_fs(KERNEL_DS);
45017- host_err = inode->i_op->readlink(dentry, buf, *lenp);
45018+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
45019 set_fs(oldfs);
45020
45021 if (host_err < 0)
45022diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
45023index 9fde1c0..14e8827 100644
45024--- a/fs/notify/fanotify/fanotify_user.c
45025+++ b/fs/notify/fanotify/fanotify_user.c
45026@@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
45027 goto out_close_fd;
45028
45029 ret = -EFAULT;
45030- if (copy_to_user(buf, &fanotify_event_metadata,
45031+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
45032+ copy_to_user(buf, &fanotify_event_metadata,
45033 fanotify_event_metadata.event_len))
45034 goto out_kill_access_response;
45035
45036diff --git a/fs/notify/notification.c b/fs/notify/notification.c
45037index ee18815..7aa5d01 100644
45038--- a/fs/notify/notification.c
45039+++ b/fs/notify/notification.c
45040@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
45041 * get set to 0 so it will never get 'freed'
45042 */
45043 static struct fsnotify_event *q_overflow_event;
45044-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
45045+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
45046
45047 /**
45048 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
45049@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
45050 */
45051 u32 fsnotify_get_cookie(void)
45052 {
45053- return atomic_inc_return(&fsnotify_sync_cookie);
45054+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
45055 }
45056 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
45057
45058diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
45059index 99e3610..02c1068 100644
45060--- a/fs/ntfs/dir.c
45061+++ b/fs/ntfs/dir.c
45062@@ -1329,7 +1329,7 @@ find_next_index_buffer:
45063 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
45064 ~(s64)(ndir->itype.index.block_size - 1)));
45065 /* Bounds checks. */
45066- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
45067+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
45068 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
45069 "inode 0x%lx or driver bug.", vdir->i_ino);
45070 goto err_out;
45071diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
45072index c587e2d..3641eaa 100644
45073--- a/fs/ntfs/file.c
45074+++ b/fs/ntfs/file.c
45075@@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
45076 #endif /* NTFS_RW */
45077 };
45078
45079-const struct file_operations ntfs_empty_file_ops = {};
45080+const struct file_operations ntfs_empty_file_ops __read_only;
45081
45082-const struct inode_operations ntfs_empty_inode_ops = {};
45083+const struct inode_operations ntfs_empty_inode_ops __read_only;
45084diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
45085index 210c352..a174f83 100644
45086--- a/fs/ocfs2/localalloc.c
45087+++ b/fs/ocfs2/localalloc.c
45088@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
45089 goto bail;
45090 }
45091
45092- atomic_inc(&osb->alloc_stats.moves);
45093+ atomic_inc_unchecked(&osb->alloc_stats.moves);
45094
45095 bail:
45096 if (handle)
45097diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
45098index d355e6e..578d905 100644
45099--- a/fs/ocfs2/ocfs2.h
45100+++ b/fs/ocfs2/ocfs2.h
45101@@ -235,11 +235,11 @@ enum ocfs2_vol_state
45102
45103 struct ocfs2_alloc_stats
45104 {
45105- atomic_t moves;
45106- atomic_t local_data;
45107- atomic_t bitmap_data;
45108- atomic_t bg_allocs;
45109- atomic_t bg_extends;
45110+ atomic_unchecked_t moves;
45111+ atomic_unchecked_t local_data;
45112+ atomic_unchecked_t bitmap_data;
45113+ atomic_unchecked_t bg_allocs;
45114+ atomic_unchecked_t bg_extends;
45115 };
45116
45117 enum ocfs2_local_alloc_state
45118diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
45119index ba5d97e..c77db25 100644
45120--- a/fs/ocfs2/suballoc.c
45121+++ b/fs/ocfs2/suballoc.c
45122@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
45123 mlog_errno(status);
45124 goto bail;
45125 }
45126- atomic_inc(&osb->alloc_stats.bg_extends);
45127+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
45128
45129 /* You should never ask for this much metadata */
45130 BUG_ON(bits_wanted >
45131@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
45132 mlog_errno(status);
45133 goto bail;
45134 }
45135- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45136+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45137
45138 *suballoc_loc = res.sr_bg_blkno;
45139 *suballoc_bit_start = res.sr_bit_offset;
45140@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
45141 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
45142 res->sr_bits);
45143
45144- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45145+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45146
45147 BUG_ON(res->sr_bits != 1);
45148
45149@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
45150 mlog_errno(status);
45151 goto bail;
45152 }
45153- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45154+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45155
45156 BUG_ON(res.sr_bits != 1);
45157
45158@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
45159 cluster_start,
45160 num_clusters);
45161 if (!status)
45162- atomic_inc(&osb->alloc_stats.local_data);
45163+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
45164 } else {
45165 if (min_clusters > (osb->bitmap_cpg - 1)) {
45166 /* The only paths asking for contiguousness
45167@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
45168 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
45169 res.sr_bg_blkno,
45170 res.sr_bit_offset);
45171- atomic_inc(&osb->alloc_stats.bitmap_data);
45172+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
45173 *num_clusters = res.sr_bits;
45174 }
45175 }
45176diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
45177index 4994f8b..eaab8eb 100644
45178--- a/fs/ocfs2/super.c
45179+++ b/fs/ocfs2/super.c
45180@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
45181 "%10s => GlobalAllocs: %d LocalAllocs: %d "
45182 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
45183 "Stats",
45184- atomic_read(&osb->alloc_stats.bitmap_data),
45185- atomic_read(&osb->alloc_stats.local_data),
45186- atomic_read(&osb->alloc_stats.bg_allocs),
45187- atomic_read(&osb->alloc_stats.moves),
45188- atomic_read(&osb->alloc_stats.bg_extends));
45189+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
45190+ atomic_read_unchecked(&osb->alloc_stats.local_data),
45191+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
45192+ atomic_read_unchecked(&osb->alloc_stats.moves),
45193+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
45194
45195 out += snprintf(buf + out, len - out,
45196 "%10s => State: %u Descriptor: %llu Size: %u bits "
45197@@ -2119,11 +2119,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
45198 spin_lock_init(&osb->osb_xattr_lock);
45199 ocfs2_init_steal_slots(osb);
45200
45201- atomic_set(&osb->alloc_stats.moves, 0);
45202- atomic_set(&osb->alloc_stats.local_data, 0);
45203- atomic_set(&osb->alloc_stats.bitmap_data, 0);
45204- atomic_set(&osb->alloc_stats.bg_allocs, 0);
45205- atomic_set(&osb->alloc_stats.bg_extends, 0);
45206+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
45207+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
45208+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
45209+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
45210+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
45211
45212 /* Copy the blockcheck stats from the superblock probe */
45213 osb->osb_ecc_stats = *stats;
45214diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
45215index 5d22872..523db20 100644
45216--- a/fs/ocfs2/symlink.c
45217+++ b/fs/ocfs2/symlink.c
45218@@ -142,7 +142,7 @@ bail:
45219
45220 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
45221 {
45222- char *link = nd_get_link(nd);
45223+ const char *link = nd_get_link(nd);
45224 if (!IS_ERR(link))
45225 kfree(link);
45226 }
45227diff --git a/fs/open.c b/fs/open.c
45228index 22c41b5..78894cf 100644
45229--- a/fs/open.c
45230+++ b/fs/open.c
45231@@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
45232 error = locks_verify_truncate(inode, NULL, length);
45233 if (!error)
45234 error = security_path_truncate(&path);
45235+
45236+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
45237+ error = -EACCES;
45238+
45239 if (!error)
45240 error = do_truncate(path.dentry, length, 0, NULL);
45241
45242@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
45243 if (__mnt_is_readonly(path.mnt))
45244 res = -EROFS;
45245
45246+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
45247+ res = -EACCES;
45248+
45249 out_path_release:
45250 path_put(&path);
45251 out:
45252@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
45253 if (error)
45254 goto dput_and_out;
45255
45256+ gr_log_chdir(path.dentry, path.mnt);
45257+
45258 set_fs_pwd(current->fs, &path);
45259
45260 dput_and_out:
45261@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
45262 goto out_putf;
45263
45264 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
45265+
45266+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
45267+ error = -EPERM;
45268+
45269+ if (!error)
45270+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
45271+
45272 if (!error)
45273 set_fs_pwd(current->fs, &file->f_path);
45274 out_putf:
45275@@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
45276 if (error)
45277 goto dput_and_out;
45278
45279+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
45280+ goto dput_and_out;
45281+
45282 set_fs_root(current->fs, &path);
45283+
45284+ gr_handle_chroot_chdir(&path);
45285+
45286 error = 0;
45287 dput_and_out:
45288 path_put(&path);
45289@@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
45290 if (error)
45291 return error;
45292 mutex_lock(&inode->i_mutex);
45293+
45294+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
45295+ error = -EACCES;
45296+ goto out_unlock;
45297+ }
45298+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
45299+ error = -EACCES;
45300+ goto out_unlock;
45301+ }
45302+
45303 error = security_path_chmod(path->dentry, path->mnt, mode);
45304 if (error)
45305 goto out_unlock;
45306@@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
45307 int error;
45308 struct iattr newattrs;
45309
45310+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
45311+ return -EACCES;
45312+
45313 newattrs.ia_valid = ATTR_CTIME;
45314 if (user != (uid_t) -1) {
45315 newattrs.ia_valid |= ATTR_UID;
45316diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
45317index 6296b40..417c00f 100644
45318--- a/fs/partitions/efi.c
45319+++ b/fs/partitions/efi.c
45320@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
45321 if (!gpt)
45322 return NULL;
45323
45324+ if (!le32_to_cpu(gpt->num_partition_entries))
45325+ return NULL;
45326+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
45327+ if (!pte)
45328+ return NULL;
45329+
45330 count = le32_to_cpu(gpt->num_partition_entries) *
45331 le32_to_cpu(gpt->sizeof_partition_entry);
45332- if (!count)
45333- return NULL;
45334- pte = kzalloc(count, GFP_KERNEL);
45335- if (!pte)
45336- return NULL;
45337-
45338 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
45339 (u8 *) pte,
45340 count) < count) {
45341diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
45342index bd8ae78..539d250 100644
45343--- a/fs/partitions/ldm.c
45344+++ b/fs/partitions/ldm.c
45345@@ -1324,7 +1324,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
45346 goto found;
45347 }
45348
45349- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
45350+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
45351 if (!f) {
45352 ldm_crit ("Out of memory.");
45353 return false;
45354diff --git a/fs/pipe.c b/fs/pipe.c
45355index 4065f07..68c0706 100644
45356--- a/fs/pipe.c
45357+++ b/fs/pipe.c
45358@@ -420,9 +420,9 @@ redo:
45359 }
45360 if (bufs) /* More to do? */
45361 continue;
45362- if (!pipe->writers)
45363+ if (!atomic_read(&pipe->writers))
45364 break;
45365- if (!pipe->waiting_writers) {
45366+ if (!atomic_read(&pipe->waiting_writers)) {
45367 /* syscall merging: Usually we must not sleep
45368 * if O_NONBLOCK is set, or if we got some data.
45369 * But if a writer sleeps in kernel space, then
45370@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
45371 mutex_lock(&inode->i_mutex);
45372 pipe = inode->i_pipe;
45373
45374- if (!pipe->readers) {
45375+ if (!atomic_read(&pipe->readers)) {
45376 send_sig(SIGPIPE, current, 0);
45377 ret = -EPIPE;
45378 goto out;
45379@@ -530,7 +530,7 @@ redo1:
45380 for (;;) {
45381 int bufs;
45382
45383- if (!pipe->readers) {
45384+ if (!atomic_read(&pipe->readers)) {
45385 send_sig(SIGPIPE, current, 0);
45386 if (!ret)
45387 ret = -EPIPE;
45388@@ -616,9 +616,9 @@ redo2:
45389 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
45390 do_wakeup = 0;
45391 }
45392- pipe->waiting_writers++;
45393+ atomic_inc(&pipe->waiting_writers);
45394 pipe_wait(pipe);
45395- pipe->waiting_writers--;
45396+ atomic_dec(&pipe->waiting_writers);
45397 }
45398 out:
45399 mutex_unlock(&inode->i_mutex);
45400@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table *wait)
45401 mask = 0;
45402 if (filp->f_mode & FMODE_READ) {
45403 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
45404- if (!pipe->writers && filp->f_version != pipe->w_counter)
45405+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
45406 mask |= POLLHUP;
45407 }
45408
45409@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table *wait)
45410 * Most Unices do not set POLLERR for FIFOs but on Linux they
45411 * behave exactly like pipes for poll().
45412 */
45413- if (!pipe->readers)
45414+ if (!atomic_read(&pipe->readers))
45415 mask |= POLLERR;
45416 }
45417
45418@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int decr, int decw)
45419
45420 mutex_lock(&inode->i_mutex);
45421 pipe = inode->i_pipe;
45422- pipe->readers -= decr;
45423- pipe->writers -= decw;
45424+ atomic_sub(decr, &pipe->readers);
45425+ atomic_sub(decw, &pipe->writers);
45426
45427- if (!pipe->readers && !pipe->writers) {
45428+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
45429 free_pipe_info(inode);
45430 } else {
45431 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
45432@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
45433
45434 if (inode->i_pipe) {
45435 ret = 0;
45436- inode->i_pipe->readers++;
45437+ atomic_inc(&inode->i_pipe->readers);
45438 }
45439
45440 mutex_unlock(&inode->i_mutex);
45441@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
45442
45443 if (inode->i_pipe) {
45444 ret = 0;
45445- inode->i_pipe->writers++;
45446+ atomic_inc(&inode->i_pipe->writers);
45447 }
45448
45449 mutex_unlock(&inode->i_mutex);
45450@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
45451 if (inode->i_pipe) {
45452 ret = 0;
45453 if (filp->f_mode & FMODE_READ)
45454- inode->i_pipe->readers++;
45455+ atomic_inc(&inode->i_pipe->readers);
45456 if (filp->f_mode & FMODE_WRITE)
45457- inode->i_pipe->writers++;
45458+ atomic_inc(&inode->i_pipe->writers);
45459 }
45460
45461 mutex_unlock(&inode->i_mutex);
45462@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
45463 inode->i_pipe = NULL;
45464 }
45465
45466-static struct vfsmount *pipe_mnt __read_mostly;
45467+struct vfsmount *pipe_mnt __read_mostly;
45468
45469 /*
45470 * pipefs_dname() is called from d_path().
45471@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(void)
45472 goto fail_iput;
45473 inode->i_pipe = pipe;
45474
45475- pipe->readers = pipe->writers = 1;
45476+ atomic_set(&pipe->readers, 1);
45477+ atomic_set(&pipe->writers, 1);
45478 inode->i_fop = &rdwr_pipefifo_fops;
45479
45480 /*
45481diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
45482index 15af622..0e9f4467 100644
45483--- a/fs/proc/Kconfig
45484+++ b/fs/proc/Kconfig
45485@@ -30,12 +30,12 @@ config PROC_FS
45486
45487 config PROC_KCORE
45488 bool "/proc/kcore support" if !ARM
45489- depends on PROC_FS && MMU
45490+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45491
45492 config PROC_VMCORE
45493 bool "/proc/vmcore support"
45494- depends on PROC_FS && CRASH_DUMP
45495- default y
45496+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45497+ default n
45498 help
45499 Exports the dump image of crashed kernel in ELF format.
45500
45501@@ -59,8 +59,8 @@ config PROC_SYSCTL
45502 limited in memory.
45503
45504 config PROC_PAGE_MONITOR
45505- default y
45506- depends on PROC_FS && MMU
45507+ default n
45508+ depends on PROC_FS && MMU && !GRKERNSEC
45509 bool "Enable /proc page monitoring" if EXPERT
45510 help
45511 Various /proc files exist to monitor process memory utilization:
45512diff --git a/fs/proc/array.c b/fs/proc/array.c
45513index 3a1dafd..1456746 100644
45514--- a/fs/proc/array.c
45515+++ b/fs/proc/array.c
45516@@ -60,6 +60,7 @@
45517 #include <linux/tty.h>
45518 #include <linux/string.h>
45519 #include <linux/mman.h>
45520+#include <linux/grsecurity.h>
45521 #include <linux/proc_fs.h>
45522 #include <linux/ioport.h>
45523 #include <linux/uaccess.h>
45524@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
45525 seq_putc(m, '\n');
45526 }
45527
45528+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45529+static inline void task_pax(struct seq_file *m, struct task_struct *p)
45530+{
45531+ if (p->mm)
45532+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
45533+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
45534+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
45535+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
45536+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
45537+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
45538+ else
45539+ seq_printf(m, "PaX:\t-----\n");
45540+}
45541+#endif
45542+
45543 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45544 struct pid *pid, struct task_struct *task)
45545 {
45546@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45547 task_cpus_allowed(m, task);
45548 cpuset_task_status_allowed(m, task);
45549 task_context_switch_counts(m, task);
45550+
45551+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45552+ task_pax(m, task);
45553+#endif
45554+
45555+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
45556+ task_grsec_rbac(m, task);
45557+#endif
45558+
45559 return 0;
45560 }
45561
45562+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45563+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45564+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45565+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45566+#endif
45567+
45568 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45569 struct pid *pid, struct task_struct *task, int whole)
45570 {
45571@@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45572 char tcomm[sizeof(task->comm)];
45573 unsigned long flags;
45574
45575+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45576+ if (current->exec_id != m->exec_id) {
45577+ gr_log_badprocpid("stat");
45578+ return 0;
45579+ }
45580+#endif
45581+
45582 state = *get_task_state(task);
45583 vsize = eip = esp = 0;
45584 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
45585@@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45586 gtime = task->gtime;
45587 }
45588
45589+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45590+ if (PAX_RAND_FLAGS(mm)) {
45591+ eip = 0;
45592+ esp = 0;
45593+ wchan = 0;
45594+ }
45595+#endif
45596+#ifdef CONFIG_GRKERNSEC_HIDESYM
45597+ wchan = 0;
45598+ eip =0;
45599+ esp =0;
45600+#endif
45601+
45602 /* scale priority and nice values from timeslices to -20..20 */
45603 /* to make it look like a "normal" Unix priority/nice value */
45604 priority = task_prio(task);
45605@@ -489,9 +540,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45606 vsize,
45607 mm ? get_mm_rss(mm) : 0,
45608 rsslim,
45609+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45610+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
45611+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
45612+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
45613+#else
45614 mm ? (permitted ? mm->start_code : 1) : 0,
45615 mm ? (permitted ? mm->end_code : 1) : 0,
45616 (permitted && mm) ? mm->start_stack : 0,
45617+#endif
45618 esp,
45619 eip,
45620 /* The signal information here is obsolete.
45621@@ -535,6 +592,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45622 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
45623 struct mm_struct *mm = get_task_mm(task);
45624
45625+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45626+ if (current->exec_id != m->exec_id) {
45627+ gr_log_badprocpid("statm");
45628+ return 0;
45629+ }
45630+#endif
45631+
45632 if (mm) {
45633 size = task_statm(mm, &shared, &text, &data, &resident);
45634 mmput(mm);
45635@@ -544,3 +608,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45636
45637 return 0;
45638 }
45639+
45640+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45641+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
45642+{
45643+ u32 curr_ip = 0;
45644+ unsigned long flags;
45645+
45646+ if (lock_task_sighand(task, &flags)) {
45647+ curr_ip = task->signal->curr_ip;
45648+ unlock_task_sighand(task, &flags);
45649+ }
45650+
45651+ return sprintf(buffer, "%pI4\n", &curr_ip);
45652+}
45653+#endif
45654diff --git a/fs/proc/base.c b/fs/proc/base.c
45655index 1ace83d..f5e575d 100644
45656--- a/fs/proc/base.c
45657+++ b/fs/proc/base.c
45658@@ -107,6 +107,22 @@ struct pid_entry {
45659 union proc_op op;
45660 };
45661
45662+struct getdents_callback {
45663+ struct linux_dirent __user * current_dir;
45664+ struct linux_dirent __user * previous;
45665+ struct file * file;
45666+ int count;
45667+ int error;
45668+};
45669+
45670+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
45671+ loff_t offset, u64 ino, unsigned int d_type)
45672+{
45673+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
45674+ buf->error = -EINVAL;
45675+ return 0;
45676+}
45677+
45678 #define NOD(NAME, MODE, IOP, FOP, OP) { \
45679 .name = (NAME), \
45680 .len = sizeof(NAME) - 1, \
45681@@ -194,26 +210,6 @@ static int proc_root_link(struct inode *inode, struct path *path)
45682 return result;
45683 }
45684
45685-static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
45686-{
45687- struct mm_struct *mm;
45688- int err;
45689-
45690- err = mutex_lock_killable(&task->signal->cred_guard_mutex);
45691- if (err)
45692- return ERR_PTR(err);
45693-
45694- mm = get_task_mm(task);
45695- if (mm && mm != current->mm &&
45696- !ptrace_may_access(task, mode)) {
45697- mmput(mm);
45698- mm = ERR_PTR(-EACCES);
45699- }
45700- mutex_unlock(&task->signal->cred_guard_mutex);
45701-
45702- return mm;
45703-}
45704-
45705 struct mm_struct *mm_for_maps(struct task_struct *task)
45706 {
45707 return mm_access(task, PTRACE_MODE_READ);
45708@@ -229,6 +225,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
45709 if (!mm->arg_end)
45710 goto out_mm; /* Shh! No looking before we're done */
45711
45712+ if (gr_acl_handle_procpidmem(task))
45713+ goto out_mm;
45714+
45715 len = mm->arg_end - mm->arg_start;
45716
45717 if (len > PAGE_SIZE)
45718@@ -256,12 +255,28 @@ out:
45719 return res;
45720 }
45721
45722+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45723+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45724+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45725+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45726+#endif
45727+
45728 static int proc_pid_auxv(struct task_struct *task, char *buffer)
45729 {
45730 struct mm_struct *mm = mm_for_maps(task);
45731 int res = PTR_ERR(mm);
45732 if (mm && !IS_ERR(mm)) {
45733 unsigned int nwords = 0;
45734+
45735+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45736+ /* allow if we're currently ptracing this task */
45737+ if (PAX_RAND_FLAGS(mm) &&
45738+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
45739+ mmput(mm);
45740+ return 0;
45741+ }
45742+#endif
45743+
45744 do {
45745 nwords += 2;
45746 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
45747@@ -275,7 +290,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
45748 }
45749
45750
45751-#ifdef CONFIG_KALLSYMS
45752+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45753 /*
45754 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
45755 * Returns the resolved symbol. If that fails, simply return the address.
45756@@ -314,7 +329,7 @@ static void unlock_trace(struct task_struct *task)
45757 mutex_unlock(&task->signal->cred_guard_mutex);
45758 }
45759
45760-#ifdef CONFIG_STACKTRACE
45761+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45762
45763 #define MAX_STACK_TRACE_DEPTH 64
45764
45765@@ -505,7 +520,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
45766 return count;
45767 }
45768
45769-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45770+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45771 static int proc_pid_syscall(struct task_struct *task, char *buffer)
45772 {
45773 long nr;
45774@@ -534,7 +549,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
45775 /************************************************************************/
45776
45777 /* permission checks */
45778-static int proc_fd_access_allowed(struct inode *inode)
45779+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
45780 {
45781 struct task_struct *task;
45782 int allowed = 0;
45783@@ -544,7 +559,10 @@ static int proc_fd_access_allowed(struct inode *inode)
45784 */
45785 task = get_proc_task(inode);
45786 if (task) {
45787- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45788+ if (log)
45789+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
45790+ else
45791+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45792 put_task_struct(task);
45793 }
45794 return allowed;
45795@@ -786,6 +804,10 @@ static int mem_open(struct inode* inode, struct file* file)
45796 file->f_mode |= FMODE_UNSIGNED_OFFSET;
45797 file->private_data = mm;
45798
45799+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45800+ file->f_version = current->exec_id;
45801+#endif
45802+
45803 return 0;
45804 }
45805
45806@@ -797,6 +819,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
45807 ssize_t copied;
45808 char *page;
45809
45810+#ifdef CONFIG_GRKERNSEC
45811+ if (write)
45812+ return -EPERM;
45813+#endif
45814+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45815+ if (file->f_version != current->exec_id) {
45816+ gr_log_badprocpid("mem");
45817+ return 0;
45818+ }
45819+#endif
45820+
45821 if (!mm)
45822 return 0;
45823
45824@@ -897,6 +930,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
45825 if (!task)
45826 goto out_no_task;
45827
45828+ if (gr_acl_handle_procpidmem(task))
45829+ goto out;
45830+
45831 ret = -ENOMEM;
45832 page = (char *)__get_free_page(GFP_TEMPORARY);
45833 if (!page)
45834@@ -1519,7 +1555,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
45835 path_put(&nd->path);
45836
45837 /* Are we allowed to snoop on the tasks file descriptors? */
45838- if (!proc_fd_access_allowed(inode))
45839+ if (!proc_fd_access_allowed(inode,0))
45840 goto out;
45841
45842 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
45843@@ -1558,8 +1594,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
45844 struct path path;
45845
45846 /* Are we allowed to snoop on the tasks file descriptors? */
45847- if (!proc_fd_access_allowed(inode))
45848- goto out;
45849+ /* logging this is needed for learning on chromium to work properly,
45850+ but we don't want to flood the logs from 'ps' which does a readlink
45851+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
45852+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
45853+ */
45854+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
45855+ if (!proc_fd_access_allowed(inode,0))
45856+ goto out;
45857+ } else {
45858+ if (!proc_fd_access_allowed(inode,1))
45859+ goto out;
45860+ }
45861
45862 error = PROC_I(inode)->op.proc_get_link(inode, &path);
45863 if (error)
45864@@ -1624,7 +1670,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
45865 rcu_read_lock();
45866 cred = __task_cred(task);
45867 inode->i_uid = cred->euid;
45868+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45869+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45870+#else
45871 inode->i_gid = cred->egid;
45872+#endif
45873 rcu_read_unlock();
45874 }
45875 security_task_to_inode(task, inode);
45876@@ -1642,6 +1692,9 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
45877 struct inode *inode = dentry->d_inode;
45878 struct task_struct *task;
45879 const struct cred *cred;
45880+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45881+ const struct cred *tmpcred = current_cred();
45882+#endif
45883
45884 generic_fillattr(inode, stat);
45885
45886@@ -1649,13 +1702,41 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
45887 stat->uid = 0;
45888 stat->gid = 0;
45889 task = pid_task(proc_pid(inode), PIDTYPE_PID);
45890+
45891+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
45892+ rcu_read_unlock();
45893+ return -ENOENT;
45894+ }
45895+
45896 if (task) {
45897+ cred = __task_cred(task);
45898+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45899+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
45900+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45901+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45902+#endif
45903+ ) {
45904+#endif
45905 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45906+#ifdef CONFIG_GRKERNSEC_PROC_USER
45907+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45908+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45909+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45910+#endif
45911 task_dumpable(task)) {
45912- cred = __task_cred(task);
45913 stat->uid = cred->euid;
45914+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45915+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
45916+#else
45917 stat->gid = cred->egid;
45918+#endif
45919 }
45920+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45921+ } else {
45922+ rcu_read_unlock();
45923+ return -ENOENT;
45924+ }
45925+#endif
45926 }
45927 rcu_read_unlock();
45928 return 0;
45929@@ -1692,11 +1773,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
45930
45931 if (task) {
45932 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45933+#ifdef CONFIG_GRKERNSEC_PROC_USER
45934+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45935+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45936+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45937+#endif
45938 task_dumpable(task)) {
45939 rcu_read_lock();
45940 cred = __task_cred(task);
45941 inode->i_uid = cred->euid;
45942+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45943+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45944+#else
45945 inode->i_gid = cred->egid;
45946+#endif
45947 rcu_read_unlock();
45948 } else {
45949 inode->i_uid = 0;
45950@@ -1814,7 +1904,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
45951 int fd = proc_fd(inode);
45952
45953 if (task) {
45954- files = get_files_struct(task);
45955+ if (!gr_acl_handle_procpidmem(task))
45956+ files = get_files_struct(task);
45957 put_task_struct(task);
45958 }
45959 if (files) {
45960@@ -2082,11 +2173,21 @@ static const struct file_operations proc_fd_operations = {
45961 */
45962 static int proc_fd_permission(struct inode *inode, int mask)
45963 {
45964+ struct task_struct *task;
45965 int rv = generic_permission(inode, mask);
45966- if (rv == 0)
45967- return 0;
45968+
45969 if (task_pid(current) == proc_pid(inode))
45970 rv = 0;
45971+
45972+ task = get_proc_task(inode);
45973+ if (task == NULL)
45974+ return rv;
45975+
45976+ if (gr_acl_handle_procpidmem(task))
45977+ rv = -EACCES;
45978+
45979+ put_task_struct(task);
45980+
45981 return rv;
45982 }
45983
45984@@ -2196,6 +2297,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
45985 if (!task)
45986 goto out_no_task;
45987
45988+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45989+ goto out;
45990+
45991 /*
45992 * Yes, it does not scale. And it should not. Don't add
45993 * new entries into /proc/<tgid>/ without very good reasons.
45994@@ -2240,6 +2344,9 @@ static int proc_pident_readdir(struct file *filp,
45995 if (!task)
45996 goto out_no_task;
45997
45998+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45999+ goto out;
46000+
46001 ret = 0;
46002 i = filp->f_pos;
46003 switch (i) {
46004@@ -2510,7 +2617,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
46005 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
46006 void *cookie)
46007 {
46008- char *s = nd_get_link(nd);
46009+ const char *s = nd_get_link(nd);
46010 if (!IS_ERR(s))
46011 __putname(s);
46012 }
46013@@ -2708,7 +2815,7 @@ static const struct pid_entry tgid_base_stuff[] = {
46014 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
46015 #endif
46016 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
46017-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
46018+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
46019 INF("syscall", S_IRUGO, proc_pid_syscall),
46020 #endif
46021 INF("cmdline", S_IRUGO, proc_pid_cmdline),
46022@@ -2733,10 +2840,10 @@ static const struct pid_entry tgid_base_stuff[] = {
46023 #ifdef CONFIG_SECURITY
46024 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
46025 #endif
46026-#ifdef CONFIG_KALLSYMS
46027+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46028 INF("wchan", S_IRUGO, proc_pid_wchan),
46029 #endif
46030-#ifdef CONFIG_STACKTRACE
46031+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46032 ONE("stack", S_IRUGO, proc_pid_stack),
46033 #endif
46034 #ifdef CONFIG_SCHEDSTATS
46035@@ -2770,6 +2877,9 @@ static const struct pid_entry tgid_base_stuff[] = {
46036 #ifdef CONFIG_HARDWALL
46037 INF("hardwall", S_IRUGO, proc_pid_hardwall),
46038 #endif
46039+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
46040+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
46041+#endif
46042 };
46043
46044 static int proc_tgid_base_readdir(struct file * filp,
46045@@ -2895,7 +3005,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
46046 if (!inode)
46047 goto out;
46048
46049+#ifdef CONFIG_GRKERNSEC_PROC_USER
46050+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
46051+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46052+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
46053+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
46054+#else
46055 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
46056+#endif
46057 inode->i_op = &proc_tgid_base_inode_operations;
46058 inode->i_fop = &proc_tgid_base_operations;
46059 inode->i_flags|=S_IMMUTABLE;
46060@@ -2937,7 +3054,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
46061 if (!task)
46062 goto out;
46063
46064+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
46065+ goto out_put_task;
46066+
46067 result = proc_pid_instantiate(dir, dentry, task, NULL);
46068+out_put_task:
46069 put_task_struct(task);
46070 out:
46071 return result;
46072@@ -3002,6 +3123,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
46073 {
46074 unsigned int nr;
46075 struct task_struct *reaper;
46076+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46077+ const struct cred *tmpcred = current_cred();
46078+ const struct cred *itercred;
46079+#endif
46080+ filldir_t __filldir = filldir;
46081 struct tgid_iter iter;
46082 struct pid_namespace *ns;
46083
46084@@ -3025,8 +3151,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
46085 for (iter = next_tgid(ns, iter);
46086 iter.task;
46087 iter.tgid += 1, iter = next_tgid(ns, iter)) {
46088+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46089+ rcu_read_lock();
46090+ itercred = __task_cred(iter.task);
46091+#endif
46092+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
46093+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46094+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
46095+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
46096+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
46097+#endif
46098+ )
46099+#endif
46100+ )
46101+ __filldir = &gr_fake_filldir;
46102+ else
46103+ __filldir = filldir;
46104+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46105+ rcu_read_unlock();
46106+#endif
46107 filp->f_pos = iter.tgid + TGID_OFFSET;
46108- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
46109+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
46110 put_task_struct(iter.task);
46111 goto out;
46112 }
46113@@ -3054,7 +3199,7 @@ static const struct pid_entry tid_base_stuff[] = {
46114 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
46115 #endif
46116 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
46117-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
46118+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
46119 INF("syscall", S_IRUGO, proc_pid_syscall),
46120 #endif
46121 INF("cmdline", S_IRUGO, proc_pid_cmdline),
46122@@ -3078,10 +3223,10 @@ static const struct pid_entry tid_base_stuff[] = {
46123 #ifdef CONFIG_SECURITY
46124 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
46125 #endif
46126-#ifdef CONFIG_KALLSYMS
46127+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46128 INF("wchan", S_IRUGO, proc_pid_wchan),
46129 #endif
46130-#ifdef CONFIG_STACKTRACE
46131+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46132 ONE("stack", S_IRUGO, proc_pid_stack),
46133 #endif
46134 #ifdef CONFIG_SCHEDSTATS
46135diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
46136index 82676e3..5f8518a 100644
46137--- a/fs/proc/cmdline.c
46138+++ b/fs/proc/cmdline.c
46139@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
46140
46141 static int __init proc_cmdline_init(void)
46142 {
46143+#ifdef CONFIG_GRKERNSEC_PROC_ADD
46144+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
46145+#else
46146 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
46147+#endif
46148 return 0;
46149 }
46150 module_init(proc_cmdline_init);
46151diff --git a/fs/proc/devices.c b/fs/proc/devices.c
46152index b143471..bb105e5 100644
46153--- a/fs/proc/devices.c
46154+++ b/fs/proc/devices.c
46155@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
46156
46157 static int __init proc_devices_init(void)
46158 {
46159+#ifdef CONFIG_GRKERNSEC_PROC_ADD
46160+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
46161+#else
46162 proc_create("devices", 0, NULL, &proc_devinfo_operations);
46163+#endif
46164 return 0;
46165 }
46166 module_init(proc_devices_init);
46167diff --git a/fs/proc/inode.c b/fs/proc/inode.c
46168index 7737c54..7172574 100644
46169--- a/fs/proc/inode.c
46170+++ b/fs/proc/inode.c
46171@@ -18,12 +18,18 @@
46172 #include <linux/module.h>
46173 #include <linux/sysctl.h>
46174 #include <linux/slab.h>
46175+#include <linux/grsecurity.h>
46176
46177 #include <asm/system.h>
46178 #include <asm/uaccess.h>
46179
46180 #include "internal.h"
46181
46182+#ifdef CONFIG_PROC_SYSCTL
46183+extern const struct inode_operations proc_sys_inode_operations;
46184+extern const struct inode_operations proc_sys_dir_operations;
46185+#endif
46186+
46187 static void proc_evict_inode(struct inode *inode)
46188 {
46189 struct proc_dir_entry *de;
46190@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
46191 ns_ops = PROC_I(inode)->ns_ops;
46192 if (ns_ops && ns_ops->put)
46193 ns_ops->put(PROC_I(inode)->ns);
46194+
46195+#ifdef CONFIG_PROC_SYSCTL
46196+ if (inode->i_op == &proc_sys_inode_operations ||
46197+ inode->i_op == &proc_sys_dir_operations)
46198+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
46199+#endif
46200+
46201 }
46202
46203 static struct kmem_cache * proc_inode_cachep;
46204@@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
46205 if (de->mode) {
46206 inode->i_mode = de->mode;
46207 inode->i_uid = de->uid;
46208+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
46209+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
46210+#else
46211 inode->i_gid = de->gid;
46212+#endif
46213 }
46214 if (de->size)
46215 inode->i_size = de->size;
46216diff --git a/fs/proc/internal.h b/fs/proc/internal.h
46217index 7838e5c..ff92cbc 100644
46218--- a/fs/proc/internal.h
46219+++ b/fs/proc/internal.h
46220@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
46221 struct pid *pid, struct task_struct *task);
46222 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
46223 struct pid *pid, struct task_struct *task);
46224+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
46225+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
46226+#endif
46227 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
46228
46229 extern const struct file_operations proc_maps_operations;
46230diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
46231index d245cb2..f4e8498 100644
46232--- a/fs/proc/kcore.c
46233+++ b/fs/proc/kcore.c
46234@@ -478,9 +478,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
46235 * the addresses in the elf_phdr on our list.
46236 */
46237 start = kc_offset_to_vaddr(*fpos - elf_buflen);
46238- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
46239+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
46240+ if (tsz > buflen)
46241 tsz = buflen;
46242-
46243+
46244 while (buflen) {
46245 struct kcore_list *m;
46246
46247@@ -509,20 +510,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
46248 kfree(elf_buf);
46249 } else {
46250 if (kern_addr_valid(start)) {
46251- unsigned long n;
46252+ char *elf_buf;
46253+ mm_segment_t oldfs;
46254
46255- n = copy_to_user(buffer, (char *)start, tsz);
46256- /*
46257- * We cannot distingush between fault on source
46258- * and fault on destination. When this happens
46259- * we clear too and hope it will trigger the
46260- * EFAULT again.
46261- */
46262- if (n) {
46263- if (clear_user(buffer + tsz - n,
46264- n))
46265+ elf_buf = kmalloc(tsz, GFP_KERNEL);
46266+ if (!elf_buf)
46267+ return -ENOMEM;
46268+ oldfs = get_fs();
46269+ set_fs(KERNEL_DS);
46270+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
46271+ set_fs(oldfs);
46272+ if (copy_to_user(buffer, elf_buf, tsz)) {
46273+ kfree(elf_buf);
46274 return -EFAULT;
46275+ }
46276 }
46277+ set_fs(oldfs);
46278+ kfree(elf_buf);
46279 } else {
46280 if (clear_user(buffer, tsz))
46281 return -EFAULT;
46282@@ -542,6 +546,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
46283
46284 static int open_kcore(struct inode *inode, struct file *filp)
46285 {
46286+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
46287+ return -EPERM;
46288+#endif
46289 if (!capable(CAP_SYS_RAWIO))
46290 return -EPERM;
46291 if (kcore_need_update)
46292diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
46293index 80e4645..53e5fcf 100644
46294--- a/fs/proc/meminfo.c
46295+++ b/fs/proc/meminfo.c
46296@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
46297 vmi.used >> 10,
46298 vmi.largest_chunk >> 10
46299 #ifdef CONFIG_MEMORY_FAILURE
46300- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
46301+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
46302 #endif
46303 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
46304 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
46305diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
46306index b1822dd..df622cb 100644
46307--- a/fs/proc/nommu.c
46308+++ b/fs/proc/nommu.c
46309@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
46310 if (len < 1)
46311 len = 1;
46312 seq_printf(m, "%*c", len, ' ');
46313- seq_path(m, &file->f_path, "");
46314+ seq_path(m, &file->f_path, "\n\\");
46315 }
46316
46317 seq_putc(m, '\n');
46318diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
46319index f738024..876984a 100644
46320--- a/fs/proc/proc_net.c
46321+++ b/fs/proc/proc_net.c
46322@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
46323 struct task_struct *task;
46324 struct nsproxy *ns;
46325 struct net *net = NULL;
46326+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46327+ const struct cred *cred = current_cred();
46328+#endif
46329+
46330+#ifdef CONFIG_GRKERNSEC_PROC_USER
46331+ if (cred->fsuid)
46332+ return net;
46333+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46334+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
46335+ return net;
46336+#endif
46337
46338 rcu_read_lock();
46339 task = pid_task(proc_pid(dir), PIDTYPE_PID);
46340diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
46341index a6b6217..1e0579d 100644
46342--- a/fs/proc/proc_sysctl.c
46343+++ b/fs/proc/proc_sysctl.c
46344@@ -9,11 +9,13 @@
46345 #include <linux/namei.h>
46346 #include "internal.h"
46347
46348+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
46349+
46350 static const struct dentry_operations proc_sys_dentry_operations;
46351 static const struct file_operations proc_sys_file_operations;
46352-static const struct inode_operations proc_sys_inode_operations;
46353+const struct inode_operations proc_sys_inode_operations;
46354 static const struct file_operations proc_sys_dir_file_operations;
46355-static const struct inode_operations proc_sys_dir_operations;
46356+const struct inode_operations proc_sys_dir_operations;
46357
46358 void proc_sys_poll_notify(struct ctl_table_poll *poll)
46359 {
46360@@ -131,8 +133,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
46361
46362 err = NULL;
46363 d_set_d_op(dentry, &proc_sys_dentry_operations);
46364+
46365+ gr_handle_proc_create(dentry, inode);
46366+
46367 d_add(dentry, inode);
46368
46369+ if (gr_handle_sysctl(p, MAY_EXEC))
46370+ err = ERR_PTR(-ENOENT);
46371+
46372 out:
46373 sysctl_head_finish(head);
46374 return err;
46375@@ -163,6 +171,12 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
46376 if (!table->proc_handler)
46377 goto out;
46378
46379+#ifdef CONFIG_GRKERNSEC
46380+ error = -EPERM;
46381+ if (write && !capable(CAP_SYS_ADMIN))
46382+ goto out;
46383+#endif
46384+
46385 /* careful: calling conventions are nasty here */
46386 res = count;
46387 error = table->proc_handler(table, write, buf, &res, ppos);
46388@@ -245,6 +259,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
46389 return -ENOMEM;
46390 } else {
46391 d_set_d_op(child, &proc_sys_dentry_operations);
46392+
46393+ gr_handle_proc_create(child, inode);
46394+
46395 d_add(child, inode);
46396 }
46397 } else {
46398@@ -273,6 +290,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
46399 if (*pos < file->f_pos)
46400 continue;
46401
46402+ if (gr_handle_sysctl(table, 0))
46403+ continue;
46404+
46405 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
46406 if (res)
46407 return res;
46408@@ -398,6 +418,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
46409 if (IS_ERR(head))
46410 return PTR_ERR(head);
46411
46412+ if (table && gr_handle_sysctl(table, MAY_EXEC))
46413+ return -ENOENT;
46414+
46415 generic_fillattr(inode, stat);
46416 if (table)
46417 stat->mode = (stat->mode & S_IFMT) | table->mode;
46418@@ -420,13 +443,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
46419 .llseek = generic_file_llseek,
46420 };
46421
46422-static const struct inode_operations proc_sys_inode_operations = {
46423+const struct inode_operations proc_sys_inode_operations = {
46424 .permission = proc_sys_permission,
46425 .setattr = proc_sys_setattr,
46426 .getattr = proc_sys_getattr,
46427 };
46428
46429-static const struct inode_operations proc_sys_dir_operations = {
46430+const struct inode_operations proc_sys_dir_operations = {
46431 .lookup = proc_sys_lookup,
46432 .permission = proc_sys_permission,
46433 .setattr = proc_sys_setattr,
46434diff --git a/fs/proc/root.c b/fs/proc/root.c
46435index 03102d9..4ae347e 100644
46436--- a/fs/proc/root.c
46437+++ b/fs/proc/root.c
46438@@ -121,7 +121,15 @@ void __init proc_root_init(void)
46439 #ifdef CONFIG_PROC_DEVICETREE
46440 proc_device_tree_init();
46441 #endif
46442+#ifdef CONFIG_GRKERNSEC_PROC_ADD
46443+#ifdef CONFIG_GRKERNSEC_PROC_USER
46444+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
46445+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46446+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
46447+#endif
46448+#else
46449 proc_mkdir("bus", NULL);
46450+#endif
46451 proc_sys_init();
46452 }
46453
46454diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
46455index 7dcd2a2..b2f410e 100644
46456--- a/fs/proc/task_mmu.c
46457+++ b/fs/proc/task_mmu.c
46458@@ -11,6 +11,7 @@
46459 #include <linux/rmap.h>
46460 #include <linux/swap.h>
46461 #include <linux/swapops.h>
46462+#include <linux/grsecurity.h>
46463
46464 #include <asm/elf.h>
46465 #include <asm/uaccess.h>
46466@@ -52,8 +53,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46467 "VmExe:\t%8lu kB\n"
46468 "VmLib:\t%8lu kB\n"
46469 "VmPTE:\t%8lu kB\n"
46470- "VmSwap:\t%8lu kB\n",
46471- hiwater_vm << (PAGE_SHIFT-10),
46472+ "VmSwap:\t%8lu kB\n"
46473+
46474+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46475+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
46476+#endif
46477+
46478+ ,hiwater_vm << (PAGE_SHIFT-10),
46479 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
46480 mm->locked_vm << (PAGE_SHIFT-10),
46481 mm->pinned_vm << (PAGE_SHIFT-10),
46482@@ -62,7 +68,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46483 data << (PAGE_SHIFT-10),
46484 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
46485 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
46486- swap << (PAGE_SHIFT-10));
46487+ swap << (PAGE_SHIFT-10)
46488+
46489+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46490+ , mm->context.user_cs_base, mm->context.user_cs_limit
46491+#endif
46492+
46493+ );
46494 }
46495
46496 unsigned long task_vsize(struct mm_struct *mm)
46497@@ -209,6 +221,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
46498 return ret;
46499 }
46500
46501+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46502+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46503+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
46504+ _mm->pax_flags & MF_PAX_SEGMEXEC))
46505+#endif
46506+
46507 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46508 {
46509 struct mm_struct *mm = vma->vm_mm;
46510@@ -227,13 +245,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46511 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
46512 }
46513
46514- /* We don't show the stack guard page in /proc/maps */
46515+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46516+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
46517+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
46518+#else
46519 start = vma->vm_start;
46520- if (stack_guard_page_start(vma, start))
46521- start += PAGE_SIZE;
46522 end = vma->vm_end;
46523- if (stack_guard_page_end(vma, end))
46524- end -= PAGE_SIZE;
46525+#endif
46526
46527 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
46528 start,
46529@@ -242,7 +260,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46530 flags & VM_WRITE ? 'w' : '-',
46531 flags & VM_EXEC ? 'x' : '-',
46532 flags & VM_MAYSHARE ? 's' : 'p',
46533+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46534+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
46535+#else
46536 pgoff,
46537+#endif
46538 MAJOR(dev), MINOR(dev), ino, &len);
46539
46540 /*
46541@@ -251,7 +273,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46542 */
46543 if (file) {
46544 pad_len_spaces(m, len);
46545- seq_path(m, &file->f_path, "\n");
46546+ seq_path(m, &file->f_path, "\n\\");
46547 } else {
46548 const char *name = arch_vma_name(vma);
46549 if (!name) {
46550@@ -259,8 +281,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46551 if (vma->vm_start <= mm->brk &&
46552 vma->vm_end >= mm->start_brk) {
46553 name = "[heap]";
46554- } else if (vma->vm_start <= mm->start_stack &&
46555- vma->vm_end >= mm->start_stack) {
46556+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
46557+ (vma->vm_start <= mm->start_stack &&
46558+ vma->vm_end >= mm->start_stack)) {
46559 name = "[stack]";
46560 }
46561 } else {
46562@@ -281,6 +304,13 @@ static int show_map(struct seq_file *m, void *v)
46563 struct proc_maps_private *priv = m->private;
46564 struct task_struct *task = priv->task;
46565
46566+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46567+ if (current->exec_id != m->exec_id) {
46568+ gr_log_badprocpid("maps");
46569+ return 0;
46570+ }
46571+#endif
46572+
46573 show_map_vma(m, vma);
46574
46575 if (m->count < m->size) /* vma is copied successfully */
46576@@ -434,12 +464,23 @@ static int show_smap(struct seq_file *m, void *v)
46577 .private = &mss,
46578 };
46579
46580+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46581+ if (current->exec_id != m->exec_id) {
46582+ gr_log_badprocpid("smaps");
46583+ return 0;
46584+ }
46585+#endif
46586 memset(&mss, 0, sizeof mss);
46587- mss.vma = vma;
46588- /* mmap_sem is held in m_start */
46589- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46590- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46591-
46592+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46593+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
46594+#endif
46595+ mss.vma = vma;
46596+ /* mmap_sem is held in m_start */
46597+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46598+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46599+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46600+ }
46601+#endif
46602 show_map_vma(m, vma);
46603
46604 seq_printf(m,
46605@@ -457,7 +498,11 @@ static int show_smap(struct seq_file *m, void *v)
46606 "KernelPageSize: %8lu kB\n"
46607 "MMUPageSize: %8lu kB\n"
46608 "Locked: %8lu kB\n",
46609+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46610+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
46611+#else
46612 (vma->vm_end - vma->vm_start) >> 10,
46613+#endif
46614 mss.resident >> 10,
46615 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
46616 mss.shared_clean >> 10,
46617@@ -1015,6 +1060,13 @@ static int show_numa_map(struct seq_file *m, void *v)
46618 int n;
46619 char buffer[50];
46620
46621+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46622+ if (current->exec_id != m->exec_id) {
46623+ gr_log_badprocpid("numa_maps");
46624+ return 0;
46625+ }
46626+#endif
46627+
46628 if (!mm)
46629 return 0;
46630
46631@@ -1032,11 +1084,15 @@ static int show_numa_map(struct seq_file *m, void *v)
46632 mpol_to_str(buffer, sizeof(buffer), pol, 0);
46633 mpol_cond_put(pol);
46634
46635+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46636+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
46637+#else
46638 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
46639+#endif
46640
46641 if (file) {
46642 seq_printf(m, " file=");
46643- seq_path(m, &file->f_path, "\n\t= ");
46644+ seq_path(m, &file->f_path, "\n\t\\= ");
46645 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
46646 seq_printf(m, " heap");
46647 } else if (vma->vm_start <= mm->start_stack &&
46648diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
46649index 980de54..2a4db5f 100644
46650--- a/fs/proc/task_nommu.c
46651+++ b/fs/proc/task_nommu.c
46652@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46653 else
46654 bytes += kobjsize(mm);
46655
46656- if (current->fs && current->fs->users > 1)
46657+ if (current->fs && atomic_read(&current->fs->users) > 1)
46658 sbytes += kobjsize(current->fs);
46659 else
46660 bytes += kobjsize(current->fs);
46661@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
46662
46663 if (file) {
46664 pad_len_spaces(m, len);
46665- seq_path(m, &file->f_path, "");
46666+ seq_path(m, &file->f_path, "\n\\");
46667 } else if (mm) {
46668 if (vma->vm_start <= mm->start_stack &&
46669 vma->vm_end >= mm->start_stack) {
46670diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
46671index d67908b..d13f6a6 100644
46672--- a/fs/quota/netlink.c
46673+++ b/fs/quota/netlink.c
46674@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
46675 void quota_send_warning(short type, unsigned int id, dev_t dev,
46676 const char warntype)
46677 {
46678- static atomic_t seq;
46679+ static atomic_unchecked_t seq;
46680 struct sk_buff *skb;
46681 void *msg_head;
46682 int ret;
46683@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
46684 "VFS: Not enough memory to send quota warning.\n");
46685 return;
46686 }
46687- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
46688+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
46689 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
46690 if (!msg_head) {
46691 printk(KERN_ERR
46692diff --git a/fs/readdir.c b/fs/readdir.c
46693index 356f715..c918d38 100644
46694--- a/fs/readdir.c
46695+++ b/fs/readdir.c
46696@@ -17,6 +17,7 @@
46697 #include <linux/security.h>
46698 #include <linux/syscalls.h>
46699 #include <linux/unistd.h>
46700+#include <linux/namei.h>
46701
46702 #include <asm/uaccess.h>
46703
46704@@ -67,6 +68,7 @@ struct old_linux_dirent {
46705
46706 struct readdir_callback {
46707 struct old_linux_dirent __user * dirent;
46708+ struct file * file;
46709 int result;
46710 };
46711
46712@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
46713 buf->result = -EOVERFLOW;
46714 return -EOVERFLOW;
46715 }
46716+
46717+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46718+ return 0;
46719+
46720 buf->result++;
46721 dirent = buf->dirent;
46722 if (!access_ok(VERIFY_WRITE, dirent,
46723@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
46724
46725 buf.result = 0;
46726 buf.dirent = dirent;
46727+ buf.file = file;
46728
46729 error = vfs_readdir(file, fillonedir, &buf);
46730 if (buf.result)
46731@@ -142,6 +149,7 @@ struct linux_dirent {
46732 struct getdents_callback {
46733 struct linux_dirent __user * current_dir;
46734 struct linux_dirent __user * previous;
46735+ struct file * file;
46736 int count;
46737 int error;
46738 };
46739@@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
46740 buf->error = -EOVERFLOW;
46741 return -EOVERFLOW;
46742 }
46743+
46744+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46745+ return 0;
46746+
46747 dirent = buf->previous;
46748 if (dirent) {
46749 if (__put_user(offset, &dirent->d_off))
46750@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
46751 buf.previous = NULL;
46752 buf.count = count;
46753 buf.error = 0;
46754+ buf.file = file;
46755
46756 error = vfs_readdir(file, filldir, &buf);
46757 if (error >= 0)
46758@@ -229,6 +242,7 @@ out:
46759 struct getdents_callback64 {
46760 struct linux_dirent64 __user * current_dir;
46761 struct linux_dirent64 __user * previous;
46762+ struct file *file;
46763 int count;
46764 int error;
46765 };
46766@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
46767 buf->error = -EINVAL; /* only used if we fail.. */
46768 if (reclen > buf->count)
46769 return -EINVAL;
46770+
46771+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46772+ return 0;
46773+
46774 dirent = buf->previous;
46775 if (dirent) {
46776 if (__put_user(offset, &dirent->d_off))
46777@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
46778
46779 buf.current_dir = dirent;
46780 buf.previous = NULL;
46781+ buf.file = file;
46782 buf.count = count;
46783 buf.error = 0;
46784
46785@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
46786 error = buf.error;
46787 lastdirent = buf.previous;
46788 if (lastdirent) {
46789- typeof(lastdirent->d_off) d_off = file->f_pos;
46790+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
46791 if (__put_user(d_off, &lastdirent->d_off))
46792 error = -EFAULT;
46793 else
46794diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
46795index 60c0804..d814f98 100644
46796--- a/fs/reiserfs/do_balan.c
46797+++ b/fs/reiserfs/do_balan.c
46798@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
46799 return;
46800 }
46801
46802- atomic_inc(&(fs_generation(tb->tb_sb)));
46803+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
46804 do_balance_starts(tb);
46805
46806 /* balance leaf returns 0 except if combining L R and S into
46807diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
46808index 7a99811..a7c96c4 100644
46809--- a/fs/reiserfs/procfs.c
46810+++ b/fs/reiserfs/procfs.c
46811@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
46812 "SMALL_TAILS " : "NO_TAILS ",
46813 replay_only(sb) ? "REPLAY_ONLY " : "",
46814 convert_reiserfs(sb) ? "CONV " : "",
46815- atomic_read(&r->s_generation_counter),
46816+ atomic_read_unchecked(&r->s_generation_counter),
46817 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
46818 SF(s_do_balance), SF(s_unneeded_left_neighbor),
46819 SF(s_good_search_by_key_reada), SF(s_bmaps),
46820diff --git a/fs/select.c b/fs/select.c
46821index d33418f..2a5345e 100644
46822--- a/fs/select.c
46823+++ b/fs/select.c
46824@@ -20,6 +20,7 @@
46825 #include <linux/module.h>
46826 #include <linux/slab.h>
46827 #include <linux/poll.h>
46828+#include <linux/security.h>
46829 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
46830 #include <linux/file.h>
46831 #include <linux/fdtable.h>
46832@@ -837,6 +838,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
46833 struct poll_list *walk = head;
46834 unsigned long todo = nfds;
46835
46836+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
46837 if (nfds > rlimit(RLIMIT_NOFILE))
46838 return -EINVAL;
46839
46840diff --git a/fs/seq_file.c b/fs/seq_file.c
46841index dba43c3..9fb8511 100644
46842--- a/fs/seq_file.c
46843+++ b/fs/seq_file.c
46844@@ -9,6 +9,7 @@
46845 #include <linux/module.h>
46846 #include <linux/seq_file.h>
46847 #include <linux/slab.h>
46848+#include <linux/sched.h>
46849
46850 #include <asm/uaccess.h>
46851 #include <asm/page.h>
46852@@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
46853 memset(p, 0, sizeof(*p));
46854 mutex_init(&p->lock);
46855 p->op = op;
46856+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46857+ p->exec_id = current->exec_id;
46858+#endif
46859
46860 /*
46861 * Wrappers around seq_open(e.g. swaps_open) need to be
46862@@ -76,7 +80,8 @@ static int traverse(struct seq_file *m, loff_t offset)
46863 return 0;
46864 }
46865 if (!m->buf) {
46866- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46867+ m->size = PAGE_SIZE;
46868+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46869 if (!m->buf)
46870 return -ENOMEM;
46871 }
46872@@ -116,7 +121,8 @@ static int traverse(struct seq_file *m, loff_t offset)
46873 Eoverflow:
46874 m->op->stop(m, p);
46875 kfree(m->buf);
46876- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46877+ m->size <<= 1;
46878+ m->buf = kmalloc(m->size, GFP_KERNEL);
46879 return !m->buf ? -ENOMEM : -EAGAIN;
46880 }
46881
46882@@ -169,7 +175,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
46883 m->version = file->f_version;
46884 /* grab buffer if we didn't have one */
46885 if (!m->buf) {
46886- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46887+ m->size = PAGE_SIZE;
46888+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46889 if (!m->buf)
46890 goto Enomem;
46891 }
46892@@ -210,7 +217,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
46893 goto Fill;
46894 m->op->stop(m, p);
46895 kfree(m->buf);
46896- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46897+ m->size <<= 1;
46898+ m->buf = kmalloc(m->size, GFP_KERNEL);
46899 if (!m->buf)
46900 goto Enomem;
46901 m->count = 0;
46902@@ -549,7 +557,7 @@ static void single_stop(struct seq_file *p, void *v)
46903 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
46904 void *data)
46905 {
46906- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
46907+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
46908 int res = -ENOMEM;
46909
46910 if (op) {
46911diff --git a/fs/splice.c b/fs/splice.c
46912index fa2defa..8601650 100644
46913--- a/fs/splice.c
46914+++ b/fs/splice.c
46915@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
46916 pipe_lock(pipe);
46917
46918 for (;;) {
46919- if (!pipe->readers) {
46920+ if (!atomic_read(&pipe->readers)) {
46921 send_sig(SIGPIPE, current, 0);
46922 if (!ret)
46923 ret = -EPIPE;
46924@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
46925 do_wakeup = 0;
46926 }
46927
46928- pipe->waiting_writers++;
46929+ atomic_inc(&pipe->waiting_writers);
46930 pipe_wait(pipe);
46931- pipe->waiting_writers--;
46932+ atomic_dec(&pipe->waiting_writers);
46933 }
46934
46935 pipe_unlock(pipe);
46936@@ -560,7 +560,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
46937 old_fs = get_fs();
46938 set_fs(get_ds());
46939 /* The cast to a user pointer is valid due to the set_fs() */
46940- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46941+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
46942 set_fs(old_fs);
46943
46944 return res;
46945@@ -575,7 +575,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
46946 old_fs = get_fs();
46947 set_fs(get_ds());
46948 /* The cast to a user pointer is valid due to the set_fs() */
46949- res = vfs_write(file, (const char __user *)buf, count, &pos);
46950+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
46951 set_fs(old_fs);
46952
46953 return res;
46954@@ -626,7 +626,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
46955 goto err;
46956
46957 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46958- vec[i].iov_base = (void __user *) page_address(page);
46959+ vec[i].iov_base = (void __force_user *) page_address(page);
46960 vec[i].iov_len = this_len;
46961 spd.pages[i] = page;
46962 spd.nr_pages++;
46963@@ -846,10 +846,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46964 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46965 {
46966 while (!pipe->nrbufs) {
46967- if (!pipe->writers)
46968+ if (!atomic_read(&pipe->writers))
46969 return 0;
46970
46971- if (!pipe->waiting_writers && sd->num_spliced)
46972+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46973 return 0;
46974
46975 if (sd->flags & SPLICE_F_NONBLOCK)
46976@@ -1182,7 +1182,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
46977 * out of the pipe right after the splice_to_pipe(). So set
46978 * PIPE_READERS appropriately.
46979 */
46980- pipe->readers = 1;
46981+ atomic_set(&pipe->readers, 1);
46982
46983 current->splice_pipe = pipe;
46984 }
46985@@ -1734,9 +1734,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
46986 ret = -ERESTARTSYS;
46987 break;
46988 }
46989- if (!pipe->writers)
46990+ if (!atomic_read(&pipe->writers))
46991 break;
46992- if (!pipe->waiting_writers) {
46993+ if (!atomic_read(&pipe->waiting_writers)) {
46994 if (flags & SPLICE_F_NONBLOCK) {
46995 ret = -EAGAIN;
46996 break;
46997@@ -1768,7 +1768,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
46998 pipe_lock(pipe);
46999
47000 while (pipe->nrbufs >= pipe->buffers) {
47001- if (!pipe->readers) {
47002+ if (!atomic_read(&pipe->readers)) {
47003 send_sig(SIGPIPE, current, 0);
47004 ret = -EPIPE;
47005 break;
47006@@ -1781,9 +1781,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
47007 ret = -ERESTARTSYS;
47008 break;
47009 }
47010- pipe->waiting_writers++;
47011+ atomic_inc(&pipe->waiting_writers);
47012 pipe_wait(pipe);
47013- pipe->waiting_writers--;
47014+ atomic_dec(&pipe->waiting_writers);
47015 }
47016
47017 pipe_unlock(pipe);
47018@@ -1819,14 +1819,14 @@ retry:
47019 pipe_double_lock(ipipe, opipe);
47020
47021 do {
47022- if (!opipe->readers) {
47023+ if (!atomic_read(&opipe->readers)) {
47024 send_sig(SIGPIPE, current, 0);
47025 if (!ret)
47026 ret = -EPIPE;
47027 break;
47028 }
47029
47030- if (!ipipe->nrbufs && !ipipe->writers)
47031+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
47032 break;
47033
47034 /*
47035@@ -1923,7 +1923,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
47036 pipe_double_lock(ipipe, opipe);
47037
47038 do {
47039- if (!opipe->readers) {
47040+ if (!atomic_read(&opipe->readers)) {
47041 send_sig(SIGPIPE, current, 0);
47042 if (!ret)
47043 ret = -EPIPE;
47044@@ -1968,7 +1968,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
47045 * return EAGAIN if we have the potential of some data in the
47046 * future, otherwise just return 0
47047 */
47048- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
47049+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
47050 ret = -EAGAIN;
47051
47052 pipe_unlock(ipipe);
47053diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
47054index 7fdf6a7..e6cd8ad 100644
47055--- a/fs/sysfs/dir.c
47056+++ b/fs/sysfs/dir.c
47057@@ -642,6 +642,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
47058 struct sysfs_dirent *sd;
47059 int rc;
47060
47061+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
47062+ const char *parent_name = parent_sd->s_name;
47063+
47064+ mode = S_IFDIR | S_IRWXU;
47065+
47066+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
47067+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
47068+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
47069+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
47070+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
47071+#endif
47072+
47073 /* allocate */
47074 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
47075 if (!sd)
47076diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
47077index 779789a..f58193c 100644
47078--- a/fs/sysfs/file.c
47079+++ b/fs/sysfs/file.c
47080@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
47081
47082 struct sysfs_open_dirent {
47083 atomic_t refcnt;
47084- atomic_t event;
47085+ atomic_unchecked_t event;
47086 wait_queue_head_t poll;
47087 struct list_head buffers; /* goes through sysfs_buffer.list */
47088 };
47089@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
47090 if (!sysfs_get_active(attr_sd))
47091 return -ENODEV;
47092
47093- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
47094+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
47095 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
47096
47097 sysfs_put_active(attr_sd);
47098@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
47099 return -ENOMEM;
47100
47101 atomic_set(&new_od->refcnt, 0);
47102- atomic_set(&new_od->event, 1);
47103+ atomic_set_unchecked(&new_od->event, 1);
47104 init_waitqueue_head(&new_od->poll);
47105 INIT_LIST_HEAD(&new_od->buffers);
47106 goto retry;
47107@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
47108
47109 sysfs_put_active(attr_sd);
47110
47111- if (buffer->event != atomic_read(&od->event))
47112+ if (buffer->event != atomic_read_unchecked(&od->event))
47113 goto trigger;
47114
47115 return DEFAULT_POLLMASK;
47116@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
47117
47118 od = sd->s_attr.open;
47119 if (od) {
47120- atomic_inc(&od->event);
47121+ atomic_inc_unchecked(&od->event);
47122 wake_up_interruptible(&od->poll);
47123 }
47124
47125diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
47126index a7ac78f..02158e1 100644
47127--- a/fs/sysfs/symlink.c
47128+++ b/fs/sysfs/symlink.c
47129@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
47130
47131 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
47132 {
47133- char *page = nd_get_link(nd);
47134+ const char *page = nd_get_link(nd);
47135 if (!IS_ERR(page))
47136 free_page((unsigned long)page);
47137 }
47138diff --git a/fs/udf/misc.c b/fs/udf/misc.c
47139index c175b4d..8f36a16 100644
47140--- a/fs/udf/misc.c
47141+++ b/fs/udf/misc.c
47142@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
47143
47144 u8 udf_tag_checksum(const struct tag *t)
47145 {
47146- u8 *data = (u8 *)t;
47147+ const u8 *data = (const u8 *)t;
47148 u8 checksum = 0;
47149 int i;
47150 for (i = 0; i < sizeof(struct tag); ++i)
47151diff --git a/fs/utimes.c b/fs/utimes.c
47152index ba653f3..06ea4b1 100644
47153--- a/fs/utimes.c
47154+++ b/fs/utimes.c
47155@@ -1,6 +1,7 @@
47156 #include <linux/compiler.h>
47157 #include <linux/file.h>
47158 #include <linux/fs.h>
47159+#include <linux/security.h>
47160 #include <linux/linkage.h>
47161 #include <linux/mount.h>
47162 #include <linux/namei.h>
47163@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
47164 goto mnt_drop_write_and_out;
47165 }
47166 }
47167+
47168+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
47169+ error = -EACCES;
47170+ goto mnt_drop_write_and_out;
47171+ }
47172+
47173 mutex_lock(&inode->i_mutex);
47174 error = notify_change(path->dentry, &newattrs);
47175 mutex_unlock(&inode->i_mutex);
47176diff --git a/fs/xattr.c b/fs/xattr.c
47177index 67583de..c5aad14 100644
47178--- a/fs/xattr.c
47179+++ b/fs/xattr.c
47180@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
47181 * Extended attribute SET operations
47182 */
47183 static long
47184-setxattr(struct dentry *d, const char __user *name, const void __user *value,
47185+setxattr(struct path *path, const char __user *name, const void __user *value,
47186 size_t size, int flags)
47187 {
47188 int error;
47189@@ -339,7 +339,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
47190 return PTR_ERR(kvalue);
47191 }
47192
47193- error = vfs_setxattr(d, kname, kvalue, size, flags);
47194+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
47195+ error = -EACCES;
47196+ goto out;
47197+ }
47198+
47199+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
47200+out:
47201 kfree(kvalue);
47202 return error;
47203 }
47204@@ -356,7 +362,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
47205 return error;
47206 error = mnt_want_write(path.mnt);
47207 if (!error) {
47208- error = setxattr(path.dentry, name, value, size, flags);
47209+ error = setxattr(&path, name, value, size, flags);
47210 mnt_drop_write(path.mnt);
47211 }
47212 path_put(&path);
47213@@ -375,7 +381,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
47214 return error;
47215 error = mnt_want_write(path.mnt);
47216 if (!error) {
47217- error = setxattr(path.dentry, name, value, size, flags);
47218+ error = setxattr(&path, name, value, size, flags);
47219 mnt_drop_write(path.mnt);
47220 }
47221 path_put(&path);
47222@@ -386,17 +392,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
47223 const void __user *,value, size_t, size, int, flags)
47224 {
47225 struct file *f;
47226- struct dentry *dentry;
47227 int error = -EBADF;
47228
47229 f = fget(fd);
47230 if (!f)
47231 return error;
47232- dentry = f->f_path.dentry;
47233- audit_inode(NULL, dentry);
47234+ audit_inode(NULL, f->f_path.dentry);
47235 error = mnt_want_write_file(f);
47236 if (!error) {
47237- error = setxattr(dentry, name, value, size, flags);
47238+ error = setxattr(&f->f_path, name, value, size, flags);
47239 mnt_drop_write(f->f_path.mnt);
47240 }
47241 fput(f);
47242diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
47243index 8d5a506..7f62712 100644
47244--- a/fs/xattr_acl.c
47245+++ b/fs/xattr_acl.c
47246@@ -17,8 +17,8 @@
47247 struct posix_acl *
47248 posix_acl_from_xattr(const void *value, size_t size)
47249 {
47250- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
47251- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
47252+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
47253+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
47254 int count;
47255 struct posix_acl *acl;
47256 struct posix_acl_entry *acl_e;
47257diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
47258index d0ab788..827999b 100644
47259--- a/fs/xfs/xfs_bmap.c
47260+++ b/fs/xfs/xfs_bmap.c
47261@@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
47262 int nmap,
47263 int ret_nmap);
47264 #else
47265-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
47266+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
47267 #endif /* DEBUG */
47268
47269 STATIC int
47270diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
47271index 79d05e8..e3e5861 100644
47272--- a/fs/xfs/xfs_dir2_sf.c
47273+++ b/fs/xfs/xfs_dir2_sf.c
47274@@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
47275 }
47276
47277 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
47278- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47279+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
47280+ char name[sfep->namelen];
47281+ memcpy(name, sfep->name, sfep->namelen);
47282+ if (filldir(dirent, name, sfep->namelen,
47283+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
47284+ *offset = off & 0x7fffffff;
47285+ return 0;
47286+ }
47287+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47288 off & 0x7fffffff, ino, DT_UNKNOWN)) {
47289 *offset = off & 0x7fffffff;
47290 return 0;
47291diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
47292index d99a905..9f88202 100644
47293--- a/fs/xfs/xfs_ioctl.c
47294+++ b/fs/xfs/xfs_ioctl.c
47295@@ -128,7 +128,7 @@ xfs_find_handle(
47296 }
47297
47298 error = -EFAULT;
47299- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
47300+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
47301 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
47302 goto out_put;
47303
47304diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
47305index 23ce927..e274cc1 100644
47306--- a/fs/xfs/xfs_iops.c
47307+++ b/fs/xfs/xfs_iops.c
47308@@ -447,7 +447,7 @@ xfs_vn_put_link(
47309 struct nameidata *nd,
47310 void *p)
47311 {
47312- char *s = nd_get_link(nd);
47313+ const char *s = nd_get_link(nd);
47314
47315 if (!IS_ERR(s))
47316 kfree(s);
47317diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
47318new file mode 100644
47319index 0000000..41df561
47320--- /dev/null
47321+++ b/grsecurity/Kconfig
47322@@ -0,0 +1,1075 @@
47323+#
47324+# grecurity configuration
47325+#
47326+
47327+menu "Grsecurity"
47328+
47329+config GRKERNSEC
47330+ bool "Grsecurity"
47331+ select CRYPTO
47332+ select CRYPTO_SHA256
47333+ help
47334+ If you say Y here, you will be able to configure many features
47335+ that will enhance the security of your system. It is highly
47336+ recommended that you say Y here and read through the help
47337+ for each option so that you fully understand the features and
47338+ can evaluate their usefulness for your machine.
47339+
47340+choice
47341+ prompt "Security Level"
47342+ depends on GRKERNSEC
47343+ default GRKERNSEC_CUSTOM
47344+
47345+config GRKERNSEC_LOW
47346+ bool "Low"
47347+ select GRKERNSEC_LINK
47348+ select GRKERNSEC_FIFO
47349+ select GRKERNSEC_RANDNET
47350+ select GRKERNSEC_DMESG
47351+ select GRKERNSEC_CHROOT
47352+ select GRKERNSEC_CHROOT_CHDIR
47353+
47354+ help
47355+ If you choose this option, several of the grsecurity options will
47356+ be enabled that will give you greater protection against a number
47357+ of attacks, while assuring that none of your software will have any
47358+ conflicts with the additional security measures. If you run a lot
47359+ of unusual software, or you are having problems with the higher
47360+ security levels, you should say Y here. With this option, the
47361+ following features are enabled:
47362+
47363+ - Linking restrictions
47364+ - FIFO restrictions
47365+ - Restricted dmesg
47366+ - Enforced chdir("/") on chroot
47367+ - Runtime module disabling
47368+
47369+config GRKERNSEC_MEDIUM
47370+ bool "Medium"
47371+ select PAX
47372+ select PAX_EI_PAX
47373+ select PAX_PT_PAX_FLAGS
47374+ select PAX_HAVE_ACL_FLAGS
47375+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
47376+ select GRKERNSEC_CHROOT
47377+ select GRKERNSEC_CHROOT_SYSCTL
47378+ select GRKERNSEC_LINK
47379+ select GRKERNSEC_FIFO
47380+ select GRKERNSEC_DMESG
47381+ select GRKERNSEC_RANDNET
47382+ select GRKERNSEC_FORKFAIL
47383+ select GRKERNSEC_TIME
47384+ select GRKERNSEC_SIGNAL
47385+ select GRKERNSEC_CHROOT
47386+ select GRKERNSEC_CHROOT_UNIX
47387+ select GRKERNSEC_CHROOT_MOUNT
47388+ select GRKERNSEC_CHROOT_PIVOT
47389+ select GRKERNSEC_CHROOT_DOUBLE
47390+ select GRKERNSEC_CHROOT_CHDIR
47391+ select GRKERNSEC_CHROOT_MKNOD
47392+ select GRKERNSEC_PROC
47393+ select GRKERNSEC_PROC_USERGROUP
47394+ select PAX_RANDUSTACK
47395+ select PAX_ASLR
47396+ select PAX_RANDMMAP
47397+ select PAX_REFCOUNT if (X86 || SPARC64)
47398+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
47399+
47400+ help
47401+ If you say Y here, several features in addition to those included
47402+ in the low additional security level will be enabled. These
47403+ features provide even more security to your system, though in rare
47404+ cases they may be incompatible with very old or poorly written
47405+ software. If you enable this option, make sure that your auth
47406+ service (identd) is running as gid 1001. With this option,
47407+ the following features (in addition to those provided in the
47408+ low additional security level) will be enabled:
47409+
47410+ - Failed fork logging
47411+ - Time change logging
47412+ - Signal logging
47413+ - Deny mounts in chroot
47414+ - Deny double chrooting
47415+ - Deny sysctl writes in chroot
47416+ - Deny mknod in chroot
47417+ - Deny access to abstract AF_UNIX sockets out of chroot
47418+ - Deny pivot_root in chroot
47419+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
47420+ - /proc restrictions with special GID set to 10 (usually wheel)
47421+ - Address Space Layout Randomization (ASLR)
47422+ - Prevent exploitation of most refcount overflows
47423+ - Bounds checking of copying between the kernel and userland
47424+
47425+config GRKERNSEC_HIGH
47426+ bool "High"
47427+ select GRKERNSEC_LINK
47428+ select GRKERNSEC_FIFO
47429+ select GRKERNSEC_DMESG
47430+ select GRKERNSEC_FORKFAIL
47431+ select GRKERNSEC_TIME
47432+ select GRKERNSEC_SIGNAL
47433+ select GRKERNSEC_CHROOT
47434+ select GRKERNSEC_CHROOT_SHMAT
47435+ select GRKERNSEC_CHROOT_UNIX
47436+ select GRKERNSEC_CHROOT_MOUNT
47437+ select GRKERNSEC_CHROOT_FCHDIR
47438+ select GRKERNSEC_CHROOT_PIVOT
47439+ select GRKERNSEC_CHROOT_DOUBLE
47440+ select GRKERNSEC_CHROOT_CHDIR
47441+ select GRKERNSEC_CHROOT_MKNOD
47442+ select GRKERNSEC_CHROOT_CAPS
47443+ select GRKERNSEC_CHROOT_SYSCTL
47444+ select GRKERNSEC_CHROOT_FINDTASK
47445+ select GRKERNSEC_SYSFS_RESTRICT
47446+ select GRKERNSEC_PROC
47447+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
47448+ select GRKERNSEC_HIDESYM
47449+ select GRKERNSEC_BRUTE
47450+ select GRKERNSEC_PROC_USERGROUP
47451+ select GRKERNSEC_KMEM
47452+ select GRKERNSEC_RESLOG
47453+ select GRKERNSEC_RANDNET
47454+ select GRKERNSEC_PROC_ADD
47455+ select GRKERNSEC_CHROOT_CHMOD
47456+ select GRKERNSEC_CHROOT_NICE
47457+ select GRKERNSEC_SETXID
47458+ select GRKERNSEC_AUDIT_MOUNT
47459+ select GRKERNSEC_MODHARDEN if (MODULES)
47460+ select GRKERNSEC_HARDEN_PTRACE
47461+ select GRKERNSEC_PTRACE_READEXEC
47462+ select GRKERNSEC_VM86 if (X86_32)
47463+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
47464+ select PAX
47465+ select PAX_RANDUSTACK
47466+ select PAX_ASLR
47467+ select PAX_RANDMMAP
47468+ select PAX_NOEXEC
47469+ select PAX_MPROTECT
47470+ select PAX_EI_PAX
47471+ select PAX_PT_PAX_FLAGS
47472+ select PAX_HAVE_ACL_FLAGS
47473+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
47474+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
47475+ select PAX_RANDKSTACK if (X86_TSC && X86)
47476+ select PAX_SEGMEXEC if (X86_32)
47477+ select PAX_PAGEEXEC
47478+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
47479+ select PAX_EMUTRAMP if (PARISC)
47480+ select PAX_EMUSIGRT if (PARISC)
47481+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
47482+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
47483+ select PAX_REFCOUNT if (X86 || SPARC64)
47484+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
47485+ help
47486+ If you say Y here, many of the features of grsecurity will be
47487+ enabled, which will protect you against many kinds of attacks
47488+ against your system. The heightened security comes at a cost
47489+ of an increased chance of incompatibilities with rare software
47490+ on your machine. Since this security level enables PaX, you should
47491+ view <http://pax.grsecurity.net> and read about the PaX
47492+ project. While you are there, download chpax and run it on
47493+ binaries that cause problems with PaX. Also remember that
47494+ since the /proc restrictions are enabled, you must run your
47495+ identd as gid 1001. This security level enables the following
47496+ features in addition to those listed in the low and medium
47497+ security levels:
47498+
47499+ - Additional /proc restrictions
47500+ - Chmod restrictions in chroot
47501+ - No signals, ptrace, or viewing of processes outside of chroot
47502+ - Capability restrictions in chroot
47503+ - Deny fchdir out of chroot
47504+ - Priority restrictions in chroot
47505+ - Segmentation-based implementation of PaX
47506+ - Mprotect restrictions
47507+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
47508+ - Kernel stack randomization
47509+ - Mount/unmount/remount logging
47510+ - Kernel symbol hiding
47511+ - Hardening of module auto-loading
47512+ - Ptrace restrictions
47513+ - Restricted vm86 mode
47514+ - Restricted sysfs/debugfs
47515+ - Active kernel exploit response
47516+
47517+config GRKERNSEC_CUSTOM
47518+ bool "Custom"
47519+ help
47520+ If you say Y here, you will be able to configure every grsecurity
47521+ option, which allows you to enable many more features that aren't
47522+ covered in the basic security levels. These additional features
47523+ include TPE, socket restrictions, and the sysctl system for
47524+ grsecurity. It is advised that you read through the help for
47525+ each option to determine its usefulness in your situation.
47526+
47527+endchoice
47528+
47529+menu "Memory Protections"
47530+depends on GRKERNSEC
47531+
47532+config GRKERNSEC_KMEM
47533+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
47534+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
47535+ help
47536+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
47537+ be written to or read from to modify or leak the contents of the running
47538+ kernel. /dev/port will also not be allowed to be opened. If you have module
47539+ support disabled, enabling this will close up four ways that are
47540+ currently used to insert malicious code into the running kernel.
47541+ Even with all these features enabled, we still highly recommend that
47542+ you use the RBAC system, as it is still possible for an attacker to
47543+ modify the running kernel through privileged I/O granted by ioperm/iopl.
47544+ If you are not using XFree86, you may be able to stop this additional
47545+ case by enabling the 'Disable privileged I/O' option. Though nothing
47546+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
47547+ but only to video memory, which is the only writing we allow in this
47548+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
47549+ not be allowed to mprotect it with PROT_WRITE later.
47550+ It is highly recommended that you say Y here if you meet all the
47551+ conditions above.
47552+
47553+config GRKERNSEC_VM86
47554+ bool "Restrict VM86 mode"
47555+ depends on X86_32
47556+
47557+ help
47558+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
47559+ make use of a special execution mode on 32bit x86 processors called
47560+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
47561+ video cards and will still work with this option enabled. The purpose
47562+ of the option is to prevent exploitation of emulation errors in
47563+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
47564+ Nearly all users should be able to enable this option.
47565+
47566+config GRKERNSEC_IO
47567+ bool "Disable privileged I/O"
47568+ depends on X86
47569+ select RTC_CLASS
47570+ select RTC_INTF_DEV
47571+ select RTC_DRV_CMOS
47572+
47573+ help
47574+ If you say Y here, all ioperm and iopl calls will return an error.
47575+ Ioperm and iopl can be used to modify the running kernel.
47576+ Unfortunately, some programs need this access to operate properly,
47577+ the most notable of which are XFree86 and hwclock. hwclock can be
47578+ remedied by having RTC support in the kernel, so real-time
47579+ clock support is enabled if this option is enabled, to ensure
47580+ that hwclock operates correctly. XFree86 still will not
47581+ operate correctly with this option enabled, so DO NOT CHOOSE Y
47582+ IF YOU USE XFree86. If you use XFree86 and you still want to
47583+ protect your kernel against modification, use the RBAC system.
47584+
47585+config GRKERNSEC_PROC_MEMMAP
47586+ bool "Harden ASLR against information leaks and entropy reduction"
47587+ default y if (PAX_NOEXEC || PAX_ASLR)
47588+ depends on PAX_NOEXEC || PAX_ASLR
47589+ help
47590+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
47591+ give no information about the addresses of its mappings if
47592+ PaX features that rely on random addresses are enabled on the task.
47593+ In addition to sanitizing this information and disabling other
47594+ dangerous sources of information, this option causes reads of sensitive
47595+ /proc/<pid> entries where the file descriptor was opened in a different
47596+ task than the one performing the read to be rejected. Such attempts are logged.
47597+ Finally, this option limits argv/env strings for suid/sgid binaries
47598+ to 1MB to prevent a complete exhaustion of the stack entropy provided
47599+ by ASLR.
47600+ If you use PaX it is essential that you say Y here as it closes up
47601+ several holes that make full ASLR useless for suid/sgid binaries.
47602+
47603+config GRKERNSEC_BRUTE
47604+ bool "Deter exploit bruteforcing"
47605+ help
47606+ If you say Y here, attempts to bruteforce exploits against forking
47607+ daemons such as apache or sshd, as well as against suid/sgid binaries
47608+ will be deterred. When a child of a forking daemon is killed by PaX
47609+ or crashes due to an illegal instruction or other suspicious signal,
47610+ the parent process will be delayed 30 seconds upon every subsequent
47611+ fork until the administrator is able to assess the situation and
47612+ restart the daemon.
47613+ In the suid/sgid case, the attempt is logged, the user has all their
47614+ processes terminated, and they are prevented from executing any further
47615+ processes for 15 minutes.
47616+ It is recommended that you also enable signal logging in the auditing
47617+ section so that logs are generated when a process triggers a suspicious
47618+ signal.
47619+ If the sysctl option is enabled, a sysctl option with name
47620+ "deter_bruteforce" is created.
47621+
47622+
47623+config GRKERNSEC_MODHARDEN
47624+ bool "Harden module auto-loading"
47625+ depends on MODULES
47626+ help
47627+ If you say Y here, module auto-loading in response to use of some
47628+ feature implemented by an unloaded module will be restricted to
47629+ root users. Enabling this option helps defend against attacks
47630+ by unprivileged users who abuse the auto-loading behavior to
47631+ cause a vulnerable module to load that is then exploited.
47632+
47633+ If this option prevents a legitimate use of auto-loading for a
47634+ non-root user, the administrator can execute modprobe manually
47635+ with the exact name of the module mentioned in the alert log.
47636+ Alternatively, the administrator can add the module to the list
47637+ of modules loaded at boot by modifying init scripts.
47638+
47639+ Modification of init scripts will most likely be needed on
47640+ Ubuntu servers with encrypted home directory support enabled,
47641+ as the first non-root user logging in will cause the ecb(aes),
47642+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
47643+
47644+config GRKERNSEC_HIDESYM
47645+ bool "Hide kernel symbols"
47646+ help
47647+ If you say Y here, getting information on loaded modules, and
47648+ displaying all kernel symbols through a syscall will be restricted
47649+ to users with CAP_SYS_MODULE. For software compatibility reasons,
47650+ /proc/kallsyms will be restricted to the root user. The RBAC
47651+ system can hide that entry even from root.
47652+
47653+ This option also prevents leaking of kernel addresses through
47654+ several /proc entries.
47655+
47656+ Note that this option is only effective provided the following
47657+ conditions are met:
47658+ 1) The kernel using grsecurity is not precompiled by some distribution
47659+ 2) You have also enabled GRKERNSEC_DMESG
47660+ 3) You are using the RBAC system and hiding other files such as your
47661+ kernel image and System.map. Alternatively, enabling this option
47662+ causes the permissions on /boot, /lib/modules, and the kernel
47663+ source directory to change at compile time to prevent
47664+ reading by non-root users.
47665+ If the above conditions are met, this option will aid in providing a
47666+ useful protection against local kernel exploitation of overflows
47667+ and arbitrary read/write vulnerabilities.
47668+
47669+config GRKERNSEC_KERN_LOCKOUT
47670+ bool "Active kernel exploit response"
47671+ depends on X86 || ARM || PPC || SPARC
47672+ help
47673+ If you say Y here, when a PaX alert is triggered due to suspicious
47674+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
47675+ or an OOPs occurs due to bad memory accesses, instead of just
47676+ terminating the offending process (and potentially allowing
47677+ a subsequent exploit from the same user), we will take one of two
47678+ actions:
47679+ If the user was root, we will panic the system
47680+ If the user was non-root, we will log the attempt, terminate
47681+ all processes owned by the user, then prevent them from creating
47682+ any new processes until the system is restarted
47683+ This deters repeated kernel exploitation/bruteforcing attempts
47684+ and is useful for later forensics.
47685+
47686+endmenu
47687+menu "Role Based Access Control Options"
47688+depends on GRKERNSEC
47689+
47690+config GRKERNSEC_RBAC_DEBUG
47691+ bool
47692+
47693+config GRKERNSEC_NO_RBAC
47694+ bool "Disable RBAC system"
47695+ help
47696+ If you say Y here, the /dev/grsec device will be removed from the kernel,
47697+ preventing the RBAC system from being enabled. You should only say Y
47698+ here if you have no intention of using the RBAC system, so as to prevent
47699+ an attacker with root access from misusing the RBAC system to hide files
47700+ and processes when loadable module support and /dev/[k]mem have been
47701+ locked down.
47702+
47703+config GRKERNSEC_ACL_HIDEKERN
47704+ bool "Hide kernel processes"
47705+ help
47706+ If you say Y here, all kernel threads will be hidden to all
47707+ processes but those whose subject has the "view hidden processes"
47708+ flag.
47709+
47710+config GRKERNSEC_ACL_MAXTRIES
47711+ int "Maximum tries before password lockout"
47712+ default 3
47713+ help
47714+ This option enforces the maximum number of times a user can attempt
47715+ to authorize themselves with the grsecurity RBAC system before being
47716+ denied the ability to attempt authorization again for a specified time.
47717+ The lower the number, the harder it will be to brute-force a password.
47718+
47719+config GRKERNSEC_ACL_TIMEOUT
47720+ int "Time to wait after max password tries, in seconds"
47721+ default 30
47722+ help
47723+ This option specifies the time the user must wait after attempting to
47724+ authorize to the RBAC system with the maximum number of invalid
47725+ passwords. The higher the number, the harder it will be to brute-force
47726+ a password.
47727+
47728+endmenu
47729+menu "Filesystem Protections"
47730+depends on GRKERNSEC
47731+
47732+config GRKERNSEC_PROC
47733+ bool "Proc restrictions"
47734+ help
47735+ If you say Y here, the permissions of the /proc filesystem
47736+ will be altered to enhance system security and privacy. You MUST
47737+ choose either a user only restriction or a user and group restriction.
47738+ Depending upon the option you choose, you can either restrict users to
47739+ see only the processes they themselves run, or choose a group that can
47740+ view all processes and files normally restricted to root if you choose
47741+ the "restrict to user only" option. NOTE: If you're running identd as
47742+ a non-root user, you will have to run it as the group you specify here.
47743+
47744+config GRKERNSEC_PROC_USER
47745+ bool "Restrict /proc to user only"
47746+ depends on GRKERNSEC_PROC
47747+ help
47748+ If you say Y here, non-root users will only be able to view their own
47749+ processes, and restricts them from viewing network-related information,
47750+ and viewing kernel symbol and module information.
47751+
47752+config GRKERNSEC_PROC_USERGROUP
47753+ bool "Allow special group"
47754+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
47755+ help
47756+ If you say Y here, you will be able to select a group that will be
47757+ able to view all processes and network-related information. If you've
47758+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
47759+ remain hidden. This option is useful if you want to run identd as
47760+ a non-root user.
47761+
47762+config GRKERNSEC_PROC_GID
47763+ int "GID for special group"
47764+ depends on GRKERNSEC_PROC_USERGROUP
47765+ default 1001
47766+
47767+config GRKERNSEC_PROC_ADD
47768+ bool "Additional restrictions"
47769+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
47770+ help
47771+ If you say Y here, additional restrictions will be placed on
47772+ /proc that keep normal users from viewing device information and
47773+ slabinfo information that could be useful for exploits.
47774+
47775+config GRKERNSEC_LINK
47776+ bool "Linking restrictions"
47777+ help
47778+ If you say Y here, /tmp race exploits will be prevented, since users
47779+ will no longer be able to follow symlinks owned by other users in
47780+ world-writable +t directories (e.g. /tmp), unless the owner of the
47781+ symlink is the owner of the directory. Users will also not be
47782+ able to hardlink to files they do not own. If the sysctl option is
47783+ enabled, a sysctl option with name "linking_restrictions" is created.
47784+
47785+config GRKERNSEC_FIFO
47786+ bool "FIFO restrictions"
47787+ help
47788+ If you say Y here, users will not be able to write to FIFOs they don't
47789+ own in world-writable +t directories (e.g. /tmp), unless the owner of
47790+ the FIFO is the same owner of the directory it's held in. If the sysctl
47791+ option is enabled, a sysctl option with name "fifo_restrictions" is
47792+ created.
47793+
47794+config GRKERNSEC_SYSFS_RESTRICT
47795+ bool "Sysfs/debugfs restriction"
47796+ depends on SYSFS
47797+ help
47798+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
47799+ any filesystem normally mounted under it (e.g. debugfs) will be
47800+ mostly accessible only by root. These filesystems generally provide access
47801+ to hardware and debug information that isn't appropriate for unprivileged
47802+ users of the system. Sysfs and debugfs have also become a large source
47803+ of new vulnerabilities, ranging from infoleaks to local compromise.
47804+ There has been very little oversight with an eye toward security involved
47805+ in adding new exporters of information to these filesystems, so their
47806+ use is discouraged.
47807+ For reasons of compatibility, a few directories have been whitelisted
47808+ for access by non-root users:
47809+ /sys/fs/selinux
47810+ /sys/fs/fuse
47811+ /sys/devices/system/cpu
47812+
47813+config GRKERNSEC_ROFS
47814+ bool "Runtime read-only mount protection"
47815+ help
47816+ If you say Y here, a sysctl option with name "romount_protect" will
47817+ be created. By setting this option to 1 at runtime, filesystems
47818+ will be protected in the following ways:
47819+ * No new writable mounts will be allowed
47820+ * Existing read-only mounts won't be able to be remounted read/write
47821+ * Write operations will be denied on all block devices
47822+ This option acts independently of grsec_lock: once it is set to 1,
47823+ it cannot be turned off. Therefore, please be mindful of the resulting
47824+ behavior if this option is enabled in an init script on a read-only
47825+ filesystem. This feature is mainly intended for secure embedded systems.
47826+
47827+config GRKERNSEC_CHROOT
47828+ bool "Chroot jail restrictions"
47829+ help
47830+ If you say Y here, you will be able to choose several options that will
47831+ make breaking out of a chrooted jail much more difficult. If you
47832+ encounter no software incompatibilities with the following options, it
47833+ is recommended that you enable each one.
47834+
47835+config GRKERNSEC_CHROOT_MOUNT
47836+ bool "Deny mounts"
47837+ depends on GRKERNSEC_CHROOT
47838+ help
47839+ If you say Y here, processes inside a chroot will not be able to
47840+ mount or remount filesystems. If the sysctl option is enabled, a
47841+ sysctl option with name "chroot_deny_mount" is created.
47842+
47843+config GRKERNSEC_CHROOT_DOUBLE
47844+ bool "Deny double-chroots"
47845+ depends on GRKERNSEC_CHROOT
47846+ help
47847+ If you say Y here, processes inside a chroot will not be able to chroot
47848+ again outside the chroot. This is a widely used method of breaking
47849+ out of a chroot jail and should not be allowed. If the sysctl
47850+ option is enabled, a sysctl option with name
47851+ "chroot_deny_chroot" is created.
47852+
47853+config GRKERNSEC_CHROOT_PIVOT
47854+ bool "Deny pivot_root in chroot"
47855+ depends on GRKERNSEC_CHROOT
47856+ help
47857+ If you say Y here, processes inside a chroot will not be able to use
47858+ a function called pivot_root() that was introduced in Linux 2.3.41. It
47859+ works similar to chroot in that it changes the root filesystem. This
47860+ function could be misused in a chrooted process to attempt to break out
47861+ of the chroot, and therefore should not be allowed. If the sysctl
47862+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
47863+ created.
47864+
47865+config GRKERNSEC_CHROOT_CHDIR
47866+ bool "Enforce chdir(\"/\") on all chroots"
47867+ depends on GRKERNSEC_CHROOT
47868+ help
47869+ If you say Y here, the current working directory of all newly-chrooted
47870+ applications will be set to the root directory of the chroot.
47871+ The man page on chroot(2) states:
47872+ Note that this call does not change the current working
47873+ directory, so that `.' can be outside the tree rooted at
47874+ `/'. In particular, the super-user can escape from a
47875+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
47876+
47877+ It is recommended that you say Y here, since it's not known to break
47878+ any software. If the sysctl option is enabled, a sysctl option with
47879+ name "chroot_enforce_chdir" is created.
47880+
47881+config GRKERNSEC_CHROOT_CHMOD
47882+ bool "Deny (f)chmod +s"
47883+ depends on GRKERNSEC_CHROOT
47884+ help
47885+ If you say Y here, processes inside a chroot will not be able to chmod
47886+ or fchmod files to make them have suid or sgid bits. This protects
47887+ against another published method of breaking a chroot. If the sysctl
47888+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
47889+ created.
47890+
47891+config GRKERNSEC_CHROOT_FCHDIR
47892+ bool "Deny fchdir out of chroot"
47893+ depends on GRKERNSEC_CHROOT
47894+ help
47895+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
47896+ to a file descriptor of the chrooting process that points to a directory
47897+ outside the filesystem will be stopped. If the sysctl option
47898+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
47899+
47900+config GRKERNSEC_CHROOT_MKNOD
47901+ bool "Deny mknod"
47902+ depends on GRKERNSEC_CHROOT
47903+ help
47904+ If you say Y here, processes inside a chroot will not be allowed to
47905+ mknod. The problem with using mknod inside a chroot is that it
47906+ would allow an attacker to create a device entry that is the same
47907+ as one on the physical root of your system, which could range from
47908+ anything from the console device to a device for your harddrive (which
47909+ they could then use to wipe the drive or steal data). It is recommended
47910+ that you say Y here, unless you run into software incompatibilities.
47911+ If the sysctl option is enabled, a sysctl option with name
47912+ "chroot_deny_mknod" is created.
47913+
47914+config GRKERNSEC_CHROOT_SHMAT
47915+ bool "Deny shmat() out of chroot"
47916+ depends on GRKERNSEC_CHROOT
47917+ help
47918+ If you say Y here, processes inside a chroot will not be able to attach
47919+ to shared memory segments that were created outside of the chroot jail.
47920+ It is recommended that you say Y here. If the sysctl option is enabled,
47921+ a sysctl option with name "chroot_deny_shmat" is created.
47922+
47923+config GRKERNSEC_CHROOT_UNIX
47924+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
47925+ depends on GRKERNSEC_CHROOT
47926+ help
47927+ If you say Y here, processes inside a chroot will not be able to
47928+ connect to abstract (meaning not belonging to a filesystem) Unix
47929+ domain sockets that were bound outside of a chroot. It is recommended
47930+ that you say Y here. If the sysctl option is enabled, a sysctl option
47931+ with name "chroot_deny_unix" is created.
47932+
47933+config GRKERNSEC_CHROOT_FINDTASK
47934+ bool "Protect outside processes"
47935+ depends on GRKERNSEC_CHROOT
47936+ help
47937+ If you say Y here, processes inside a chroot will not be able to
47938+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
47939+ getsid, or view any process outside of the chroot. If the sysctl
47940+ option is enabled, a sysctl option with name "chroot_findtask" is
47941+ created.
47942+
47943+config GRKERNSEC_CHROOT_NICE
47944+ bool "Restrict priority changes"
47945+ depends on GRKERNSEC_CHROOT
47946+ help
47947+ If you say Y here, processes inside a chroot will not be able to raise
47948+ the priority of processes in the chroot, or alter the priority of
47949+ processes outside the chroot. This provides more security than simply
47950+ removing CAP_SYS_NICE from the process' capability set. If the
47951+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
47952+ is created.
47953+
47954+config GRKERNSEC_CHROOT_SYSCTL
47955+ bool "Deny sysctl writes"
47956+ depends on GRKERNSEC_CHROOT
47957+ help
47958+ If you say Y here, an attacker in a chroot will not be able to
47959+ write to sysctl entries, either by sysctl(2) or through a /proc
47960+ interface. It is strongly recommended that you say Y here. If the
47961+ sysctl option is enabled, a sysctl option with name
47962+ "chroot_deny_sysctl" is created.
47963+
47964+config GRKERNSEC_CHROOT_CAPS
47965+ bool "Capability restrictions"
47966+ depends on GRKERNSEC_CHROOT
47967+ help
47968+ If you say Y here, the capabilities on all processes within a
47969+ chroot jail will be lowered to stop module insertion, raw i/o,
47970+ system and net admin tasks, rebooting the system, modifying immutable
47971+ files, modifying IPC owned by another, and changing the system time.
47972+ This is left an option because it can break some apps. Disable this
47973+ if your chrooted apps are having problems performing those kinds of
47974+ tasks. If the sysctl option is enabled, a sysctl option with
47975+ name "chroot_caps" is created.
47976+
47977+endmenu
47978+menu "Kernel Auditing"
47979+depends on GRKERNSEC
47980+
47981+config GRKERNSEC_AUDIT_GROUP
47982+ bool "Single group for auditing"
47983+ help
47984+ If you say Y here, the exec, chdir, and (un)mount logging features
47985+ will only operate on a group you specify. This option is recommended
47986+ if you only want to watch certain users instead of having a large
47987+ amount of logs from the entire system. If the sysctl option is enabled,
47988+ a sysctl option with name "audit_group" is created.
47989+
47990+config GRKERNSEC_AUDIT_GID
47991+ int "GID for auditing"
47992+ depends on GRKERNSEC_AUDIT_GROUP
47993+ default 1007
47994+
47995+config GRKERNSEC_EXECLOG
47996+ bool "Exec logging"
47997+ help
47998+ If you say Y here, all execve() calls will be logged (since the
47999+ other exec*() calls are frontends to execve(), all execution
48000+ will be logged). Useful for shell-servers that like to keep track
48001+ of their users. If the sysctl option is enabled, a sysctl option with
48002+ name "exec_logging" is created.
48003+ WARNING: This option when enabled will produce a LOT of logs, especially
48004+ on an active system.
48005+
48006+config GRKERNSEC_RESLOG
48007+ bool "Resource logging"
48008+ help
48009+ If you say Y here, all attempts to overstep resource limits will
48010+ be logged with the resource name, the requested size, and the current
48011+ limit. It is highly recommended that you say Y here. If the sysctl
48012+ option is enabled, a sysctl option with name "resource_logging" is
48013+ created. If the RBAC system is enabled, the sysctl value is ignored.
48014+
48015+config GRKERNSEC_CHROOT_EXECLOG
48016+ bool "Log execs within chroot"
48017+ help
48018+ If you say Y here, all executions inside a chroot jail will be logged
48019+ to syslog. This can cause a large amount of logs if certain
48020+ applications (eg. djb's daemontools) are installed on the system, and
48021+ is therefore left as an option. If the sysctl option is enabled, a
48022+ sysctl option with name "chroot_execlog" is created.
48023+
48024+config GRKERNSEC_AUDIT_PTRACE
48025+ bool "Ptrace logging"
48026+ help
48027+ If you say Y here, all attempts to attach to a process via ptrace
48028+ will be logged. If the sysctl option is enabled, a sysctl option
48029+ with name "audit_ptrace" is created.
48030+
48031+config GRKERNSEC_AUDIT_CHDIR
48032+ bool "Chdir logging"
48033+ help
48034+ If you say Y here, all chdir() calls will be logged. If the sysctl
48035+ option is enabled, a sysctl option with name "audit_chdir" is created.
48036+
48037+config GRKERNSEC_AUDIT_MOUNT
48038+ bool "(Un)Mount logging"
48039+ help
48040+ If you say Y here, all mounts and unmounts will be logged. If the
48041+ sysctl option is enabled, a sysctl option with name "audit_mount" is
48042+ created.
48043+
48044+config GRKERNSEC_SIGNAL
48045+ bool "Signal logging"
48046+ help
48047+ If you say Y here, certain important signals will be logged, such as
48048+ SIGSEGV, which will as a result inform you of when an error in a program
48049+ occurred, which in some cases could mean a possible exploit attempt.
48050+ If the sysctl option is enabled, a sysctl option with name
48051+ "signal_logging" is created.
48052+
48053+config GRKERNSEC_FORKFAIL
48054+ bool "Fork failure logging"
48055+ help
48056+ If you say Y here, all failed fork() attempts will be logged.
48057+ This could suggest a fork bomb, or someone attempting to overstep
48058+ their process limit. If the sysctl option is enabled, a sysctl option
48059+ with name "forkfail_logging" is created.
48060+
48061+config GRKERNSEC_TIME
48062+ bool "Time change logging"
48063+ help
48064+ If you say Y here, any changes of the system clock will be logged.
48065+ If the sysctl option is enabled, a sysctl option with name
48066+ "timechange_logging" is created.
48067+
48068+config GRKERNSEC_PROC_IPADDR
48069+ bool "/proc/<pid>/ipaddr support"
48070+ help
48071+ If you say Y here, a new entry will be added to each /proc/<pid>
48072+ directory that contains the IP address of the person using the task.
48073+ The IP is carried across local TCP and AF_UNIX stream sockets.
48074+ This information can be useful for IDS/IPSes to perform remote response
48075+ to a local attack. The entry is readable by only the owner of the
48076+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
48077+ the RBAC system), and thus does not create privacy concerns.
48078+
48079+config GRKERNSEC_RWXMAP_LOG
48080+ bool 'Denied RWX mmap/mprotect logging'
48081+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
48082+ help
48083+ If you say Y here, calls to mmap() and mprotect() with explicit
48084+ usage of PROT_WRITE and PROT_EXEC together will be logged when
48085+ denied by the PAX_MPROTECT feature. If the sysctl option is
48086+ enabled, a sysctl option with name "rwxmap_logging" is created.
48087+
48088+config GRKERNSEC_AUDIT_TEXTREL
48089+ bool 'ELF text relocations logging (READ HELP)'
48090+ depends on PAX_MPROTECT
48091+ help
48092+ If you say Y here, text relocations will be logged with the filename
48093+ of the offending library or binary. The purpose of the feature is
48094+ to help Linux distribution developers get rid of libraries and
48095+ binaries that need text relocations which hinder the future progress
48096+ of PaX. Only Linux distribution developers should say Y here, and
48097+ never on a production machine, as this option creates an information
48098+ leak that could aid an attacker in defeating the randomization of
48099+ a single memory region. If the sysctl option is enabled, a sysctl
48100+ option with name "audit_textrel" is created.
48101+
48102+endmenu
48103+
48104+menu "Executable Protections"
48105+depends on GRKERNSEC
48106+
48107+config GRKERNSEC_DMESG
48108+ bool "Dmesg(8) restriction"
48109+ help
48110+ If you say Y here, non-root users will not be able to use dmesg(8)
48111+ to view up to the last 4kb of messages in the kernel's log buffer.
48112+ The kernel's log buffer often contains kernel addresses and other
48113+ identifying information useful to an attacker in fingerprinting a
48114+ system for a targeted exploit.
48115+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
48116+ created.
48117+
48118+config GRKERNSEC_HARDEN_PTRACE
48119+ bool "Deter ptrace-based process snooping"
48120+ help
48121+ If you say Y here, TTY sniffers and other malicious monitoring
48122+ programs implemented through ptrace will be defeated. If you
48123+ have been using the RBAC system, this option has already been
48124+ enabled for several years for all users, with the ability to make
48125+ fine-grained exceptions.
48126+
48127+ This option only affects the ability of non-root users to ptrace
48128+ processes that are not a descendant of the ptracing process.
48129+ This means that strace ./binary and gdb ./binary will still work,
48130+ but attaching to arbitrary processes will not. If the sysctl
48131+ option is enabled, a sysctl option with name "harden_ptrace" is
48132+ created.
48133+
48134+config GRKERNSEC_PTRACE_READEXEC
48135+ bool "Require read access to ptrace sensitive binaries"
48136+ help
48137+ If you say Y here, unprivileged users will not be able to ptrace unreadable
48138+ binaries. This option is useful in environments that
48139+ remove the read bits (e.g. file mode 4711) from suid binaries to
48140+ prevent infoleaking of their contents. This option adds
48141+ consistency to the use of that file mode, as the binary could normally
48142+ be read out when run without privileges while ptracing.
48143+
48144+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
48145+ is created.
48146+
48147+config GRKERNSEC_SETXID
48148+ bool "Enforce consistent multithreaded privileges"
48149+ help
48150+ If you say Y here, a change from a root uid to a non-root uid
48151+ in a multithreaded application will cause the resulting uids,
48152+ gids, supplementary groups, and capabilities in that thread
48153+ to be propagated to the other threads of the process. In most
48154+ cases this is unnecessary, as glibc will emulate this behavior
48155+ on behalf of the application. Other libcs do not act in the
48156+ same way, allowing the other threads of the process to continue
48157+ running with root privileges. If the sysctl option is enabled,
48158+ a sysctl option with name "consistent_setxid" is created.
48159+
48160+config GRKERNSEC_TPE
48161+ bool "Trusted Path Execution (TPE)"
48162+ help
48163+ If you say Y here, you will be able to choose a gid to add to the
48164+ supplementary groups of users you want to mark as "untrusted."
48165+ These users will not be able to execute any files that are not in
48166+ root-owned directories writable only by root. If the sysctl option
48167+ is enabled, a sysctl option with name "tpe" is created.
48168+
48169+config GRKERNSEC_TPE_ALL
48170+ bool "Partially restrict all non-root users"
48171+ depends on GRKERNSEC_TPE
48172+ help
48173+ If you say Y here, all non-root users will be covered under
48174+ a weaker TPE restriction. This is separate from, and in addition to,
48175+ the main TPE options that you have selected elsewhere. Thus, if a
48176+ "trusted" GID is chosen, this restriction applies to even that GID.
48177+ Under this restriction, all non-root users will only be allowed to
48178+ execute files in directories they own that are not group or
48179+ world-writable, or in directories owned by root and writable only by
48180+ root. If the sysctl option is enabled, a sysctl option with name
48181+ "tpe_restrict_all" is created.
48182+
48183+config GRKERNSEC_TPE_INVERT
48184+ bool "Invert GID option"
48185+ depends on GRKERNSEC_TPE
48186+ help
48187+ If you say Y here, the group you specify in the TPE configuration will
48188+ decide what group TPE restrictions will be *disabled* for. This
48189+ option is useful if you want TPE restrictions to be applied to most
48190+ users on the system. If the sysctl option is enabled, a sysctl option
48191+ with name "tpe_invert" is created. Unlike other sysctl options, this
48192+ entry will default to on for backward-compatibility.
48193+
48194+config GRKERNSEC_TPE_GID
48195+ int "GID for untrusted users"
48196+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
48197+ default 1005
48198+ help
48199+ Setting this GID determines what group TPE restrictions will be
48200+ *enabled* for. If the sysctl option is enabled, a sysctl option
48201+ with name "tpe_gid" is created.
48202+
48203+config GRKERNSEC_TPE_GID
48204+ int "GID for trusted users"
48205+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
48206+ default 1005
48207+ help
48208+ Setting this GID determines what group TPE restrictions will be
48209+ *disabled* for. If the sysctl option is enabled, a sysctl option
48210+ with name "tpe_gid" is created.
48211+
48212+endmenu
48213+menu "Network Protections"
48214+depends on GRKERNSEC
48215+
48216+config GRKERNSEC_RANDNET
48217+ bool "Larger entropy pools"
48218+ help
48219+ If you say Y here, the entropy pools used for many features of Linux
48220+ and grsecurity will be doubled in size. Since several grsecurity
48221+ features use additional randomness, it is recommended that you say Y
48222+ here. Saying Y here has a similar effect as modifying
48223+ /proc/sys/kernel/random/poolsize.
48224+
48225+config GRKERNSEC_BLACKHOLE
48226+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
48227+ depends on NET
48228+ help
48229+ If you say Y here, neither TCP resets nor ICMP
48230+ destination-unreachable packets will be sent in response to packets
48231+ sent to ports for which no associated listening process exists.
48232+ This feature supports both IPv4 and IPv6 and exempts the
48233+ loopback interface from blackholing. Enabling this feature
48234+ makes a host more resilient to DoS attacks and reduces network
48235+ visibility against scanners.
48236+
48237+ The blackhole feature as-implemented is equivalent to the FreeBSD
48238+ blackhole feature, as it prevents RST responses to all packets, not
48239+ just SYNs. Under most application behavior this causes no
48240+ problems, but applications (like haproxy) may not close certain
48241+ connections in a way that cleanly terminates them on the remote
48242+ end, leaving the remote host in LAST_ACK state. Because of this
48243+ side-effect and to prevent intentional LAST_ACK DoSes, this
48244+ feature also adds automatic mitigation against such attacks.
48245+ The mitigation drastically reduces the amount of time a socket
48246+ can spend in LAST_ACK state. If you're using haproxy and not
48247+ all servers it connects to have this option enabled, consider
48248+ disabling this feature on the haproxy host.
48249+
48250+ If the sysctl option is enabled, two sysctl options with names
48251+ "ip_blackhole" and "lastack_retries" will be created.
48252+ While "ip_blackhole" takes the standard zero/non-zero on/off
48253+ toggle, "lastack_retries" uses the same kinds of values as
48254+ "tcp_retries1" and "tcp_retries2". The default value of 4
48255+ prevents a socket from lasting more than 45 seconds in LAST_ACK
48256+ state.
48257+
48258+config GRKERNSEC_SOCKET
48259+ bool "Socket restrictions"
48260+ depends on NET
48261+ help
48262+ If you say Y here, you will be able to choose from several options.
48263+ If you assign a GID on your system and add it to the supplementary
48264+ groups of users you want to restrict socket access to, this patch
48265+ will perform up to three things, based on the option(s) you choose.
48266+
48267+config GRKERNSEC_SOCKET_ALL
48268+ bool "Deny any sockets to group"
48269+ depends on GRKERNSEC_SOCKET
48270+ help
48271+ If you say Y here, you will be able to choose a GID whose users will
48272+ be unable to connect to other hosts from your machine or run server
48273+ applications from your machine. If the sysctl option is enabled, a
48274+ sysctl option with name "socket_all" is created.
48275+
48276+config GRKERNSEC_SOCKET_ALL_GID
48277+ int "GID to deny all sockets for"
48278+ depends on GRKERNSEC_SOCKET_ALL
48279+ default 1004
48280+ help
48281+ Here you can choose the GID to disable socket access for. Remember to
48282+ add the users you want socket access disabled for to the GID
48283+ specified here. If the sysctl option is enabled, a sysctl option
48284+ with name "socket_all_gid" is created.
48285+
48286+config GRKERNSEC_SOCKET_CLIENT
48287+ bool "Deny client sockets to group"
48288+ depends on GRKERNSEC_SOCKET
48289+ help
48290+ If you say Y here, you will be able to choose a GID whose users will
48291+ be unable to connect to other hosts from your machine, but will be
48292+ able to run servers. If this option is enabled, all users in the group
48293+ you specify will have to use passive mode when initiating ftp transfers
48294+ from the shell on your machine. If the sysctl option is enabled, a
48295+ sysctl option with name "socket_client" is created.
48296+
48297+config GRKERNSEC_SOCKET_CLIENT_GID
48298+ int "GID to deny client sockets for"
48299+ depends on GRKERNSEC_SOCKET_CLIENT
48300+ default 1003
48301+ help
48302+ Here you can choose the GID to disable client socket access for.
48303+ Remember to add the users you want client socket access disabled for to
48304+ the GID specified here. If the sysctl option is enabled, a sysctl
48305+ option with name "socket_client_gid" is created.
48306+
48307+config GRKERNSEC_SOCKET_SERVER
48308+ bool "Deny server sockets to group"
48309+ depends on GRKERNSEC_SOCKET
48310+ help
48311+ If you say Y here, you will be able to choose a GID whose users will
48312+ be unable to run server applications from your machine. If the sysctl
48313+ option is enabled, a sysctl option with name "socket_server" is created.
48314+
48315+config GRKERNSEC_SOCKET_SERVER_GID
48316+ int "GID to deny server sockets for"
48317+ depends on GRKERNSEC_SOCKET_SERVER
48318+ default 1002
48319+ help
48320+ Here you can choose the GID to disable server socket access for.
48321+ Remember to add the users you want server socket access disabled for to
48322+ the GID specified here. If the sysctl option is enabled, a sysctl
48323+ option with name "socket_server_gid" is created.
48324+
48325+endmenu
48326+menu "Sysctl support"
48327+depends on GRKERNSEC && SYSCTL
48328+
48329+config GRKERNSEC_SYSCTL
48330+ bool "Sysctl support"
48331+ help
48332+ If you say Y here, you will be able to change the options that
48333+ grsecurity runs with at bootup, without having to recompile your
48334+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
48335+ to enable (1) or disable (0) various features. All the sysctl entries
48336+ are mutable until the "grsec_lock" entry is set to a non-zero value.
48337+ All features enabled in the kernel configuration are disabled at boot
48338+ if you do not say Y to the "Turn on features by default" option.
48339+ All options should be set at startup, and the grsec_lock entry should
48340+ be set to a non-zero value after all the options are set.
48341+ *THIS IS EXTREMELY IMPORTANT*
48342+
48343+config GRKERNSEC_SYSCTL_DISTRO
48344+ bool "Extra sysctl support for distro makers (READ HELP)"
48345+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
48346+ help
48347+ If you say Y here, additional sysctl options will be created
48348+ for features that affect processes running as root. Therefore,
48349+ it is critical when using this option that the grsec_lock entry be
48350+ enabled after boot. Only distros with prebuilt kernel packages
48351+ with this option enabled that can ensure grsec_lock is enabled
48352+ after boot should use this option.
48353+ *Failure to set grsec_lock after boot makes all grsec features
48354+ this option covers useless*
48355+
48356+ Currently this option creates the following sysctl entries:
48357+ "Disable Privileged I/O": "disable_priv_io"
48358+
48359+config GRKERNSEC_SYSCTL_ON
48360+ bool "Turn on features by default"
48361+ depends on GRKERNSEC_SYSCTL
48362+ help
48363+ If you say Y here, instead of having all features enabled in the
48364+ kernel configuration disabled at boot time, the features will be
48365+ enabled at boot time. It is recommended you say Y here unless
48366+ there is some reason you would want all sysctl-tunable features to
48367+ be disabled by default. As mentioned elsewhere, it is important
48368+ to enable the grsec_lock entry once you have finished modifying
48369+ the sysctl entries.
48370+
48371+endmenu
48372+menu "Logging Options"
48373+depends on GRKERNSEC
48374+
48375+config GRKERNSEC_FLOODTIME
48376+ int "Seconds in between log messages (minimum)"
48377+ default 10
48378+ help
48379+ This option allows you to enforce the number of seconds between
48380+ grsecurity log messages. The default should be suitable for most
48381+ people, however, if you choose to change it, choose a value small enough
48382+ to allow informative logs to be produced, but large enough to
48383+ prevent flooding.
48384+
48385+config GRKERNSEC_FLOODBURST
48386+ int "Number of messages in a burst (maximum)"
48387+ default 6
48388+ help
48389+ This option allows you to choose the maximum number of messages allowed
48390+ within the flood time interval you chose in a separate option. The
48391+ default should be suitable for most people, however if you find that
48392+ many of your logs are being interpreted as flooding, you may want to
48393+ raise this value.
48394+
48395+endmenu
48396+
48397+endmenu
48398diff --git a/grsecurity/Makefile b/grsecurity/Makefile
48399new file mode 100644
48400index 0000000..496e60d
48401--- /dev/null
48402+++ b/grsecurity/Makefile
48403@@ -0,0 +1,40 @@
48404+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
48405+# during 2001-2009 it has been completely redesigned by Brad Spengler
48406+# into an RBAC system
48407+#
48408+# All code in this directory and various hooks inserted throughout the kernel
48409+# are copyright Brad Spengler - Open Source Security, Inc., and released
48410+# under the GPL v2 or higher
48411+
48412+ifndef CONFIG_IA64
48413+KBUILD_CFLAGS += -Werror
48414+endif
48415+
48416+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
48417+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
48418+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
48419+
48420+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
48421+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
48422+ gracl_learn.o grsec_log.o
48423+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
48424+
48425+ifdef CONFIG_NET
48426+obj-y += grsec_sock.o
48427+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
48428+endif
48429+
48430+ifndef CONFIG_GRKERNSEC
48431+obj-y += grsec_disabled.o
48432+endif
48433+
48434+ifdef CONFIG_GRKERNSEC_HIDESYM
48435+extra-y := grsec_hidesym.o
48436+$(obj)/grsec_hidesym.o:
48437+ @-chmod -f 500 /boot
48438+ @-chmod -f 500 /lib/modules
48439+ @-chmod -f 500 /lib64/modules
48440+ @-chmod -f 500 /lib32/modules
48441+ @-chmod -f 700 .
48442+ @echo ' grsec: protected kernel image paths'
48443+endif
48444diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
48445new file mode 100644
48446index 0000000..7715893
48447--- /dev/null
48448+++ b/grsecurity/gracl.c
48449@@ -0,0 +1,4164 @@
48450+#include <linux/kernel.h>
48451+#include <linux/module.h>
48452+#include <linux/sched.h>
48453+#include <linux/mm.h>
48454+#include <linux/file.h>
48455+#include <linux/fs.h>
48456+#include <linux/namei.h>
48457+#include <linux/mount.h>
48458+#include <linux/tty.h>
48459+#include <linux/proc_fs.h>
48460+#include <linux/lglock.h>
48461+#include <linux/slab.h>
48462+#include <linux/vmalloc.h>
48463+#include <linux/types.h>
48464+#include <linux/sysctl.h>
48465+#include <linux/netdevice.h>
48466+#include <linux/ptrace.h>
48467+#include <linux/gracl.h>
48468+#include <linux/gralloc.h>
48469+#include <linux/security.h>
48470+#include <linux/grinternal.h>
48471+#include <linux/pid_namespace.h>
48472+#include <linux/fdtable.h>
48473+#include <linux/percpu.h>
48474+
48475+#include <asm/uaccess.h>
48476+#include <asm/errno.h>
48477+#include <asm/mman.h>
48478+
48479+static struct acl_role_db acl_role_set;
48480+static struct name_db name_set;
48481+static struct inodev_db inodev_set;
48482+
48483+/* for keeping track of userspace pointers used for subjects, so we
48484+ can share references in the kernel as well
48485+*/
48486+
48487+static struct path real_root;
48488+
48489+static struct acl_subj_map_db subj_map_set;
48490+
48491+static struct acl_role_label *default_role;
48492+
48493+static struct acl_role_label *role_list;
48494+
48495+static u16 acl_sp_role_value;
48496+
48497+extern char *gr_shared_page[4];
48498+static DEFINE_MUTEX(gr_dev_mutex);
48499+DEFINE_RWLOCK(gr_inode_lock);
48500+
48501+struct gr_arg *gr_usermode;
48502+
48503+static unsigned int gr_status __read_only = GR_STATUS_INIT;
48504+
48505+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
48506+extern void gr_clear_learn_entries(void);
48507+
48508+#ifdef CONFIG_GRKERNSEC_RESLOG
48509+extern void gr_log_resource(const struct task_struct *task,
48510+ const int res, const unsigned long wanted, const int gt);
48511+#endif
48512+
48513+unsigned char *gr_system_salt;
48514+unsigned char *gr_system_sum;
48515+
48516+static struct sprole_pw **acl_special_roles = NULL;
48517+static __u16 num_sprole_pws = 0;
48518+
48519+static struct acl_role_label *kernel_role = NULL;
48520+
48521+static unsigned int gr_auth_attempts = 0;
48522+static unsigned long gr_auth_expires = 0UL;
48523+
48524+#ifdef CONFIG_NET
48525+extern struct vfsmount *sock_mnt;
48526+#endif
48527+
48528+extern struct vfsmount *pipe_mnt;
48529+extern struct vfsmount *shm_mnt;
48530+#ifdef CONFIG_HUGETLBFS
48531+extern struct vfsmount *hugetlbfs_vfsmount;
48532+#endif
48533+
48534+static struct acl_object_label *fakefs_obj_rw;
48535+static struct acl_object_label *fakefs_obj_rwx;
48536+
48537+extern int gr_init_uidset(void);
48538+extern void gr_free_uidset(void);
48539+extern void gr_remove_uid(uid_t uid);
48540+extern int gr_find_uid(uid_t uid);
48541+
48542+DECLARE_BRLOCK(vfsmount_lock);
48543+
48544+__inline__ int
48545+gr_acl_is_enabled(void)
48546+{
48547+ return (gr_status & GR_READY);
48548+}
48549+
48550+#ifdef CONFIG_BTRFS_FS
48551+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
48552+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
48553+#endif
48554+
48555+static inline dev_t __get_dev(const struct dentry *dentry)
48556+{
48557+#ifdef CONFIG_BTRFS_FS
48558+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
48559+ return get_btrfs_dev_from_inode(dentry->d_inode);
48560+ else
48561+#endif
48562+ return dentry->d_inode->i_sb->s_dev;
48563+}
48564+
48565+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
48566+{
48567+ return __get_dev(dentry);
48568+}
48569+
48570+static char gr_task_roletype_to_char(struct task_struct *task)
48571+{
48572+ switch (task->role->roletype &
48573+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
48574+ GR_ROLE_SPECIAL)) {
48575+ case GR_ROLE_DEFAULT:
48576+ return 'D';
48577+ case GR_ROLE_USER:
48578+ return 'U';
48579+ case GR_ROLE_GROUP:
48580+ return 'G';
48581+ case GR_ROLE_SPECIAL:
48582+ return 'S';
48583+ }
48584+
48585+ return 'X';
48586+}
48587+
48588+char gr_roletype_to_char(void)
48589+{
48590+ return gr_task_roletype_to_char(current);
48591+}
48592+
48593+__inline__ int
48594+gr_acl_tpe_check(void)
48595+{
48596+ if (unlikely(!(gr_status & GR_READY)))
48597+ return 0;
48598+ if (current->role->roletype & GR_ROLE_TPE)
48599+ return 1;
48600+ else
48601+ return 0;
48602+}
48603+
48604+int
48605+gr_handle_rawio(const struct inode *inode)
48606+{
48607+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
48608+ if (inode && S_ISBLK(inode->i_mode) &&
48609+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
48610+ !capable(CAP_SYS_RAWIO))
48611+ return 1;
48612+#endif
48613+ return 0;
48614+}
48615+
48616+static int
48617+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
48618+{
48619+ if (likely(lena != lenb))
48620+ return 0;
48621+
48622+ return !memcmp(a, b, lena);
48623+}
48624+
48625+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
48626+{
48627+ *buflen -= namelen;
48628+ if (*buflen < 0)
48629+ return -ENAMETOOLONG;
48630+ *buffer -= namelen;
48631+ memcpy(*buffer, str, namelen);
48632+ return 0;
48633+}
48634+
48635+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
48636+{
48637+ return prepend(buffer, buflen, name->name, name->len);
48638+}
48639+
48640+static int prepend_path(const struct path *path, struct path *root,
48641+ char **buffer, int *buflen)
48642+{
48643+ struct dentry *dentry = path->dentry;
48644+ struct vfsmount *vfsmnt = path->mnt;
48645+ bool slash = false;
48646+ int error = 0;
48647+
48648+ while (dentry != root->dentry || vfsmnt != root->mnt) {
48649+ struct dentry * parent;
48650+
48651+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
48652+ /* Global root? */
48653+ if (vfsmnt->mnt_parent == vfsmnt) {
48654+ goto out;
48655+ }
48656+ dentry = vfsmnt->mnt_mountpoint;
48657+ vfsmnt = vfsmnt->mnt_parent;
48658+ continue;
48659+ }
48660+ parent = dentry->d_parent;
48661+ prefetch(parent);
48662+ spin_lock(&dentry->d_lock);
48663+ error = prepend_name(buffer, buflen, &dentry->d_name);
48664+ spin_unlock(&dentry->d_lock);
48665+ if (!error)
48666+ error = prepend(buffer, buflen, "/", 1);
48667+ if (error)
48668+ break;
48669+
48670+ slash = true;
48671+ dentry = parent;
48672+ }
48673+
48674+out:
48675+ if (!error && !slash)
48676+ error = prepend(buffer, buflen, "/", 1);
48677+
48678+ return error;
48679+}
48680+
48681+/* this must be called with vfsmount_lock and rename_lock held */
48682+
48683+static char *__our_d_path(const struct path *path, struct path *root,
48684+ char *buf, int buflen)
48685+{
48686+ char *res = buf + buflen;
48687+ int error;
48688+
48689+ prepend(&res, &buflen, "\0", 1);
48690+ error = prepend_path(path, root, &res, &buflen);
48691+ if (error)
48692+ return ERR_PTR(error);
48693+
48694+ return res;
48695+}
48696+
48697+static char *
48698+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
48699+{
48700+ char *retval;
48701+
48702+ retval = __our_d_path(path, root, buf, buflen);
48703+ if (unlikely(IS_ERR(retval)))
48704+ retval = strcpy(buf, "<path too long>");
48705+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
48706+ retval[1] = '\0';
48707+
48708+ return retval;
48709+}
48710+
48711+static char *
48712+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48713+ char *buf, int buflen)
48714+{
48715+ struct path path;
48716+ char *res;
48717+
48718+ path.dentry = (struct dentry *)dentry;
48719+ path.mnt = (struct vfsmount *)vfsmnt;
48720+
48721+ /* we can use real_root.dentry, real_root.mnt, because this is only called
48722+ by the RBAC system */
48723+ res = gen_full_path(&path, &real_root, buf, buflen);
48724+
48725+ return res;
48726+}
48727+
48728+static char *
48729+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48730+ char *buf, int buflen)
48731+{
48732+ char *res;
48733+ struct path path;
48734+ struct path root;
48735+ struct task_struct *reaper = &init_task;
48736+
48737+ path.dentry = (struct dentry *)dentry;
48738+ path.mnt = (struct vfsmount *)vfsmnt;
48739+
48740+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
48741+ get_fs_root(reaper->fs, &root);
48742+
48743+ write_seqlock(&rename_lock);
48744+ br_read_lock(vfsmount_lock);
48745+ res = gen_full_path(&path, &root, buf, buflen);
48746+ br_read_unlock(vfsmount_lock);
48747+ write_sequnlock(&rename_lock);
48748+
48749+ path_put(&root);
48750+ return res;
48751+}
48752+
48753+static char *
48754+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48755+{
48756+ char *ret;
48757+ write_seqlock(&rename_lock);
48758+ br_read_lock(vfsmount_lock);
48759+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48760+ PAGE_SIZE);
48761+ br_read_unlock(vfsmount_lock);
48762+ write_sequnlock(&rename_lock);
48763+ return ret;
48764+}
48765+
48766+static char *
48767+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48768+{
48769+ char *ret;
48770+ char *buf;
48771+ int buflen;
48772+
48773+ write_seqlock(&rename_lock);
48774+ br_read_lock(vfsmount_lock);
48775+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
48776+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
48777+ buflen = (int)(ret - buf);
48778+ if (buflen >= 5)
48779+ prepend(&ret, &buflen, "/proc", 5);
48780+ else
48781+ ret = strcpy(buf, "<path too long>");
48782+ br_read_unlock(vfsmount_lock);
48783+ write_sequnlock(&rename_lock);
48784+ return ret;
48785+}
48786+
48787+char *
48788+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
48789+{
48790+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48791+ PAGE_SIZE);
48792+}
48793+
48794+char *
48795+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
48796+{
48797+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
48798+ PAGE_SIZE);
48799+}
48800+
48801+char *
48802+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
48803+{
48804+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
48805+ PAGE_SIZE);
48806+}
48807+
48808+char *
48809+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
48810+{
48811+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
48812+ PAGE_SIZE);
48813+}
48814+
48815+char *
48816+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
48817+{
48818+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
48819+ PAGE_SIZE);
48820+}
48821+
48822+__inline__ __u32
48823+to_gr_audit(const __u32 reqmode)
48824+{
48825+ /* masks off auditable permission flags, then shifts them to create
48826+ auditing flags, and adds the special case of append auditing if
48827+ we're requesting write */
48828+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
48829+}
48830+
48831+struct acl_subject_label *
48832+lookup_subject_map(const struct acl_subject_label *userp)
48833+{
48834+ unsigned int index = shash(userp, subj_map_set.s_size);
48835+ struct subject_map *match;
48836+
48837+ match = subj_map_set.s_hash[index];
48838+
48839+ while (match && match->user != userp)
48840+ match = match->next;
48841+
48842+ if (match != NULL)
48843+ return match->kernel;
48844+ else
48845+ return NULL;
48846+}
48847+
48848+static void
48849+insert_subj_map_entry(struct subject_map *subjmap)
48850+{
48851+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
48852+ struct subject_map **curr;
48853+
48854+ subjmap->prev = NULL;
48855+
48856+ curr = &subj_map_set.s_hash[index];
48857+ if (*curr != NULL)
48858+ (*curr)->prev = subjmap;
48859+
48860+ subjmap->next = *curr;
48861+ *curr = subjmap;
48862+
48863+ return;
48864+}
48865+
48866+static struct acl_role_label *
48867+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
48868+ const gid_t gid)
48869+{
48870+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
48871+ struct acl_role_label *match;
48872+ struct role_allowed_ip *ipp;
48873+ unsigned int x;
48874+ u32 curr_ip = task->signal->curr_ip;
48875+
48876+ task->signal->saved_ip = curr_ip;
48877+
48878+ match = acl_role_set.r_hash[index];
48879+
48880+ while (match) {
48881+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
48882+ for (x = 0; x < match->domain_child_num; x++) {
48883+ if (match->domain_children[x] == uid)
48884+ goto found;
48885+ }
48886+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
48887+ break;
48888+ match = match->next;
48889+ }
48890+found:
48891+ if (match == NULL) {
48892+ try_group:
48893+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
48894+ match = acl_role_set.r_hash[index];
48895+
48896+ while (match) {
48897+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
48898+ for (x = 0; x < match->domain_child_num; x++) {
48899+ if (match->domain_children[x] == gid)
48900+ goto found2;
48901+ }
48902+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
48903+ break;
48904+ match = match->next;
48905+ }
48906+found2:
48907+ if (match == NULL)
48908+ match = default_role;
48909+ if (match->allowed_ips == NULL)
48910+ return match;
48911+ else {
48912+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
48913+ if (likely
48914+ ((ntohl(curr_ip) & ipp->netmask) ==
48915+ (ntohl(ipp->addr) & ipp->netmask)))
48916+ return match;
48917+ }
48918+ match = default_role;
48919+ }
48920+ } else if (match->allowed_ips == NULL) {
48921+ return match;
48922+ } else {
48923+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
48924+ if (likely
48925+ ((ntohl(curr_ip) & ipp->netmask) ==
48926+ (ntohl(ipp->addr) & ipp->netmask)))
48927+ return match;
48928+ }
48929+ goto try_group;
48930+ }
48931+
48932+ return match;
48933+}
48934+
48935+struct acl_subject_label *
48936+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
48937+ const struct acl_role_label *role)
48938+{
48939+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
48940+ struct acl_subject_label *match;
48941+
48942+ match = role->subj_hash[index];
48943+
48944+ while (match && (match->inode != ino || match->device != dev ||
48945+ (match->mode & GR_DELETED))) {
48946+ match = match->next;
48947+ }
48948+
48949+ if (match && !(match->mode & GR_DELETED))
48950+ return match;
48951+ else
48952+ return NULL;
48953+}
48954+
48955+struct acl_subject_label *
48956+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
48957+ const struct acl_role_label *role)
48958+{
48959+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
48960+ struct acl_subject_label *match;
48961+
48962+ match = role->subj_hash[index];
48963+
48964+ while (match && (match->inode != ino || match->device != dev ||
48965+ !(match->mode & GR_DELETED))) {
48966+ match = match->next;
48967+ }
48968+
48969+ if (match && (match->mode & GR_DELETED))
48970+ return match;
48971+ else
48972+ return NULL;
48973+}
48974+
48975+static struct acl_object_label *
48976+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
48977+ const struct acl_subject_label *subj)
48978+{
48979+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
48980+ struct acl_object_label *match;
48981+
48982+ match = subj->obj_hash[index];
48983+
48984+ while (match && (match->inode != ino || match->device != dev ||
48985+ (match->mode & GR_DELETED))) {
48986+ match = match->next;
48987+ }
48988+
48989+ if (match && !(match->mode & GR_DELETED))
48990+ return match;
48991+ else
48992+ return NULL;
48993+}
48994+
48995+static struct acl_object_label *
48996+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
48997+ const struct acl_subject_label *subj)
48998+{
48999+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
49000+ struct acl_object_label *match;
49001+
49002+ match = subj->obj_hash[index];
49003+
49004+ while (match && (match->inode != ino || match->device != dev ||
49005+ !(match->mode & GR_DELETED))) {
49006+ match = match->next;
49007+ }
49008+
49009+ if (match && (match->mode & GR_DELETED))
49010+ return match;
49011+
49012+ match = subj->obj_hash[index];
49013+
49014+ while (match && (match->inode != ino || match->device != dev ||
49015+ (match->mode & GR_DELETED))) {
49016+ match = match->next;
49017+ }
49018+
49019+ if (match && !(match->mode & GR_DELETED))
49020+ return match;
49021+ else
49022+ return NULL;
49023+}
49024+
49025+static struct name_entry *
49026+lookup_name_entry(const char *name)
49027+{
49028+ unsigned int len = strlen(name);
49029+ unsigned int key = full_name_hash(name, len);
49030+ unsigned int index = key % name_set.n_size;
49031+ struct name_entry *match;
49032+
49033+ match = name_set.n_hash[index];
49034+
49035+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
49036+ match = match->next;
49037+
49038+ return match;
49039+}
49040+
49041+static struct name_entry *
49042+lookup_name_entry_create(const char *name)
49043+{
49044+ unsigned int len = strlen(name);
49045+ unsigned int key = full_name_hash(name, len);
49046+ unsigned int index = key % name_set.n_size;
49047+ struct name_entry *match;
49048+
49049+ match = name_set.n_hash[index];
49050+
49051+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
49052+ !match->deleted))
49053+ match = match->next;
49054+
49055+ if (match && match->deleted)
49056+ return match;
49057+
49058+ match = name_set.n_hash[index];
49059+
49060+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
49061+ match->deleted))
49062+ match = match->next;
49063+
49064+ if (match && !match->deleted)
49065+ return match;
49066+ else
49067+ return NULL;
49068+}
49069+
49070+static struct inodev_entry *
49071+lookup_inodev_entry(const ino_t ino, const dev_t dev)
49072+{
49073+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
49074+ struct inodev_entry *match;
49075+
49076+ match = inodev_set.i_hash[index];
49077+
49078+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
49079+ match = match->next;
49080+
49081+ return match;
49082+}
49083+
49084+static void
49085+insert_inodev_entry(struct inodev_entry *entry)
49086+{
49087+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
49088+ inodev_set.i_size);
49089+ struct inodev_entry **curr;
49090+
49091+ entry->prev = NULL;
49092+
49093+ curr = &inodev_set.i_hash[index];
49094+ if (*curr != NULL)
49095+ (*curr)->prev = entry;
49096+
49097+ entry->next = *curr;
49098+ *curr = entry;
49099+
49100+ return;
49101+}
49102+
49103+static void
49104+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
49105+{
49106+ unsigned int index =
49107+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
49108+ struct acl_role_label **curr;
49109+ struct acl_role_label *tmp;
49110+
49111+ curr = &acl_role_set.r_hash[index];
49112+
49113+ /* if role was already inserted due to domains and already has
49114+ a role in the same bucket as it attached, then we need to
49115+ combine these two buckets
49116+ */
49117+ if (role->next) {
49118+ tmp = role->next;
49119+ while (tmp->next)
49120+ tmp = tmp->next;
49121+ tmp->next = *curr;
49122+ } else
49123+ role->next = *curr;
49124+ *curr = role;
49125+
49126+ return;
49127+}
49128+
49129+static void
49130+insert_acl_role_label(struct acl_role_label *role)
49131+{
49132+ int i;
49133+
49134+ if (role_list == NULL) {
49135+ role_list = role;
49136+ role->prev = NULL;
49137+ } else {
49138+ role->prev = role_list;
49139+ role_list = role;
49140+ }
49141+
49142+ /* used for hash chains */
49143+ role->next = NULL;
49144+
49145+ if (role->roletype & GR_ROLE_DOMAIN) {
49146+ for (i = 0; i < role->domain_child_num; i++)
49147+ __insert_acl_role_label(role, role->domain_children[i]);
49148+ } else
49149+ __insert_acl_role_label(role, role->uidgid);
49150+}
49151+
49152+static int
49153+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
49154+{
49155+ struct name_entry **curr, *nentry;
49156+ struct inodev_entry *ientry;
49157+ unsigned int len = strlen(name);
49158+ unsigned int key = full_name_hash(name, len);
49159+ unsigned int index = key % name_set.n_size;
49160+
49161+ curr = &name_set.n_hash[index];
49162+
49163+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
49164+ curr = &((*curr)->next);
49165+
49166+ if (*curr != NULL)
49167+ return 1;
49168+
49169+ nentry = acl_alloc(sizeof (struct name_entry));
49170+ if (nentry == NULL)
49171+ return 0;
49172+ ientry = acl_alloc(sizeof (struct inodev_entry));
49173+ if (ientry == NULL)
49174+ return 0;
49175+ ientry->nentry = nentry;
49176+
49177+ nentry->key = key;
49178+ nentry->name = name;
49179+ nentry->inode = inode;
49180+ nentry->device = device;
49181+ nentry->len = len;
49182+ nentry->deleted = deleted;
49183+
49184+ nentry->prev = NULL;
49185+ curr = &name_set.n_hash[index];
49186+ if (*curr != NULL)
49187+ (*curr)->prev = nentry;
49188+ nentry->next = *curr;
49189+ *curr = nentry;
49190+
49191+ /* insert us into the table searchable by inode/dev */
49192+ insert_inodev_entry(ientry);
49193+
49194+ return 1;
49195+}
49196+
49197+static void
49198+insert_acl_obj_label(struct acl_object_label *obj,
49199+ struct acl_subject_label *subj)
49200+{
49201+ unsigned int index =
49202+ fhash(obj->inode, obj->device, subj->obj_hash_size);
49203+ struct acl_object_label **curr;
49204+
49205+
49206+ obj->prev = NULL;
49207+
49208+ curr = &subj->obj_hash[index];
49209+ if (*curr != NULL)
49210+ (*curr)->prev = obj;
49211+
49212+ obj->next = *curr;
49213+ *curr = obj;
49214+
49215+ return;
49216+}
49217+
49218+static void
49219+insert_acl_subj_label(struct acl_subject_label *obj,
49220+ struct acl_role_label *role)
49221+{
49222+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
49223+ struct acl_subject_label **curr;
49224+
49225+ obj->prev = NULL;
49226+
49227+ curr = &role->subj_hash[index];
49228+ if (*curr != NULL)
49229+ (*curr)->prev = obj;
49230+
49231+ obj->next = *curr;
49232+ *curr = obj;
49233+
49234+ return;
49235+}
49236+
49237+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
49238+
49239+static void *
49240+create_table(__u32 * len, int elementsize)
49241+{
49242+ unsigned int table_sizes[] = {
49243+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
49244+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
49245+ 4194301, 8388593, 16777213, 33554393, 67108859
49246+ };
49247+ void *newtable = NULL;
49248+ unsigned int pwr = 0;
49249+
49250+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
49251+ table_sizes[pwr] <= *len)
49252+ pwr++;
49253+
49254+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
49255+ return newtable;
49256+
49257+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
49258+ newtable =
49259+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
49260+ else
49261+ newtable = vmalloc(table_sizes[pwr] * elementsize);
49262+
49263+ *len = table_sizes[pwr];
49264+
49265+ return newtable;
49266+}
49267+
49268+static int
49269+init_variables(const struct gr_arg *arg)
49270+{
49271+ struct task_struct *reaper = &init_task;
49272+ unsigned int stacksize;
49273+
49274+ subj_map_set.s_size = arg->role_db.num_subjects;
49275+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
49276+ name_set.n_size = arg->role_db.num_objects;
49277+ inodev_set.i_size = arg->role_db.num_objects;
49278+
49279+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
49280+ !name_set.n_size || !inodev_set.i_size)
49281+ return 1;
49282+
49283+ if (!gr_init_uidset())
49284+ return 1;
49285+
49286+ /* set up the stack that holds allocation info */
49287+
49288+ stacksize = arg->role_db.num_pointers + 5;
49289+
49290+ if (!acl_alloc_stack_init(stacksize))
49291+ return 1;
49292+
49293+ /* grab reference for the real root dentry and vfsmount */
49294+ get_fs_root(reaper->fs, &real_root);
49295+
49296+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49297+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
49298+#endif
49299+
49300+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
49301+ if (fakefs_obj_rw == NULL)
49302+ return 1;
49303+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
49304+
49305+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
49306+ if (fakefs_obj_rwx == NULL)
49307+ return 1;
49308+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
49309+
49310+ subj_map_set.s_hash =
49311+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
49312+ acl_role_set.r_hash =
49313+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
49314+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
49315+ inodev_set.i_hash =
49316+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
49317+
49318+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
49319+ !name_set.n_hash || !inodev_set.i_hash)
49320+ return 1;
49321+
49322+ memset(subj_map_set.s_hash, 0,
49323+ sizeof(struct subject_map *) * subj_map_set.s_size);
49324+ memset(acl_role_set.r_hash, 0,
49325+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
49326+ memset(name_set.n_hash, 0,
49327+ sizeof (struct name_entry *) * name_set.n_size);
49328+ memset(inodev_set.i_hash, 0,
49329+ sizeof (struct inodev_entry *) * inodev_set.i_size);
49330+
49331+ return 0;
49332+}
49333+
49334+/* free information not needed after startup
49335+ currently contains user->kernel pointer mappings for subjects
49336+*/
49337+
49338+static void
49339+free_init_variables(void)
49340+{
49341+ __u32 i;
49342+
49343+ if (subj_map_set.s_hash) {
49344+ for (i = 0; i < subj_map_set.s_size; i++) {
49345+ if (subj_map_set.s_hash[i]) {
49346+ kfree(subj_map_set.s_hash[i]);
49347+ subj_map_set.s_hash[i] = NULL;
49348+ }
49349+ }
49350+
49351+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
49352+ PAGE_SIZE)
49353+ kfree(subj_map_set.s_hash);
49354+ else
49355+ vfree(subj_map_set.s_hash);
49356+ }
49357+
49358+ return;
49359+}
49360+
49361+static void
49362+free_variables(void)
49363+{
49364+ struct acl_subject_label *s;
49365+ struct acl_role_label *r;
49366+ struct task_struct *task, *task2;
49367+ unsigned int x;
49368+
49369+ gr_clear_learn_entries();
49370+
49371+ read_lock(&tasklist_lock);
49372+ do_each_thread(task2, task) {
49373+ task->acl_sp_role = 0;
49374+ task->acl_role_id = 0;
49375+ task->acl = NULL;
49376+ task->role = NULL;
49377+ } while_each_thread(task2, task);
49378+ read_unlock(&tasklist_lock);
49379+
49380+ /* release the reference to the real root dentry and vfsmount */
49381+ path_put(&real_root);
49382+
49383+ /* free all object hash tables */
49384+
49385+ FOR_EACH_ROLE_START(r)
49386+ if (r->subj_hash == NULL)
49387+ goto next_role;
49388+ FOR_EACH_SUBJECT_START(r, s, x)
49389+ if (s->obj_hash == NULL)
49390+ break;
49391+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
49392+ kfree(s->obj_hash);
49393+ else
49394+ vfree(s->obj_hash);
49395+ FOR_EACH_SUBJECT_END(s, x)
49396+ FOR_EACH_NESTED_SUBJECT_START(r, s)
49397+ if (s->obj_hash == NULL)
49398+ break;
49399+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
49400+ kfree(s->obj_hash);
49401+ else
49402+ vfree(s->obj_hash);
49403+ FOR_EACH_NESTED_SUBJECT_END(s)
49404+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
49405+ kfree(r->subj_hash);
49406+ else
49407+ vfree(r->subj_hash);
49408+ r->subj_hash = NULL;
49409+next_role:
49410+ FOR_EACH_ROLE_END(r)
49411+
49412+ acl_free_all();
49413+
49414+ if (acl_role_set.r_hash) {
49415+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
49416+ PAGE_SIZE)
49417+ kfree(acl_role_set.r_hash);
49418+ else
49419+ vfree(acl_role_set.r_hash);
49420+ }
49421+ if (name_set.n_hash) {
49422+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
49423+ PAGE_SIZE)
49424+ kfree(name_set.n_hash);
49425+ else
49426+ vfree(name_set.n_hash);
49427+ }
49428+
49429+ if (inodev_set.i_hash) {
49430+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
49431+ PAGE_SIZE)
49432+ kfree(inodev_set.i_hash);
49433+ else
49434+ vfree(inodev_set.i_hash);
49435+ }
49436+
49437+ gr_free_uidset();
49438+
49439+ memset(&name_set, 0, sizeof (struct name_db));
49440+ memset(&inodev_set, 0, sizeof (struct inodev_db));
49441+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
49442+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
49443+
49444+ default_role = NULL;
49445+ role_list = NULL;
49446+
49447+ return;
49448+}
49449+
49450+static __u32
49451+count_user_objs(struct acl_object_label *userp)
49452+{
49453+ struct acl_object_label o_tmp;
49454+ __u32 num = 0;
49455+
49456+ while (userp) {
49457+ if (copy_from_user(&o_tmp, userp,
49458+ sizeof (struct acl_object_label)))
49459+ break;
49460+
49461+ userp = o_tmp.prev;
49462+ num++;
49463+ }
49464+
49465+ return num;
49466+}
49467+
49468+static struct acl_subject_label *
49469+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
49470+
49471+static int
49472+copy_user_glob(struct acl_object_label *obj)
49473+{
49474+ struct acl_object_label *g_tmp, **guser;
49475+ unsigned int len;
49476+ char *tmp;
49477+
49478+ if (obj->globbed == NULL)
49479+ return 0;
49480+
49481+ guser = &obj->globbed;
49482+ while (*guser) {
49483+ g_tmp = (struct acl_object_label *)
49484+ acl_alloc(sizeof (struct acl_object_label));
49485+ if (g_tmp == NULL)
49486+ return -ENOMEM;
49487+
49488+ if (copy_from_user(g_tmp, *guser,
49489+ sizeof (struct acl_object_label)))
49490+ return -EFAULT;
49491+
49492+ len = strnlen_user(g_tmp->filename, PATH_MAX);
49493+
49494+ if (!len || len >= PATH_MAX)
49495+ return -EINVAL;
49496+
49497+ if ((tmp = (char *) acl_alloc(len)) == NULL)
49498+ return -ENOMEM;
49499+
49500+ if (copy_from_user(tmp, g_tmp->filename, len))
49501+ return -EFAULT;
49502+ tmp[len-1] = '\0';
49503+ g_tmp->filename = tmp;
49504+
49505+ *guser = g_tmp;
49506+ guser = &(g_tmp->next);
49507+ }
49508+
49509+ return 0;
49510+}
49511+
49512+static int
49513+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
49514+ struct acl_role_label *role)
49515+{
49516+ struct acl_object_label *o_tmp;
49517+ unsigned int len;
49518+ int ret;
49519+ char *tmp;
49520+
49521+ while (userp) {
49522+ if ((o_tmp = (struct acl_object_label *)
49523+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
49524+ return -ENOMEM;
49525+
49526+ if (copy_from_user(o_tmp, userp,
49527+ sizeof (struct acl_object_label)))
49528+ return -EFAULT;
49529+
49530+ userp = o_tmp->prev;
49531+
49532+ len = strnlen_user(o_tmp->filename, PATH_MAX);
49533+
49534+ if (!len || len >= PATH_MAX)
49535+ return -EINVAL;
49536+
49537+ if ((tmp = (char *) acl_alloc(len)) == NULL)
49538+ return -ENOMEM;
49539+
49540+ if (copy_from_user(tmp, o_tmp->filename, len))
49541+ return -EFAULT;
49542+ tmp[len-1] = '\0';
49543+ o_tmp->filename = tmp;
49544+
49545+ insert_acl_obj_label(o_tmp, subj);
49546+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
49547+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
49548+ return -ENOMEM;
49549+
49550+ ret = copy_user_glob(o_tmp);
49551+ if (ret)
49552+ return ret;
49553+
49554+ if (o_tmp->nested) {
49555+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
49556+ if (IS_ERR(o_tmp->nested))
49557+ return PTR_ERR(o_tmp->nested);
49558+
49559+ /* insert into nested subject list */
49560+ o_tmp->nested->next = role->hash->first;
49561+ role->hash->first = o_tmp->nested;
49562+ }
49563+ }
49564+
49565+ return 0;
49566+}
49567+
49568+static __u32
49569+count_user_subjs(struct acl_subject_label *userp)
49570+{
49571+ struct acl_subject_label s_tmp;
49572+ __u32 num = 0;
49573+
49574+ while (userp) {
49575+ if (copy_from_user(&s_tmp, userp,
49576+ sizeof (struct acl_subject_label)))
49577+ break;
49578+
49579+ userp = s_tmp.prev;
49580+ /* do not count nested subjects against this count, since
49581+ they are not included in the hash table, but are
49582+ attached to objects. We have already counted
49583+ the subjects in userspace for the allocation
49584+ stack
49585+ */
49586+ if (!(s_tmp.mode & GR_NESTED))
49587+ num++;
49588+ }
49589+
49590+ return num;
49591+}
49592+
49593+static int
49594+copy_user_allowedips(struct acl_role_label *rolep)
49595+{
49596+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
49597+
49598+ ruserip = rolep->allowed_ips;
49599+
49600+ while (ruserip) {
49601+ rlast = rtmp;
49602+
49603+ if ((rtmp = (struct role_allowed_ip *)
49604+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
49605+ return -ENOMEM;
49606+
49607+ if (copy_from_user(rtmp, ruserip,
49608+ sizeof (struct role_allowed_ip)))
49609+ return -EFAULT;
49610+
49611+ ruserip = rtmp->prev;
49612+
49613+ if (!rlast) {
49614+ rtmp->prev = NULL;
49615+ rolep->allowed_ips = rtmp;
49616+ } else {
49617+ rlast->next = rtmp;
49618+ rtmp->prev = rlast;
49619+ }
49620+
49621+ if (!ruserip)
49622+ rtmp->next = NULL;
49623+ }
49624+
49625+ return 0;
49626+}
49627+
49628+static int
49629+copy_user_transitions(struct acl_role_label *rolep)
49630+{
49631+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
49632+
49633+ unsigned int len;
49634+ char *tmp;
49635+
49636+ rusertp = rolep->transitions;
49637+
49638+ while (rusertp) {
49639+ rlast = rtmp;
49640+
49641+ if ((rtmp = (struct role_transition *)
49642+ acl_alloc(sizeof (struct role_transition))) == NULL)
49643+ return -ENOMEM;
49644+
49645+ if (copy_from_user(rtmp, rusertp,
49646+ sizeof (struct role_transition)))
49647+ return -EFAULT;
49648+
49649+ rusertp = rtmp->prev;
49650+
49651+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
49652+
49653+ if (!len || len >= GR_SPROLE_LEN)
49654+ return -EINVAL;
49655+
49656+ if ((tmp = (char *) acl_alloc(len)) == NULL)
49657+ return -ENOMEM;
49658+
49659+ if (copy_from_user(tmp, rtmp->rolename, len))
49660+ return -EFAULT;
49661+ tmp[len-1] = '\0';
49662+ rtmp->rolename = tmp;
49663+
49664+ if (!rlast) {
49665+ rtmp->prev = NULL;
49666+ rolep->transitions = rtmp;
49667+ } else {
49668+ rlast->next = rtmp;
49669+ rtmp->prev = rlast;
49670+ }
49671+
49672+ if (!rusertp)
49673+ rtmp->next = NULL;
49674+ }
49675+
49676+ return 0;
49677+}
49678+
49679+static struct acl_subject_label *
49680+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
49681+{
49682+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
49683+ unsigned int len;
49684+ char *tmp;
49685+ __u32 num_objs;
49686+ struct acl_ip_label **i_tmp, *i_utmp2;
49687+ struct gr_hash_struct ghash;
49688+ struct subject_map *subjmap;
49689+ unsigned int i_num;
49690+ int err;
49691+
49692+ s_tmp = lookup_subject_map(userp);
49693+
49694+ /* we've already copied this subject into the kernel, just return
49695+ the reference to it, and don't copy it over again
49696+ */
49697+ if (s_tmp)
49698+ return(s_tmp);
49699+
49700+ if ((s_tmp = (struct acl_subject_label *)
49701+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
49702+ return ERR_PTR(-ENOMEM);
49703+
49704+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
49705+ if (subjmap == NULL)
49706+ return ERR_PTR(-ENOMEM);
49707+
49708+ subjmap->user = userp;
49709+ subjmap->kernel = s_tmp;
49710+ insert_subj_map_entry(subjmap);
49711+
49712+ if (copy_from_user(s_tmp, userp,
49713+ sizeof (struct acl_subject_label)))
49714+ return ERR_PTR(-EFAULT);
49715+
49716+ len = strnlen_user(s_tmp->filename, PATH_MAX);
49717+
49718+ if (!len || len >= PATH_MAX)
49719+ return ERR_PTR(-EINVAL);
49720+
49721+ if ((tmp = (char *) acl_alloc(len)) == NULL)
49722+ return ERR_PTR(-ENOMEM);
49723+
49724+ if (copy_from_user(tmp, s_tmp->filename, len))
49725+ return ERR_PTR(-EFAULT);
49726+ tmp[len-1] = '\0';
49727+ s_tmp->filename = tmp;
49728+
49729+ if (!strcmp(s_tmp->filename, "/"))
49730+ role->root_label = s_tmp;
49731+
49732+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
49733+ return ERR_PTR(-EFAULT);
49734+
49735+ /* copy user and group transition tables */
49736+
49737+ if (s_tmp->user_trans_num) {
49738+ uid_t *uidlist;
49739+
49740+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
49741+ if (uidlist == NULL)
49742+ return ERR_PTR(-ENOMEM);
49743+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
49744+ return ERR_PTR(-EFAULT);
49745+
49746+ s_tmp->user_transitions = uidlist;
49747+ }
49748+
49749+ if (s_tmp->group_trans_num) {
49750+ gid_t *gidlist;
49751+
49752+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
49753+ if (gidlist == NULL)
49754+ return ERR_PTR(-ENOMEM);
49755+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
49756+ return ERR_PTR(-EFAULT);
49757+
49758+ s_tmp->group_transitions = gidlist;
49759+ }
49760+
49761+ /* set up object hash table */
49762+ num_objs = count_user_objs(ghash.first);
49763+
49764+ s_tmp->obj_hash_size = num_objs;
49765+ s_tmp->obj_hash =
49766+ (struct acl_object_label **)
49767+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
49768+
49769+ if (!s_tmp->obj_hash)
49770+ return ERR_PTR(-ENOMEM);
49771+
49772+ memset(s_tmp->obj_hash, 0,
49773+ s_tmp->obj_hash_size *
49774+ sizeof (struct acl_object_label *));
49775+
49776+ /* add in objects */
49777+ err = copy_user_objs(ghash.first, s_tmp, role);
49778+
49779+ if (err)
49780+ return ERR_PTR(err);
49781+
49782+ /* set pointer for parent subject */
49783+ if (s_tmp->parent_subject) {
49784+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
49785+
49786+ if (IS_ERR(s_tmp2))
49787+ return s_tmp2;
49788+
49789+ s_tmp->parent_subject = s_tmp2;
49790+ }
49791+
49792+ /* add in ip acls */
49793+
49794+ if (!s_tmp->ip_num) {
49795+ s_tmp->ips = NULL;
49796+ goto insert;
49797+ }
49798+
49799+ i_tmp =
49800+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
49801+ sizeof (struct acl_ip_label *));
49802+
49803+ if (!i_tmp)
49804+ return ERR_PTR(-ENOMEM);
49805+
49806+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
49807+ *(i_tmp + i_num) =
49808+ (struct acl_ip_label *)
49809+ acl_alloc(sizeof (struct acl_ip_label));
49810+ if (!*(i_tmp + i_num))
49811+ return ERR_PTR(-ENOMEM);
49812+
49813+ if (copy_from_user
49814+ (&i_utmp2, s_tmp->ips + i_num,
49815+ sizeof (struct acl_ip_label *)))
49816+ return ERR_PTR(-EFAULT);
49817+
49818+ if (copy_from_user
49819+ (*(i_tmp + i_num), i_utmp2,
49820+ sizeof (struct acl_ip_label)))
49821+ return ERR_PTR(-EFAULT);
49822+
49823+ if ((*(i_tmp + i_num))->iface == NULL)
49824+ continue;
49825+
49826+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
49827+ if (!len || len >= IFNAMSIZ)
49828+ return ERR_PTR(-EINVAL);
49829+ tmp = acl_alloc(len);
49830+ if (tmp == NULL)
49831+ return ERR_PTR(-ENOMEM);
49832+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
49833+ return ERR_PTR(-EFAULT);
49834+ (*(i_tmp + i_num))->iface = tmp;
49835+ }
49836+
49837+ s_tmp->ips = i_tmp;
49838+
49839+insert:
49840+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
49841+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
49842+ return ERR_PTR(-ENOMEM);
49843+
49844+ return s_tmp;
49845+}
49846+
49847+static int
49848+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
49849+{
49850+ struct acl_subject_label s_pre;
49851+ struct acl_subject_label * ret;
49852+ int err;
49853+
49854+ while (userp) {
49855+ if (copy_from_user(&s_pre, userp,
49856+ sizeof (struct acl_subject_label)))
49857+ return -EFAULT;
49858+
49859+ /* do not add nested subjects here, add
49860+ while parsing objects
49861+ */
49862+
49863+ if (s_pre.mode & GR_NESTED) {
49864+ userp = s_pre.prev;
49865+ continue;
49866+ }
49867+
49868+ ret = do_copy_user_subj(userp, role);
49869+
49870+ err = PTR_ERR(ret);
49871+ if (IS_ERR(ret))
49872+ return err;
49873+
49874+ insert_acl_subj_label(ret, role);
49875+
49876+ userp = s_pre.prev;
49877+ }
49878+
49879+ return 0;
49880+}
49881+
49882+static int
49883+copy_user_acl(struct gr_arg *arg)
49884+{
49885+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
49886+ struct sprole_pw *sptmp;
49887+ struct gr_hash_struct *ghash;
49888+ uid_t *domainlist;
49889+ unsigned int r_num;
49890+ unsigned int len;
49891+ char *tmp;
49892+ int err = 0;
49893+ __u16 i;
49894+ __u32 num_subjs;
49895+
49896+ /* we need a default and kernel role */
49897+ if (arg->role_db.num_roles < 2)
49898+ return -EINVAL;
49899+
49900+ /* copy special role authentication info from userspace */
49901+
49902+ num_sprole_pws = arg->num_sprole_pws;
49903+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
49904+
49905+ if (!acl_special_roles) {
49906+ err = -ENOMEM;
49907+ goto cleanup;
49908+ }
49909+
49910+ for (i = 0; i < num_sprole_pws; i++) {
49911+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
49912+ if (!sptmp) {
49913+ err = -ENOMEM;
49914+ goto cleanup;
49915+ }
49916+ if (copy_from_user(sptmp, arg->sprole_pws + i,
49917+ sizeof (struct sprole_pw))) {
49918+ err = -EFAULT;
49919+ goto cleanup;
49920+ }
49921+
49922+ len =
49923+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
49924+
49925+ if (!len || len >= GR_SPROLE_LEN) {
49926+ err = -EINVAL;
49927+ goto cleanup;
49928+ }
49929+
49930+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
49931+ err = -ENOMEM;
49932+ goto cleanup;
49933+ }
49934+
49935+ if (copy_from_user(tmp, sptmp->rolename, len)) {
49936+ err = -EFAULT;
49937+ goto cleanup;
49938+ }
49939+ tmp[len-1] = '\0';
49940+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49941+ printk(KERN_ALERT "Copying special role %s\n", tmp);
49942+#endif
49943+ sptmp->rolename = tmp;
49944+ acl_special_roles[i] = sptmp;
49945+ }
49946+
49947+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
49948+
49949+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
49950+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
49951+
49952+ if (!r_tmp) {
49953+ err = -ENOMEM;
49954+ goto cleanup;
49955+ }
49956+
49957+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
49958+ sizeof (struct acl_role_label *))) {
49959+ err = -EFAULT;
49960+ goto cleanup;
49961+ }
49962+
49963+ if (copy_from_user(r_tmp, r_utmp2,
49964+ sizeof (struct acl_role_label))) {
49965+ err = -EFAULT;
49966+ goto cleanup;
49967+ }
49968+
49969+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
49970+
49971+ if (!len || len >= PATH_MAX) {
49972+ err = -EINVAL;
49973+ goto cleanup;
49974+ }
49975+
49976+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
49977+ err = -ENOMEM;
49978+ goto cleanup;
49979+ }
49980+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
49981+ err = -EFAULT;
49982+ goto cleanup;
49983+ }
49984+ tmp[len-1] = '\0';
49985+ r_tmp->rolename = tmp;
49986+
49987+ if (!strcmp(r_tmp->rolename, "default")
49988+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
49989+ default_role = r_tmp;
49990+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
49991+ kernel_role = r_tmp;
49992+ }
49993+
49994+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
49995+ err = -ENOMEM;
49996+ goto cleanup;
49997+ }
49998+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
49999+ err = -EFAULT;
50000+ goto cleanup;
50001+ }
50002+
50003+ r_tmp->hash = ghash;
50004+
50005+ num_subjs = count_user_subjs(r_tmp->hash->first);
50006+
50007+ r_tmp->subj_hash_size = num_subjs;
50008+ r_tmp->subj_hash =
50009+ (struct acl_subject_label **)
50010+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
50011+
50012+ if (!r_tmp->subj_hash) {
50013+ err = -ENOMEM;
50014+ goto cleanup;
50015+ }
50016+
50017+ err = copy_user_allowedips(r_tmp);
50018+ if (err)
50019+ goto cleanup;
50020+
50021+ /* copy domain info */
50022+ if (r_tmp->domain_children != NULL) {
50023+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
50024+ if (domainlist == NULL) {
50025+ err = -ENOMEM;
50026+ goto cleanup;
50027+ }
50028+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
50029+ err = -EFAULT;
50030+ goto cleanup;
50031+ }
50032+ r_tmp->domain_children = domainlist;
50033+ }
50034+
50035+ err = copy_user_transitions(r_tmp);
50036+ if (err)
50037+ goto cleanup;
50038+
50039+ memset(r_tmp->subj_hash, 0,
50040+ r_tmp->subj_hash_size *
50041+ sizeof (struct acl_subject_label *));
50042+
50043+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
50044+
50045+ if (err)
50046+ goto cleanup;
50047+
50048+ /* set nested subject list to null */
50049+ r_tmp->hash->first = NULL;
50050+
50051+ insert_acl_role_label(r_tmp);
50052+ }
50053+
50054+ goto return_err;
50055+ cleanup:
50056+ free_variables();
50057+ return_err:
50058+ return err;
50059+
50060+}
50061+
static int
gracl_init(struct gr_arg *args)
{
	int error = 0;

	/* Load and activate a new RBAC policy supplied from userland.
	 * Returns 0 on success or a negative errno; on any failure all
	 * partially-built policy state is torn down via free_variables().
	 */

	/* record the admin-supplied salt and password hash used later to
	   authenticate privileged requests against this policy */
	memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
	memcpy(gr_system_sum, args->sum, GR_SHA_LEN);

	if (init_variables(args)) {
		gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
		error = -ENOMEM;
		free_variables();
		goto out;
	}

	/* copy the whole userland policy into kernel memory; the temporary
	   init-time state is released regardless of success */
	error = copy_user_acl(args);
	free_init_variables();
	if (error) {
		free_variables();
		goto out;
	}

	/* attach roles/subjects to all existing tasks before going live */
	if ((error = gr_set_acls(0))) {
		free_variables();
		goto out;
	}

	/* NOTE(review): pax_open/close_kernel suggests gr_status lives in
	   write-protected data — confirm against the PaX patch */
	pax_open_kernel();
	gr_status |= GR_READY;
	pax_close_kernel();

      out:
	return error;
}
50096+
/* derived from glibc fnmatch(); returns 0 on match, 1 on no match.
 * '/' is never matched by wildcards (FNM_PATHNAME-style behavior). */

static int
glob_match(const char *pattern, const char *name)
{
	char pc;

	while ((pc = *pattern++) != '\0') {
		switch (pc) {
		case '?':
			/* any single character except end-of-string or '/' */
			if (*name == '\0' || *name == '/')
				return 1;
			break;
		case '\\':
			/* a '\\' in the pattern must match a literal
			   backslash in the name (no escape semantics) */
			if (*name != pc)
				return 1;
			break;
		case '*':
			/* collapse runs of wildcards; each '?' consumes one
			   character, and none of them may cross a '/' */
			for (pc = *pattern++; pc == '?' || pc == '*'; pc = *pattern++) {
				if (*name == '/')
					return 1;
				else if (pc == '?') {
					if (*name == '\0')
						return 1;
					else
						++name;
				}
			}
			if (pc == '\0') {
				/* trailing '*' matches the rest */
				return 0;
			} else {
				const char *seg_end;

				/* '*' may only expand within the current
				   path segment */
				if ((seg_end = strchr(name, '/')) == NULL)
					seg_end = name + strlen(name);

				if (pc == '[') {
					for (--pattern; name < seg_end; ++name)
						if (!glob_match(pattern, name))
							return 0;
				} else if (pc == '/') {
					while (*name != '\0' && *name != '/')
						++name;
					if (*name == '/' && !glob_match(pattern, name + 1))
						return 0;
				} else {
					/* try every occurrence of the literal
					   character after the '*' */
					for (--pattern; name < seg_end; ++name)
						if (*name == pc && !glob_match(pattern, name))
							return 0;
				}

				return 1;
			}
		case '[':
			{
				int negate;
				char lo;

				if (*name == '\0' || *name == '/')
					return 1;

				/* '!' or '^' right after '[' negates the class */
				negate = (*pattern == '!' || *pattern == '^');
				if (negate)
					++pattern;

				pc = *pattern++;
				for (;;) {
					unsigned char nc = (unsigned char)*name;

					/* unterminated class: no match */
					if (pc == '\0')
						return 1;
					else {
						if (pc == nc)
							goto matched;
						lo = pc;
						pc = *pattern++;

						if (pc == '-' && *pattern != ']') {
							unsigned char hi = *pattern++;

							if (hi == '\0')
								return 1;

							if (lo <= nc && nc <= hi)
								goto matched;

							pc = *pattern++;
						}
					}

					if (pc == ']')
						break;
				}
				if (!negate)
					return 1;
				break;
			matched:
				/* consume the rest of the class */
				while (pc != ']') {
					if (pc == '\0')
						return 1;

					pc = *pattern++;
				}
				if (negate)
					return 1;
			}
			break;
		default:
			if (pc != *name)
				return 1;
		}

		++name;
	}

	/* pattern exhausted: match iff the name ends here or at a
	   path-segment boundary */
	if (*name == '\0')
		return 0;

	if (*name == '/')
		return 0;

	return 1;
}
50222+
50223+static struct acl_object_label *
50224+chk_glob_label(struct acl_object_label *globbed,
50225+ struct dentry *dentry, struct vfsmount *mnt, char **path)
50226+{
50227+ struct acl_object_label *tmp;
50228+
50229+ if (*path == NULL)
50230+ *path = gr_to_filename_nolock(dentry, mnt);
50231+
50232+ tmp = globbed;
50233+
50234+ while (tmp) {
50235+ if (!glob_match(tmp->filename, *path))
50236+ return tmp;
50237+ tmp = tmp->next;
50238+ }
50239+
50240+ return NULL;
50241+}
50242+
static struct acl_object_label *
__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
	      const ino_t curr_ino, const dev_t curr_dev,
	      const struct acl_subject_label *subj, char **path, const int checkglob)
{
	struct acl_subject_label *tmpsubj;
	struct acl_object_label *retval;
	struct acl_object_label *retval2;

	/* Look up the object label for (curr_ino, curr_dev), walking from
	 * the given subject up through its parent subjects until a match is
	 * found.  Returns NULL if no subject in the chain has a label.
	 */
	tmpsubj = (struct acl_subject_label *) subj;
	read_lock(&gr_inode_lock);
	do {
		retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
		if (retval) {
			/* prefer a more specific glob rule when requested
			   and one matches the full original pathname */
			if (checkglob && retval->globbed) {
				retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
						(struct vfsmount *)orig_mnt, path);
				if (retval2)
					retval = retval2;
			}
			break;
		}
	} while ((tmpsubj = tmpsubj->parent_subject));
	read_unlock(&gr_inode_lock);

	return retval;
}
50270+
static __inline__ struct acl_object_label *
full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
	    struct dentry *curr_dentry,
	    const struct acl_subject_label *subj, char **path, const int checkglob)
{
	int newglob = checkglob;
	ino_t inode;
	dev_t device;

	/* if we aren't checking a subdirectory of the original path yet, don't do glob checking
	   as we don't want a / * rule to match instead of the / object
	   don't do this for create lookups that call this function though, since they're looking up
	   on the parent and thus need globbing checks on all paths
	*/
	if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
		newglob = GR_NO_GLOB;

	/* snapshot inode/device under d_lock so a concurrent rename or
	   delete cannot hand us a torn pair */
	spin_lock(&curr_dentry->d_lock);
	inode = curr_dentry->d_inode->i_ino;
	device = __get_dev(curr_dentry);
	spin_unlock(&curr_dentry->d_lock);

	return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
}
50295+
/* Resolve the object label governing (l_dentry, l_mnt) for the given
 * subject by walking from the dentry up through parents and mountpoints
 * to the policy root.  Never returns NULL: a default label is guaranteed
 * to exist at the real root (enforced by the BUG_ON below).
 */
static struct acl_object_label *
__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
		const struct acl_subject_label *subj, char *path, const int checkglob)
{
	struct dentry *dentry = (struct dentry *) l_dentry;
	struct vfsmount *mnt = (struct vfsmount *) l_mnt;
	struct acl_object_label *retval;
	struct dentry *parent;

	/* rename_lock + vfsmount_lock keep the ancestor chain stable
	   while we walk it */
	write_seqlock(&rename_lock);
	br_read_lock(vfsmount_lock);

	/* pseudo filesystems (anonymous shm, pipes, sockets, unlinked
	   hugetlbfs files, private inodes) get a synthetic label rather
	   than a path-based lookup */
	if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
#ifdef CONFIG_NET
	    mnt == sock_mnt ||
#endif
#ifdef CONFIG_HUGETLBFS
	    (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
#endif
		/* ignore Eric Biederman */
	    IS_PRIVATE(l_dentry->d_inode))) {
		retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
		goto out;
	}

	for (;;) {
		if (dentry == real_root.dentry && mnt == real_root.mnt)
			break;

		/* at a mount root: check it, then hop to the mountpoint in
		   the parent mount */
		if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
			if (mnt->mnt_parent == mnt)
				break;

			retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
			if (retval != NULL)
				goto out;

			dentry = mnt->mnt_mountpoint;
			mnt = mnt->mnt_parent;
			continue;
		}

		parent = dentry->d_parent;
		retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
		if (retval != NULL)
			goto out;

		dentry = parent;
	}

	/* check the root we stopped at */
	retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);

	/* real_root is pinned so we don't have to hold a reference */
	if (retval == NULL)
		retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
out:
	br_read_unlock(vfsmount_lock);
	write_sequnlock(&rename_lock);

	/* policy construction guarantees a default object at the root */
	BUG_ON(retval == NULL);

	return retval;
}
50359+
50360+static __inline__ struct acl_object_label *
50361+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50362+ const struct acl_subject_label *subj)
50363+{
50364+ char *path = NULL;
50365+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
50366+}
50367+
50368+static __inline__ struct acl_object_label *
50369+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50370+ const struct acl_subject_label *subj)
50371+{
50372+ char *path = NULL;
50373+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
50374+}
50375+
50376+static __inline__ struct acl_object_label *
50377+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50378+ const struct acl_subject_label *subj, char *path)
50379+{
50380+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
50381+}
50382+
/* Resolve the subject label for (l_dentry, l_mnt) within the given role,
 * walking from the dentry up through parents and mountpoints.  Falls back
 * to the real root's subject, which policy construction guarantees to
 * exist (hence the BUG_ON).
 */
static struct acl_subject_label *
chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
	       const struct acl_role_label *role)
{
	struct dentry *dentry = (struct dentry *) l_dentry;
	struct vfsmount *mnt = (struct vfsmount *) l_mnt;
	struct acl_subject_label *retval;
	struct dentry *parent;

	write_seqlock(&rename_lock);
	br_read_lock(vfsmount_lock);

	for (;;) {
		if (dentry == real_root.dentry && mnt == real_root.mnt)
			break;
		/* at a mount root: check it, then hop to the parent mount */
		if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
			if (mnt->mnt_parent == mnt)
				break;

			/* d_lock keeps i_ino/device consistent;
			   gr_inode_lock nests inside it */
			spin_lock(&dentry->d_lock);
			read_lock(&gr_inode_lock);
			retval =
				lookup_acl_subj_label(dentry->d_inode->i_ino,
						__get_dev(dentry), role);
			read_unlock(&gr_inode_lock);
			spin_unlock(&dentry->d_lock);
			if (retval != NULL)
				goto out;

			dentry = mnt->mnt_mountpoint;
			mnt = mnt->mnt_parent;
			continue;
		}

		spin_lock(&dentry->d_lock);
		read_lock(&gr_inode_lock);
		retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
					  __get_dev(dentry), role);
		read_unlock(&gr_inode_lock);
		/* grab the parent while still under d_lock */
		parent = dentry->d_parent;
		spin_unlock(&dentry->d_lock);

		if (retval != NULL)
			goto out;

		dentry = parent;
	}

	spin_lock(&dentry->d_lock);
	read_lock(&gr_inode_lock);
	retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
				  __get_dev(dentry), role);
	read_unlock(&gr_inode_lock);
	spin_unlock(&dentry->d_lock);

	if (unlikely(retval == NULL)) {
		/* real_root is pinned, we don't need to hold a reference */
		read_lock(&gr_inode_lock);
		retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
					  __get_dev(real_root.dentry), role);
		read_unlock(&gr_inode_lock);
	}
out:
	br_read_unlock(vfsmount_lock);
	write_sequnlock(&rename_lock);

	BUG_ON(retval == NULL);

	return retval;
}
50453+
/* Emit a learn-mode audit record for an access to (dentry, mnt) with the
 * given mode by the current task; consumed by userland to build policy. */
static void
gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
{
	struct task_struct *task = current;
	const struct cred *cred = current_cred();

	/* fall back to the subject's filename when there is no exec_file
	   (kernel threads) */
	security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
		       cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
		       task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
		       1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);

	return;
}
50467+
/* Learn-mode audit record for a sysctl access; identical to gr_log_learn
 * except the target is an already-formatted sysctl path string. */
static void
gr_log_learn_sysctl(const char *path, const __u32 mode)
{
	struct task_struct *task = current;
	const struct cred *cred = current_cred();

	security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
		       cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
		       task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
		       1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);

	return;
}
50481+
/* Learn-mode audit record for a uid/gid change; type is 'u' or 'g' as
 * passed by the callers below. */
static void
gr_log_learn_id_change(const char type, const unsigned int real,
		       const unsigned int effective, const unsigned int fs)
{
	struct task_struct *task = current;
	const struct cred *cred = current_cred();

	security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
		       cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
		       task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
		       type, real, effective, fs, &task->signal->saved_ip);

	return;
}
50496+
/* Main file-access check: intersect the requested mode with the object
 * label's mode for (dentry, mnt) under the current subject.  Returns the
 * granted bits; audit bits are stripped when RBAC is not active.
 */
__u32
gr_search_file(const struct dentry * dentry, const __u32 mode,
	       const struct vfsmount * mnt)
{
	__u32 retval = mode;
	struct acl_subject_label *curracl;
	struct acl_object_label *currobj;

	if (unlikely(!(gr_status & GR_READY)))
		return (mode & ~GR_AUDITS);

	curracl = current->acl;

	currobj = chk_obj_label(dentry, mnt, curracl);
	retval = currobj->mode & mode;

	/* if we're opening a specified transfer file for writing
	   (e.g. /dev/initctl), then transfer our role to init
	*/
	if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
	    current->role->roletype & GR_ROLE_PERSIST)) {
		struct task_struct *task = init_pid_ns.child_reaper;

		if (task->role != current->role) {
			task->acl_sp_role = 0;
			task->acl_role_id = current->acl_role_id;
			task->role = current->role;
			rcu_read_lock();
			read_lock(&grsec_exec_file_lock);
			gr_apply_subject_to_task(task);
			read_unlock(&grsec_exec_file_lock);
			rcu_read_unlock();
			gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
		}
	}

	/* learn mode: grant the request (minus audit/suppress bits) and log
	   it so the rule can be added to policy later */
	if (unlikely
	    ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
	     && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
		__u32 new_mode = mode;

		new_mode &= ~(GR_AUDITS | GR_SUPPRESS);

		retval = new_mode;

		if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
			new_mode |= GR_INHERIT;

		if (!(mode & GR_NOLEARN))
			gr_log_learn(dentry, mnt, new_mode);
	}

	return retval;
}
50551+
/* Find the object label that would govern creating new_dentry under
 * parent: first try an exact name-based rule for the full path, then fall
 * back to the parent directory's label with create-glob checks.
 * Returns NULL only when RBAC is not active.
 */
struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
					      const struct dentry *parent,
					      const struct vfsmount *mnt)
{
	struct name_entry *match;
	struct acl_object_label *matchpo;
	struct acl_subject_label *curracl;
	char *path;

	if (unlikely(!(gr_status & GR_READY)))
		return NULL;

	/* preempt disabled while we hold the per-CPU(?) filename buffer —
	   NOTE(review): presumably gr_to_filename_rbac uses preempt-unsafe
	   storage; confirm against its definition */
	preempt_disable();
	path = gr_to_filename_rbac(new_dentry, mnt);
	match = lookup_name_entry_create(path);

	curracl = current->acl;

	if (match) {
		read_lock(&gr_inode_lock);
		matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
		read_unlock(&gr_inode_lock);

		if (matchpo) {
			preempt_enable();
			return matchpo;
		}
	}

	// lookup parent

	matchpo = chk_obj_create_label(parent, mnt, curracl, path);

	preempt_enable();
	return matchpo;
}
50588+
/* Access check for creating new_dentry under parent with the requested
 * mode.  In learn mode the request is granted (minus audit/suppress bits)
 * and logged for later policy generation. */
__u32
gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
		const struct vfsmount * mnt, const __u32 mode)
{
	struct acl_object_label *matchpo;
	__u32 retval;

	if (unlikely(!(gr_status & GR_READY)))
		return (mode & ~GR_AUDITS);

	matchpo = gr_get_create_object(new_dentry, parent, mnt);

	retval = matchpo->mode & mode;

	if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
	    && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
		__u32 new_mode = mode;

		new_mode &= ~(GR_AUDITS | GR_SUPPRESS);

		gr_log_learn(new_dentry, mnt, new_mode);
		return new_mode;
	}

	return retval;
}
50615+
/* Hardlink check: a link may only be created when the new name grants no
 * more access than the old name already had, and the old name preserves
 * the new name's restriction/audit bits.  Returns the granted mode
 * (GR_CREATE | GR_LINK on success paths), GR_SUPPRESS, or 0 on denial. */
__u32
gr_check_link(const struct dentry * new_dentry,
	      const struct dentry * parent_dentry,
	      const struct vfsmount * parent_mnt,
	      const struct dentry * old_dentry, const struct vfsmount * old_mnt)
{
	struct acl_object_label *obj;
	__u32 oldmode, newmode;
	__u32 needmode;
	__u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
			   GR_DELETE | GR_INHERIT;

	if (unlikely(!(gr_status & GR_READY)))
		return (GR_CREATE | GR_LINK);

	obj = chk_obj_label(old_dentry, old_mnt, current->acl);
	oldmode = obj->mode;

	obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
	newmode = obj->mode;

	needmode = newmode & checkmodes;

	// old name for hardlink must have at least the permissions of the new name
	if ((oldmode & needmode) != needmode)
		goto bad;

	// if old name had restrictions/auditing, make sure the new name does as well
	needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);

	// don't allow hardlinking of suid/sgid files without permission
	if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
		needmode |= GR_SETID;

	if ((newmode & needmode) != needmode)
		goto bad;

	// enforce minimum permissions
	if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
		return newmode;
bad:
	needmode = oldmode;
	if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
		needmode |= GR_SETID;

	/* learn mode: log what would have been needed and allow the link */
	if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
		gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
		return (GR_CREATE | GR_LINK);
	} else if (newmode & GR_SUPPRESS)
		return GR_SUPPRESS;
	else
		return 0;
}
50669+
50670+int
50671+gr_check_hidden_task(const struct task_struct *task)
50672+{
50673+ if (unlikely(!(gr_status & GR_READY)))
50674+ return 0;
50675+
50676+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
50677+ return 1;
50678+
50679+ return 0;
50680+}
50681+
50682+int
50683+gr_check_protected_task(const struct task_struct *task)
50684+{
50685+ if (unlikely(!(gr_status & GR_READY) || !task))
50686+ return 0;
50687+
50688+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
50689+ task->acl != current->acl)
50690+ return 1;
50691+
50692+ return 0;
50693+}
50694+
/* Like gr_check_protected_task() but applied to every task attached to
 * the given struct pid; returns 1 as soon as any member is protected. */
int
gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
{
	struct task_struct *p;
	int ret = 0;

	if (unlikely(!(gr_status & GR_READY) || !pid))
		return ret;

	/* tasklist_lock keeps the pid->task links stable for iteration */
	read_lock(&tasklist_lock);
	do_each_pid_task(pid, type, p) {
		if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
		    p->acl != current->acl) {
			ret = 1;
			goto out;
		}
	} while_each_pid_task(pid, type, p);
out:
	read_unlock(&tasklist_lock);

	return ret;
}
50717+
/* Fork-time hook: fix up the RBAC-related fields of a freshly duplicated
 * task.  Most fields were already copied by dup_task_struct; here we
 * reset per-task state and take an extra reference on exec_file. */
void
gr_copy_label(struct task_struct *tsk)
{
	/* plain copying of fields is already done by dup_task_struct */
	tsk->signal->used_accept = 0;
	tsk->acl_sp_role = 0;
	//tsk->acl_role_id = current->acl_role_id;
	//tsk->acl = current->acl;
	//tsk->role = current->role;
	tsk->signal->curr_ip = current->signal->curr_ip;
	tsk->signal->saved_ip = current->signal->saved_ip;
	if (current->exec_file)
		get_file(current->exec_file);
	//tsk->exec_file = current->exec_file;
	//tsk->is_writable = current->is_writable;
	/* parent already served an accept(): its recorded IPs were consumed
	   by the child copy above, so clear them on the parent */
	if (unlikely(current->signal->used_accept)) {
		current->signal->curr_ip = 0;
		current->signal->saved_ip = 0;
	}

	return;
}
50740+
50741+static void
50742+gr_set_proc_res(struct task_struct *task)
50743+{
50744+ struct acl_subject_label *proc;
50745+ unsigned short i;
50746+
50747+ proc = task->acl;
50748+
50749+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
50750+ return;
50751+
50752+ for (i = 0; i < RLIM_NLIMITS; i++) {
50753+ if (!(proc->resmask & (1 << i)))
50754+ continue;
50755+
50756+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
50757+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
50758+ }
50759+
50760+ return;
50761+}
50762+
50763+extern int __gr_process_user_ban(struct user_struct *user);
50764+
/* Validate a setuid-style change of (real, effective, fs) uids against
 * the subject's user transition list.  -1 means "not being changed".
 * Returns 0 when allowed, 1 when denied (also when the target real uid
 * is banned by the lockout/brute facilities). */
int
gr_check_user_change(int real, int effective, int fs)
{
	unsigned int i;
	__u16 num;
	uid_t *uidlist;
	int curuid;
	int realok = 0;
	int effectiveok = 0;
	int fsok = 0;

#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
	struct user_struct *user;

	if (real == -1)
		goto skipit;

	user = find_user(real);
	if (user == NULL)
		goto skipit;

	/* deny outright when the target user is currently banned */
	if (__gr_process_user_ban(user)) {
		/* for find_user */
		free_uid(user);
		return 1;
	}

	/* for find_user */
	free_uid(user);

skipit:
#endif

	if (unlikely(!(gr_status & GR_READY)))
		return 0;

	if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
		gr_log_learn_id_change('u', real, effective, fs);

	num = current->acl->user_trans_num;
	uidlist = current->acl->user_transitions;

	/* no transition list configured: any change is permitted */
	if (uidlist == NULL)
		return 0;

	/* ids not being changed are vacuously OK */
	if (real == -1)
		realok = 1;
	if (effective == -1)
		effectiveok = 1;
	if (fs == -1)
		fsok = 1;

	if (current->acl->user_trans_type & GR_ID_ALLOW) {
		/* allow-list: each target id must appear in the list */
		for (i = 0; i < num; i++) {
			curuid = (int)uidlist[i];
			if (real == curuid)
				realok = 1;
			if (effective == curuid)
				effectiveok = 1;
			if (fs == curuid)
				fsok = 1;
		}
	} else if (current->acl->user_trans_type & GR_ID_DENY) {
		/* deny-list: any hit rejects the whole change */
		for (i = 0; i < num; i++) {
			curuid = (int)uidlist[i];
			if (real == curuid)
				break;
			if (effective == curuid)
				break;
			if (fs == curuid)
				break;
		}
		/* not in deny list */
		if (i == num) {
			realok = 1;
			effectiveok = 1;
			fsok = 1;
		}
	}

	if (realok && effectiveok && fsok)
		return 0;
	else {
		/* log the first id that failed the check */
		gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
		return 1;
	}
}
50852+
/* Validate a setgid-style change of (real, effective, fs) gids against
 * the subject's group transition list; mirrors gr_check_user_change()
 * minus the user-ban check.  Returns 0 when allowed, 1 when denied. */
int
gr_check_group_change(int real, int effective, int fs)
{
	unsigned int i;
	__u16 num;
	gid_t *gidlist;
	int curgid;
	int realok = 0;
	int effectiveok = 0;
	int fsok = 0;

	if (unlikely(!(gr_status & GR_READY)))
		return 0;

	if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
		gr_log_learn_id_change('g', real, effective, fs);

	num = current->acl->group_trans_num;
	gidlist = current->acl->group_transitions;

	/* no transition list configured: any change is permitted */
	if (gidlist == NULL)
		return 0;

	/* ids not being changed are vacuously OK */
	if (real == -1)
		realok = 1;
	if (effective == -1)
		effectiveok = 1;
	if (fs == -1)
		fsok = 1;

	if (current->acl->group_trans_type & GR_ID_ALLOW) {
		/* allow-list: each target id must appear in the list */
		for (i = 0; i < num; i++) {
			curgid = (int)gidlist[i];
			if (real == curgid)
				realok = 1;
			if (effective == curgid)
				effectiveok = 1;
			if (fs == curgid)
				fsok = 1;
		}
	} else if (current->acl->group_trans_type & GR_ID_DENY) {
		/* deny-list: any hit rejects the whole change */
		for (i = 0; i < num; i++) {
			curgid = (int)gidlist[i];
			if (real == curgid)
				break;
			if (effective == curgid)
				break;
			if (fs == curgid)
				break;
		}
		/* not in deny list */
		if (i == num) {
			realok = 1;
			effectiveok = 1;
			fsok = 1;
		}
	}

	if (realok && effectiveok && fsok)
		return 0;
	else {
		/* log the first id that failed the check */
		gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
		return 1;
	}
}
50918+
50919+extern int gr_acl_is_capable(const int cap);
50920+
/* Re-evaluate a task's role (and subject) after a uid/gid change.
 * Kernel threads get the kernel role; unprivileged tasks may not switch
 * roles; inherited subjects are preserved when the role is unchanged. */
void
gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
{
	struct acl_role_label *role = task->role;
	struct acl_subject_label *subj = NULL;
	struct acl_object_label *obj;
	struct file *filp;

	if (unlikely(!(gr_status & GR_READY)))
		return;

	filp = task->exec_file;

	/* kernel process, we'll give them the kernel role */
	if (unlikely(!filp)) {
		task->role = kernel_role;
		task->acl = kernel_role->root_label;
		return;
	} else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
		role = lookup_acl_role_label(task, uid, gid);

	/* don't change the role if we're not a privileged process */
	if (role && task->role != role &&
	    (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
	     ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
		return;

	/* perform subject lookup in possibly new role
	   we can use this result below in the case where role == task->role
	*/
	subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);

	/* if we changed uid/gid, but result in the same role
	   and are using inheritance, don't lose the inherited subject
	   if current subject is other than what normal lookup
	   would result in, we arrived via inheritance, don't
	   lose subject
	*/
	if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
				   (subj == task->acl)))
		task->acl = subj;

	task->role = role;

	task->is_writable = 0;

	/* ignore additional mmap checks for processes that are writable
	   by the default ACL */
	obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
	if (unlikely(obj->mode & GR_WRITE))
		task->is_writable = 1;
	obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
	if (unlikely(obj->mode & GR_WRITE))
		task->is_writable = 1;

#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
	printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
#endif

	/* apply the (possibly new) subject's resource limits */
	gr_set_proc_res(task);

	return;
}
50984+
/* exec-time hook: switch the current task to the subject governing the
 * new binary at (dentry, mnt).  Denies the exec (-EACCES) when the task
 * is being traced/shared unsafely and the new subject would differ
 * without an explicit override.  Returns 0 on success. */
int
gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
		  const int unsafe_flags)
{
	struct task_struct *task = current;
	struct acl_subject_label *newacl;
	struct acl_object_label *obj;
	__u32 retmode;

	if (unlikely(!(gr_status & GR_READY)))
		return 0;

	newacl = chk_subj_label(dentry, mnt, task->role);

	/* refuse a subject transition under ptrace/unsafe sharing unless
	   policy explicitly permits it (GR_POVERRIDE, admin role,
	   GR_PTRACERD on the binary, or learn mode) */
	task_lock(task);
	if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
	    !(task->role->roletype & GR_ROLE_GOD) &&
	    !gr_search_file(dentry, GR_PTRACERD, mnt) &&
	    !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
		task_unlock(task);
		if (unsafe_flags & LSM_UNSAFE_SHARE)
			gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
		else
			gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
		return -EACCES;
	}
	task_unlock(task);

	obj = chk_obj_label(dentry, mnt, task->acl);
	retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);

	/* either enter the binary's own (possibly nested) subject, or keep
	   the current subject when inheritance applies */
	if (!(task->acl->mode & GR_INHERITLEARN) &&
	    ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
		if (obj->nested)
			task->acl = obj->nested;
		else
			task->acl = newacl;
	} else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
		gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);

	task->is_writable = 0;

	/* ignore additional mmap checks for processes that are writable
	   by the default ACL */
	obj = chk_obj_label(dentry, mnt, default_role->root_label);
	if (unlikely(obj->mode & GR_WRITE))
		task->is_writable = 1;
	obj = chk_obj_label(dentry, mnt, task->role->root_label);
	if (unlikely(obj->mode & GR_WRITE))
		task->is_writable = 1;

	gr_set_proc_res(task);

#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
	printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
#endif
	return 0;
}
51043+
/* always called with valid inodev ptr */
/* Mark every object/subject label that refers to (ino, dev) as deleted in
 * all roles, so the stale labels can be re-bound if the inode number is
 * later reused (see update_* below).  Caller holds gr_inode_lock for
 * writing (see gr_handle_delete). */
static void
do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
{
	struct acl_object_label *matchpo;
	struct acl_subject_label *matchps;
	struct acl_subject_label *subj;
	struct acl_role_label *role;
	unsigned int x;

	FOR_EACH_ROLE_START(role)
		FOR_EACH_SUBJECT_START(role, subj, x)
			if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
				matchpo->mode |= GR_DELETED;
		FOR_EACH_SUBJECT_END(subj,x)
		FOR_EACH_NESTED_SUBJECT_START(role, subj)
			if (subj->inode == ino && subj->device == dev)
				subj->mode |= GR_DELETED;
		FOR_EACH_NESTED_SUBJECT_END(subj)
		if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
			matchps->mode |= GR_DELETED;
	FOR_EACH_ROLE_END(role)

	inodev->nentry->deleted = 1;

	return;
}
51071+
51072+void
51073+gr_handle_delete(const ino_t ino, const dev_t dev)
51074+{
51075+ struct inodev_entry *inodev;
51076+
51077+ if (unlikely(!(gr_status & GR_READY)))
51078+ return;
51079+
51080+ write_lock(&gr_inode_lock);
51081+ inodev = lookup_inodev_entry(ino, dev);
51082+ if (inodev != NULL)
51083+ do_handle_delete(inodev, ino, dev);
51084+ write_unlock(&gr_inode_lock);
51085+
51086+ return;
51087+}
51088+
/* If the subject has a deleted object label for (oldinode, olddevice),
 * unlink it from its hash chain, re-bind it to (newinode, newdevice),
 * clear GR_DELETED, and re-insert it — the label survives inode reuse. */
static void
update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
		     const ino_t newinode, const dev_t newdevice,
		     struct acl_subject_label *subj)
{
	unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
	struct acl_object_label *match;

	match = subj->obj_hash[index];

	/* only a label already flagged GR_DELETED may be re-bound */
	while (match && (match->inode != oldinode ||
	       match->device != olddevice ||
	       !(match->mode & GR_DELETED)))
		match = match->next;

	if (match && (match->inode == oldinode)
	    && (match->device == olddevice)
	    && (match->mode & GR_DELETED)) {
		/* unlink from the doubly-linked chain */
		if (match->prev == NULL) {
			subj->obj_hash[index] = match->next;
			if (match->next != NULL)
				match->next->prev = NULL;
		} else {
			match->prev->next = match->next;
			if (match->next != NULL)
				match->next->prev = match->prev;
		}
		match->prev = NULL;
		match->next = NULL;
		match->inode = newinode;
		match->device = newdevice;
		match->mode &= ~GR_DELETED;

		/* re-hash under the new inode/device */
		insert_acl_obj_label(match, subj);
	}

	return;
}
51127+
/* Subject-label counterpart of update_acl_obj_label(): re-bind a deleted
 * subject label from (oldinode, olddevice) to (newinode, newdevice)
 * within the given role's subject hash. */
static void
update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
		      const ino_t newinode, const dev_t newdevice,
		      struct acl_role_label *role)
{
	unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
	struct acl_subject_label *match;

	match = role->subj_hash[index];

	/* only a label already flagged GR_DELETED may be re-bound */
	while (match && (match->inode != oldinode ||
	       match->device != olddevice ||
	       !(match->mode & GR_DELETED)))
		match = match->next;

	if (match && (match->inode == oldinode)
	    && (match->device == olddevice)
	    && (match->mode & GR_DELETED)) {
		/* unlink from the doubly-linked chain */
		if (match->prev == NULL) {
			role->subj_hash[index] = match->next;
			if (match->next != NULL)
				match->next->prev = NULL;
		} else {
			match->prev->next = match->next;
			if (match->next != NULL)
				match->next->prev = match->prev;
		}
		match->prev = NULL;
		match->next = NULL;
		match->inode = newinode;
		match->device = newdevice;
		match->mode &= ~GR_DELETED;

		/* re-hash under the new inode/device */
		insert_acl_subj_label(match, role);
	}

	return;
}
51166+
51167+static void
51168+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
51169+ const ino_t newinode, const dev_t newdevice)
51170+{
51171+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
51172+ struct inodev_entry *match;
51173+
51174+ match = inodev_set.i_hash[index];
51175+
51176+ while (match && (match->nentry->inode != oldinode ||
51177+ match->nentry->device != olddevice || !match->nentry->deleted))
51178+ match = match->next;
51179+
51180+ if (match && (match->nentry->inode == oldinode)
51181+ && (match->nentry->device == olddevice) &&
51182+ match->nentry->deleted) {
51183+ if (match->prev == NULL) {
51184+ inodev_set.i_hash[index] = match->next;
51185+ if (match->next != NULL)
51186+ match->next->prev = NULL;
51187+ } else {
51188+ match->prev->next = match->next;
51189+ if (match->next != NULL)
51190+ match->next->prev = match->prev;
51191+ }
51192+ match->prev = NULL;
51193+ match->next = NULL;
51194+ match->nentry->inode = newinode;
51195+ match->nentry->device = newdevice;
51196+ match->nentry->deleted = 0;
51197+
51198+ insert_inodev_entry(match);
51199+ }
51200+
51201+ return;
51202+}
51203+
51204+static void
51205+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
51206+{
51207+ struct acl_subject_label *subj;
51208+ struct acl_role_label *role;
51209+ unsigned int x;
51210+
51211+ FOR_EACH_ROLE_START(role)
51212+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
51213+
51214+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
51215+ if ((subj->inode == ino) && (subj->device == dev)) {
51216+ subj->inode = ino;
51217+ subj->device = dev;
51218+ }
51219+ FOR_EACH_NESTED_SUBJECT_END(subj)
51220+ FOR_EACH_SUBJECT_START(role, subj, x)
51221+ update_acl_obj_label(matchn->inode, matchn->device,
51222+ ino, dev, subj);
51223+ FOR_EACH_SUBJECT_END(subj,x)
51224+ FOR_EACH_ROLE_END(role)
51225+
51226+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
51227+
51228+ return;
51229+}
51230+
51231+static void
51232+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
51233+ const struct vfsmount *mnt)
51234+{
51235+ ino_t ino = dentry->d_inode->i_ino;
51236+ dev_t dev = __get_dev(dentry);
51237+
51238+ __do_handle_create(matchn, ino, dev);
51239+
51240+ return;
51241+}
51242+
51243+void
51244+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
51245+{
51246+ struct name_entry *matchn;
51247+
51248+ if (unlikely(!(gr_status & GR_READY)))
51249+ return;
51250+
51251+ preempt_disable();
51252+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
51253+
51254+ if (unlikely((unsigned long)matchn)) {
51255+ write_lock(&gr_inode_lock);
51256+ do_handle_create(matchn, dentry, mnt);
51257+ write_unlock(&gr_inode_lock);
51258+ }
51259+ preempt_enable();
51260+
51261+ return;
51262+}
51263+
51264+void
51265+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
51266+{
51267+ struct name_entry *matchn;
51268+
51269+ if (unlikely(!(gr_status & GR_READY)))
51270+ return;
51271+
51272+ preempt_disable();
51273+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
51274+
51275+ if (unlikely((unsigned long)matchn)) {
51276+ write_lock(&gr_inode_lock);
51277+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
51278+ write_unlock(&gr_inode_lock);
51279+ }
51280+ preempt_enable();
51281+
51282+ return;
51283+}
51284+
51285+void
51286+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
51287+ struct dentry *old_dentry,
51288+ struct dentry *new_dentry,
51289+ struct vfsmount *mnt, const __u8 replace)
51290+{
51291+ struct name_entry *matchn;
51292+ struct inodev_entry *inodev;
51293+ struct inode *inode = new_dentry->d_inode;
51294+ ino_t old_ino = old_dentry->d_inode->i_ino;
51295+ dev_t old_dev = __get_dev(old_dentry);
51296+
51297+ /* vfs_rename swaps the name and parent link for old_dentry and
51298+ new_dentry
51299+ at this point, old_dentry has the new name, parent link, and inode
51300+ for the renamed file
51301+ if a file is being replaced by a rename, new_dentry has the inode
51302+ and name for the replaced file
51303+ */
51304+
51305+ if (unlikely(!(gr_status & GR_READY)))
51306+ return;
51307+
51308+ preempt_disable();
51309+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
51310+
51311+ /* we wouldn't have to check d_inode if it weren't for
51312+ NFS silly-renaming
51313+ */
51314+
51315+ write_lock(&gr_inode_lock);
51316+ if (unlikely(replace && inode)) {
51317+ ino_t new_ino = inode->i_ino;
51318+ dev_t new_dev = __get_dev(new_dentry);
51319+
51320+ inodev = lookup_inodev_entry(new_ino, new_dev);
51321+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
51322+ do_handle_delete(inodev, new_ino, new_dev);
51323+ }
51324+
51325+ inodev = lookup_inodev_entry(old_ino, old_dev);
51326+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
51327+ do_handle_delete(inodev, old_ino, old_dev);
51328+
51329+ if (unlikely((unsigned long)matchn))
51330+ do_handle_create(matchn, old_dentry, mnt);
51331+
51332+ write_unlock(&gr_inode_lock);
51333+ preempt_enable();
51334+
51335+ return;
51336+}
51337+
51338+static int
51339+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
51340+ unsigned char **sum)
51341+{
51342+ struct acl_role_label *r;
51343+ struct role_allowed_ip *ipp;
51344+ struct role_transition *trans;
51345+ unsigned int i;
51346+ int found = 0;
51347+ u32 curr_ip = current->signal->curr_ip;
51348+
51349+ current->signal->saved_ip = curr_ip;
51350+
51351+ /* check transition table */
51352+
51353+ for (trans = current->role->transitions; trans; trans = trans->next) {
51354+ if (!strcmp(rolename, trans->rolename)) {
51355+ found = 1;
51356+ break;
51357+ }
51358+ }
51359+
51360+ if (!found)
51361+ return 0;
51362+
51363+ /* handle special roles that do not require authentication
51364+ and check ip */
51365+
51366+ FOR_EACH_ROLE_START(r)
51367+ if (!strcmp(rolename, r->rolename) &&
51368+ (r->roletype & GR_ROLE_SPECIAL)) {
51369+ found = 0;
51370+ if (r->allowed_ips != NULL) {
51371+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
51372+ if ((ntohl(curr_ip) & ipp->netmask) ==
51373+ (ntohl(ipp->addr) & ipp->netmask))
51374+ found = 1;
51375+ }
51376+ } else
51377+ found = 2;
51378+ if (!found)
51379+ return 0;
51380+
51381+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
51382+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
51383+ *salt = NULL;
51384+ *sum = NULL;
51385+ return 1;
51386+ }
51387+ }
51388+ FOR_EACH_ROLE_END(r)
51389+
51390+ for (i = 0; i < num_sprole_pws; i++) {
51391+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
51392+ *salt = acl_special_roles[i]->salt;
51393+ *sum = acl_special_roles[i]->sum;
51394+ return 1;
51395+ }
51396+ }
51397+
51398+ return 0;
51399+}
51400+
51401+static void
51402+assign_special_role(char *rolename)
51403+{
51404+ struct acl_object_label *obj;
51405+ struct acl_role_label *r;
51406+ struct acl_role_label *assigned = NULL;
51407+ struct task_struct *tsk;
51408+ struct file *filp;
51409+
51410+ FOR_EACH_ROLE_START(r)
51411+ if (!strcmp(rolename, r->rolename) &&
51412+ (r->roletype & GR_ROLE_SPECIAL)) {
51413+ assigned = r;
51414+ break;
51415+ }
51416+ FOR_EACH_ROLE_END(r)
51417+
51418+ if (!assigned)
51419+ return;
51420+
51421+ read_lock(&tasklist_lock);
51422+ read_lock(&grsec_exec_file_lock);
51423+
51424+ tsk = current->real_parent;
51425+ if (tsk == NULL)
51426+ goto out_unlock;
51427+
51428+ filp = tsk->exec_file;
51429+ if (filp == NULL)
51430+ goto out_unlock;
51431+
51432+ tsk->is_writable = 0;
51433+
51434+ tsk->acl_sp_role = 1;
51435+ tsk->acl_role_id = ++acl_sp_role_value;
51436+ tsk->role = assigned;
51437+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
51438+
51439+ /* ignore additional mmap checks for processes that are writable
51440+ by the default ACL */
51441+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51442+ if (unlikely(obj->mode & GR_WRITE))
51443+ tsk->is_writable = 1;
51444+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
51445+ if (unlikely(obj->mode & GR_WRITE))
51446+ tsk->is_writable = 1;
51447+
51448+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51449+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
51450+#endif
51451+
51452+out_unlock:
51453+ read_unlock(&grsec_exec_file_lock);
51454+ read_unlock(&tasklist_lock);
51455+ return;
51456+}
51457+
51458+int gr_check_secure_terminal(struct task_struct *task)
51459+{
51460+ struct task_struct *p, *p2, *p3;
51461+ struct files_struct *files;
51462+ struct fdtable *fdt;
51463+ struct file *our_file = NULL, *file;
51464+ int i;
51465+
51466+ if (task->signal->tty == NULL)
51467+ return 1;
51468+
51469+ files = get_files_struct(task);
51470+ if (files != NULL) {
51471+ rcu_read_lock();
51472+ fdt = files_fdtable(files);
51473+ for (i=0; i < fdt->max_fds; i++) {
51474+ file = fcheck_files(files, i);
51475+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
51476+ get_file(file);
51477+ our_file = file;
51478+ }
51479+ }
51480+ rcu_read_unlock();
51481+ put_files_struct(files);
51482+ }
51483+
51484+ if (our_file == NULL)
51485+ return 1;
51486+
51487+ read_lock(&tasklist_lock);
51488+ do_each_thread(p2, p) {
51489+ files = get_files_struct(p);
51490+ if (files == NULL ||
51491+ (p->signal && p->signal->tty == task->signal->tty)) {
51492+ if (files != NULL)
51493+ put_files_struct(files);
51494+ continue;
51495+ }
51496+ rcu_read_lock();
51497+ fdt = files_fdtable(files);
51498+ for (i=0; i < fdt->max_fds; i++) {
51499+ file = fcheck_files(files, i);
51500+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
51501+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
51502+ p3 = task;
51503+ while (p3->pid > 0) {
51504+ if (p3 == p)
51505+ break;
51506+ p3 = p3->real_parent;
51507+ }
51508+ if (p3 == p)
51509+ break;
51510+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
51511+ gr_handle_alertkill(p);
51512+ rcu_read_unlock();
51513+ put_files_struct(files);
51514+ read_unlock(&tasklist_lock);
51515+ fput(our_file);
51516+ return 0;
51517+ }
51518+ }
51519+ rcu_read_unlock();
51520+ put_files_struct(files);
51521+ } while_each_thread(p2, p);
51522+ read_unlock(&tasklist_lock);
51523+
51524+ fput(our_file);
51525+ return 1;
51526+}
51527+
51528+ssize_t
51529+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
51530+{
51531+ struct gr_arg_wrapper uwrap;
51532+ unsigned char *sprole_salt = NULL;
51533+ unsigned char *sprole_sum = NULL;
51534+ int error = sizeof (struct gr_arg_wrapper);
51535+ int error2 = 0;
51536+
51537+ mutex_lock(&gr_dev_mutex);
51538+
51539+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
51540+ error = -EPERM;
51541+ goto out;
51542+ }
51543+
51544+ if (count != sizeof (struct gr_arg_wrapper)) {
51545+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
51546+ error = -EINVAL;
51547+ goto out;
51548+ }
51549+
51550+
51551+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
51552+ gr_auth_expires = 0;
51553+ gr_auth_attempts = 0;
51554+ }
51555+
51556+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
51557+ error = -EFAULT;
51558+ goto out;
51559+ }
51560+
51561+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
51562+ error = -EINVAL;
51563+ goto out;
51564+ }
51565+
51566+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
51567+ error = -EFAULT;
51568+ goto out;
51569+ }
51570+
51571+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51572+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51573+ time_after(gr_auth_expires, get_seconds())) {
51574+ error = -EBUSY;
51575+ goto out;
51576+ }
51577+
51578+ /* if non-root trying to do anything other than use a special role,
51579+ do not attempt authentication, do not count towards authentication
51580+ locking
51581+ */
51582+
51583+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
51584+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51585+ current_uid()) {
51586+ error = -EPERM;
51587+ goto out;
51588+ }
51589+
51590+ /* ensure pw and special role name are null terminated */
51591+
51592+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
51593+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
51594+
51595+ /* Okay.
51596+ * We have our enough of the argument structure..(we have yet
51597+ * to copy_from_user the tables themselves) . Copy the tables
51598+ * only if we need them, i.e. for loading operations. */
51599+
51600+ switch (gr_usermode->mode) {
51601+ case GR_STATUS:
51602+ if (gr_status & GR_READY) {
51603+ error = 1;
51604+ if (!gr_check_secure_terminal(current))
51605+ error = 3;
51606+ } else
51607+ error = 2;
51608+ goto out;
51609+ case GR_SHUTDOWN:
51610+ if ((gr_status & GR_READY)
51611+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51612+ pax_open_kernel();
51613+ gr_status &= ~GR_READY;
51614+ pax_close_kernel();
51615+
51616+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
51617+ free_variables();
51618+ memset(gr_usermode, 0, sizeof (struct gr_arg));
51619+ memset(gr_system_salt, 0, GR_SALT_LEN);
51620+ memset(gr_system_sum, 0, GR_SHA_LEN);
51621+ } else if (gr_status & GR_READY) {
51622+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
51623+ error = -EPERM;
51624+ } else {
51625+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
51626+ error = -EAGAIN;
51627+ }
51628+ break;
51629+ case GR_ENABLE:
51630+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
51631+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
51632+ else {
51633+ if (gr_status & GR_READY)
51634+ error = -EAGAIN;
51635+ else
51636+ error = error2;
51637+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
51638+ }
51639+ break;
51640+ case GR_RELOAD:
51641+ if (!(gr_status & GR_READY)) {
51642+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
51643+ error = -EAGAIN;
51644+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51645+ preempt_disable();
51646+
51647+ pax_open_kernel();
51648+ gr_status &= ~GR_READY;
51649+ pax_close_kernel();
51650+
51651+ free_variables();
51652+ if (!(error2 = gracl_init(gr_usermode))) {
51653+ preempt_enable();
51654+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
51655+ } else {
51656+ preempt_enable();
51657+ error = error2;
51658+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51659+ }
51660+ } else {
51661+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51662+ error = -EPERM;
51663+ }
51664+ break;
51665+ case GR_SEGVMOD:
51666+ if (unlikely(!(gr_status & GR_READY))) {
51667+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
51668+ error = -EAGAIN;
51669+ break;
51670+ }
51671+
51672+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51673+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
51674+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
51675+ struct acl_subject_label *segvacl;
51676+ segvacl =
51677+ lookup_acl_subj_label(gr_usermode->segv_inode,
51678+ gr_usermode->segv_device,
51679+ current->role);
51680+ if (segvacl) {
51681+ segvacl->crashes = 0;
51682+ segvacl->expires = 0;
51683+ }
51684+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
51685+ gr_remove_uid(gr_usermode->segv_uid);
51686+ }
51687+ } else {
51688+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
51689+ error = -EPERM;
51690+ }
51691+ break;
51692+ case GR_SPROLE:
51693+ case GR_SPROLEPAM:
51694+ if (unlikely(!(gr_status & GR_READY))) {
51695+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
51696+ error = -EAGAIN;
51697+ break;
51698+ }
51699+
51700+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
51701+ current->role->expires = 0;
51702+ current->role->auth_attempts = 0;
51703+ }
51704+
51705+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51706+ time_after(current->role->expires, get_seconds())) {
51707+ error = -EBUSY;
51708+ goto out;
51709+ }
51710+
51711+ if (lookup_special_role_auth
51712+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
51713+ && ((!sprole_salt && !sprole_sum)
51714+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
51715+ char *p = "";
51716+ assign_special_role(gr_usermode->sp_role);
51717+ read_lock(&tasklist_lock);
51718+ if (current->real_parent)
51719+ p = current->real_parent->role->rolename;
51720+ read_unlock(&tasklist_lock);
51721+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
51722+ p, acl_sp_role_value);
51723+ } else {
51724+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
51725+ error = -EPERM;
51726+ if(!(current->role->auth_attempts++))
51727+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51728+
51729+ goto out;
51730+ }
51731+ break;
51732+ case GR_UNSPROLE:
51733+ if (unlikely(!(gr_status & GR_READY))) {
51734+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
51735+ error = -EAGAIN;
51736+ break;
51737+ }
51738+
51739+ if (current->role->roletype & GR_ROLE_SPECIAL) {
51740+ char *p = "";
51741+ int i = 0;
51742+
51743+ read_lock(&tasklist_lock);
51744+ if (current->real_parent) {
51745+ p = current->real_parent->role->rolename;
51746+ i = current->real_parent->acl_role_id;
51747+ }
51748+ read_unlock(&tasklist_lock);
51749+
51750+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
51751+ gr_set_acls(1);
51752+ } else {
51753+ error = -EPERM;
51754+ goto out;
51755+ }
51756+ break;
51757+ default:
51758+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
51759+ error = -EINVAL;
51760+ break;
51761+ }
51762+
51763+ if (error != -EPERM)
51764+ goto out;
51765+
51766+ if(!(gr_auth_attempts++))
51767+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51768+
51769+ out:
51770+ mutex_unlock(&gr_dev_mutex);
51771+ return error;
51772+}
51773+
51774+/* must be called with
51775+ rcu_read_lock();
51776+ read_lock(&tasklist_lock);
51777+ read_lock(&grsec_exec_file_lock);
51778+*/
51779+int gr_apply_subject_to_task(struct task_struct *task)
51780+{
51781+ struct acl_object_label *obj;
51782+ char *tmpname;
51783+ struct acl_subject_label *tmpsubj;
51784+ struct file *filp;
51785+ struct name_entry *nmatch;
51786+
51787+ filp = task->exec_file;
51788+ if (filp == NULL)
51789+ return 0;
51790+
51791+ /* the following is to apply the correct subject
51792+ on binaries running when the RBAC system
51793+ is enabled, when the binaries have been
51794+ replaced or deleted since their execution
51795+ -----
51796+ when the RBAC system starts, the inode/dev
51797+ from exec_file will be one the RBAC system
51798+ is unaware of. It only knows the inode/dev
51799+ of the present file on disk, or the absence
51800+ of it.
51801+ */
51802+ preempt_disable();
51803+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
51804+
51805+ nmatch = lookup_name_entry(tmpname);
51806+ preempt_enable();
51807+ tmpsubj = NULL;
51808+ if (nmatch) {
51809+ if (nmatch->deleted)
51810+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
51811+ else
51812+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
51813+ if (tmpsubj != NULL)
51814+ task->acl = tmpsubj;
51815+ }
51816+ if (tmpsubj == NULL)
51817+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
51818+ task->role);
51819+ if (task->acl) {
51820+ task->is_writable = 0;
51821+ /* ignore additional mmap checks for processes that are writable
51822+ by the default ACL */
51823+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51824+ if (unlikely(obj->mode & GR_WRITE))
51825+ task->is_writable = 1;
51826+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
51827+ if (unlikely(obj->mode & GR_WRITE))
51828+ task->is_writable = 1;
51829+
51830+ gr_set_proc_res(task);
51831+
51832+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51833+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
51834+#endif
51835+ } else {
51836+ return 1;
51837+ }
51838+
51839+ return 0;
51840+}
51841+
51842+int
51843+gr_set_acls(const int type)
51844+{
51845+ struct task_struct *task, *task2;
51846+ struct acl_role_label *role = current->role;
51847+ __u16 acl_role_id = current->acl_role_id;
51848+ const struct cred *cred;
51849+ int ret;
51850+
51851+ rcu_read_lock();
51852+ read_lock(&tasklist_lock);
51853+ read_lock(&grsec_exec_file_lock);
51854+ do_each_thread(task2, task) {
51855+ /* check to see if we're called from the exit handler,
51856+ if so, only replace ACLs that have inherited the admin
51857+ ACL */
51858+
51859+ if (type && (task->role != role ||
51860+ task->acl_role_id != acl_role_id))
51861+ continue;
51862+
51863+ task->acl_role_id = 0;
51864+ task->acl_sp_role = 0;
51865+
51866+ if (task->exec_file) {
51867+ cred = __task_cred(task);
51868+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
51869+ ret = gr_apply_subject_to_task(task);
51870+ if (ret) {
51871+ read_unlock(&grsec_exec_file_lock);
51872+ read_unlock(&tasklist_lock);
51873+ rcu_read_unlock();
51874+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
51875+ return ret;
51876+ }
51877+ } else {
51878+ // it's a kernel process
51879+ task->role = kernel_role;
51880+ task->acl = kernel_role->root_label;
51881+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
51882+ task->acl->mode &= ~GR_PROCFIND;
51883+#endif
51884+ }
51885+ } while_each_thread(task2, task);
51886+ read_unlock(&grsec_exec_file_lock);
51887+ read_unlock(&tasklist_lock);
51888+ rcu_read_unlock();
51889+
51890+ return 0;
51891+}
51892+
51893+void
51894+gr_learn_resource(const struct task_struct *task,
51895+ const int res, const unsigned long wanted, const int gt)
51896+{
51897+ struct acl_subject_label *acl;
51898+ const struct cred *cred;
51899+
51900+ if (unlikely((gr_status & GR_READY) &&
51901+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
51902+ goto skip_reslog;
51903+
51904+#ifdef CONFIG_GRKERNSEC_RESLOG
51905+ gr_log_resource(task, res, wanted, gt);
51906+#endif
51907+ skip_reslog:
51908+
51909+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
51910+ return;
51911+
51912+ acl = task->acl;
51913+
51914+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
51915+ !(acl->resmask & (1 << (unsigned short) res))))
51916+ return;
51917+
51918+ if (wanted >= acl->res[res].rlim_cur) {
51919+ unsigned long res_add;
51920+
51921+ res_add = wanted;
51922+ switch (res) {
51923+ case RLIMIT_CPU:
51924+ res_add += GR_RLIM_CPU_BUMP;
51925+ break;
51926+ case RLIMIT_FSIZE:
51927+ res_add += GR_RLIM_FSIZE_BUMP;
51928+ break;
51929+ case RLIMIT_DATA:
51930+ res_add += GR_RLIM_DATA_BUMP;
51931+ break;
51932+ case RLIMIT_STACK:
51933+ res_add += GR_RLIM_STACK_BUMP;
51934+ break;
51935+ case RLIMIT_CORE:
51936+ res_add += GR_RLIM_CORE_BUMP;
51937+ break;
51938+ case RLIMIT_RSS:
51939+ res_add += GR_RLIM_RSS_BUMP;
51940+ break;
51941+ case RLIMIT_NPROC:
51942+ res_add += GR_RLIM_NPROC_BUMP;
51943+ break;
51944+ case RLIMIT_NOFILE:
51945+ res_add += GR_RLIM_NOFILE_BUMP;
51946+ break;
51947+ case RLIMIT_MEMLOCK:
51948+ res_add += GR_RLIM_MEMLOCK_BUMP;
51949+ break;
51950+ case RLIMIT_AS:
51951+ res_add += GR_RLIM_AS_BUMP;
51952+ break;
51953+ case RLIMIT_LOCKS:
51954+ res_add += GR_RLIM_LOCKS_BUMP;
51955+ break;
51956+ case RLIMIT_SIGPENDING:
51957+ res_add += GR_RLIM_SIGPENDING_BUMP;
51958+ break;
51959+ case RLIMIT_MSGQUEUE:
51960+ res_add += GR_RLIM_MSGQUEUE_BUMP;
51961+ break;
51962+ case RLIMIT_NICE:
51963+ res_add += GR_RLIM_NICE_BUMP;
51964+ break;
51965+ case RLIMIT_RTPRIO:
51966+ res_add += GR_RLIM_RTPRIO_BUMP;
51967+ break;
51968+ case RLIMIT_RTTIME:
51969+ res_add += GR_RLIM_RTTIME_BUMP;
51970+ break;
51971+ }
51972+
51973+ acl->res[res].rlim_cur = res_add;
51974+
51975+ if (wanted > acl->res[res].rlim_max)
51976+ acl->res[res].rlim_max = res_add;
51977+
51978+ /* only log the subject filename, since resource logging is supported for
51979+ single-subject learning only */
51980+ rcu_read_lock();
51981+ cred = __task_cred(task);
51982+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
51983+ task->role->roletype, cred->uid, cred->gid, acl->filename,
51984+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
51985+ "", (unsigned long) res, &task->signal->saved_ip);
51986+ rcu_read_unlock();
51987+ }
51988+
51989+ return;
51990+}
51991+
51992+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
51993+void
51994+pax_set_initial_flags(struct linux_binprm *bprm)
51995+{
51996+ struct task_struct *task = current;
51997+ struct acl_subject_label *proc;
51998+ unsigned long flags;
51999+
52000+ if (unlikely(!(gr_status & GR_READY)))
52001+ return;
52002+
52003+ flags = pax_get_flags(task);
52004+
52005+ proc = task->acl;
52006+
52007+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
52008+ flags &= ~MF_PAX_PAGEEXEC;
52009+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
52010+ flags &= ~MF_PAX_SEGMEXEC;
52011+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
52012+ flags &= ~MF_PAX_RANDMMAP;
52013+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
52014+ flags &= ~MF_PAX_EMUTRAMP;
52015+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
52016+ flags &= ~MF_PAX_MPROTECT;
52017+
52018+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
52019+ flags |= MF_PAX_PAGEEXEC;
52020+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
52021+ flags |= MF_PAX_SEGMEXEC;
52022+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
52023+ flags |= MF_PAX_RANDMMAP;
52024+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
52025+ flags |= MF_PAX_EMUTRAMP;
52026+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
52027+ flags |= MF_PAX_MPROTECT;
52028+
52029+ pax_set_flags(task, flags);
52030+
52031+ return;
52032+}
52033+#endif
52034+
52035+#ifdef CONFIG_SYSCTL
52036+/* Eric Biederman likes breaking userland ABI and every inode-based security
52037+ system to save 35kb of memory */
52038+
52039+/* we modify the passed in filename, but adjust it back before returning */
52040+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
52041+{
52042+ struct name_entry *nmatch;
52043+ char *p, *lastp = NULL;
52044+ struct acl_object_label *obj = NULL, *tmp;
52045+ struct acl_subject_label *tmpsubj;
52046+ char c = '\0';
52047+
52048+ read_lock(&gr_inode_lock);
52049+
52050+ p = name + len - 1;
52051+ do {
52052+ nmatch = lookup_name_entry(name);
52053+ if (lastp != NULL)
52054+ *lastp = c;
52055+
52056+ if (nmatch == NULL)
52057+ goto next_component;
52058+ tmpsubj = current->acl;
52059+ do {
52060+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
52061+ if (obj != NULL) {
52062+ tmp = obj->globbed;
52063+ while (tmp) {
52064+ if (!glob_match(tmp->filename, name)) {
52065+ obj = tmp;
52066+ goto found_obj;
52067+ }
52068+ tmp = tmp->next;
52069+ }
52070+ goto found_obj;
52071+ }
52072+ } while ((tmpsubj = tmpsubj->parent_subject));
52073+next_component:
52074+ /* end case */
52075+ if (p == name)
52076+ break;
52077+
52078+ while (*p != '/')
52079+ p--;
52080+ if (p == name)
52081+ lastp = p + 1;
52082+ else {
52083+ lastp = p;
52084+ p--;
52085+ }
52086+ c = *lastp;
52087+ *lastp = '\0';
52088+ } while (1);
52089+found_obj:
52090+ read_unlock(&gr_inode_lock);
52091+ /* obj returned will always be non-null */
52092+ return obj;
52093+}
52094+
52095+/* returns 0 when allowing, non-zero on error
52096+ op of 0 is used for readdir, so we don't log the names of hidden files
52097+*/
52098+__u32
52099+gr_handle_sysctl(const struct ctl_table *table, const int op)
52100+{
52101+ struct ctl_table *tmp;
52102+ const char *proc_sys = "/proc/sys";
52103+ char *path;
52104+ struct acl_object_label *obj;
52105+ unsigned short len = 0, pos = 0, depth = 0, i;
52106+ __u32 err = 0;
52107+ __u32 mode = 0;
52108+
52109+ if (unlikely(!(gr_status & GR_READY)))
52110+ return 0;
52111+
52112+ /* for now, ignore operations on non-sysctl entries if it's not a
52113+ readdir*/
52114+ if (table->child != NULL && op != 0)
52115+ return 0;
52116+
52117+ mode |= GR_FIND;
52118+ /* it's only a read if it's an entry, read on dirs is for readdir */
52119+ if (op & MAY_READ)
52120+ mode |= GR_READ;
52121+ if (op & MAY_WRITE)
52122+ mode |= GR_WRITE;
52123+
52124+ preempt_disable();
52125+
52126+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
52127+
52128+ /* it's only a read/write if it's an actual entry, not a dir
52129+ (which are opened for readdir)
52130+ */
52131+
52132+ /* convert the requested sysctl entry into a pathname */
52133+
52134+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
52135+ len += strlen(tmp->procname);
52136+ len++;
52137+ depth++;
52138+ }
52139+
52140+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
52141+ /* deny */
52142+ goto out;
52143+ }
52144+
52145+ memset(path, 0, PAGE_SIZE);
52146+
52147+ memcpy(path, proc_sys, strlen(proc_sys));
52148+
52149+ pos += strlen(proc_sys);
52150+
52151+ for (; depth > 0; depth--) {
52152+ path[pos] = '/';
52153+ pos++;
52154+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
52155+ if (depth == i) {
52156+ memcpy(path + pos, tmp->procname,
52157+ strlen(tmp->procname));
52158+ pos += strlen(tmp->procname);
52159+ }
52160+ i++;
52161+ }
52162+ }
52163+
52164+ obj = gr_lookup_by_name(path, pos);
52165+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
52166+
52167+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
52168+ ((err & mode) != mode))) {
52169+ __u32 new_mode = mode;
52170+
52171+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52172+
52173+ err = 0;
52174+ gr_log_learn_sysctl(path, new_mode);
52175+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
52176+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
52177+ err = -ENOENT;
52178+ } else if (!(err & GR_FIND)) {
52179+ err = -ENOENT;
52180+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
52181+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
52182+ path, (mode & GR_READ) ? " reading" : "",
52183+ (mode & GR_WRITE) ? " writing" : "");
52184+ err = -EACCES;
52185+ } else if ((err & mode) != mode) {
52186+ err = -EACCES;
52187+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
52188+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
52189+ path, (mode & GR_READ) ? " reading" : "",
52190+ (mode & GR_WRITE) ? " writing" : "");
52191+ err = 0;
52192+ } else
52193+ err = 0;
52194+
52195+ out:
52196+ preempt_enable();
52197+
52198+ return err;
52199+}
52200+#endif
52201+
52202+int
52203+gr_handle_proc_ptrace(struct task_struct *task)
52204+{
52205+ struct file *filp;
52206+ struct task_struct *tmp = task;
52207+ struct task_struct *curtemp = current;
52208+ __u32 retmode;
52209+
52210+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
52211+ if (unlikely(!(gr_status & GR_READY)))
52212+ return 0;
52213+#endif
52214+
52215+ read_lock(&tasklist_lock);
52216+ read_lock(&grsec_exec_file_lock);
52217+ filp = task->exec_file;
52218+
52219+ while (tmp->pid > 0) {
52220+ if (tmp == curtemp)
52221+ break;
52222+ tmp = tmp->real_parent;
52223+ }
52224+
52225+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
52226+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
52227+ read_unlock(&grsec_exec_file_lock);
52228+ read_unlock(&tasklist_lock);
52229+ return 1;
52230+ }
52231+
52232+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52233+ if (!(gr_status & GR_READY)) {
52234+ read_unlock(&grsec_exec_file_lock);
52235+ read_unlock(&tasklist_lock);
52236+ return 0;
52237+ }
52238+#endif
52239+
52240+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
52241+ read_unlock(&grsec_exec_file_lock);
52242+ read_unlock(&tasklist_lock);
52243+
52244+ if (retmode & GR_NOPTRACE)
52245+ return 1;
52246+
52247+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
52248+ && (current->acl != task->acl || (current->acl != current->role->root_label
52249+ && current->pid != task->pid)))
52250+ return 1;
52251+
52252+ return 0;
52253+}
52254+
52255+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
52256+{
52257+ if (unlikely(!(gr_status & GR_READY)))
52258+ return;
52259+
52260+ if (!(current->role->roletype & GR_ROLE_GOD))
52261+ return;
52262+
52263+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
52264+ p->role->rolename, gr_task_roletype_to_char(p),
52265+ p->acl->filename);
52266+}
52267+
52268+int
52269+gr_handle_ptrace(struct task_struct *task, const long request)
52270+{
52271+ struct task_struct *tmp = task;
52272+ struct task_struct *curtemp = current;
52273+ __u32 retmode;
52274+
52275+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
52276+ if (unlikely(!(gr_status & GR_READY)))
52277+ return 0;
52278+#endif
52279+
52280+ read_lock(&tasklist_lock);
52281+ while (tmp->pid > 0) {
52282+ if (tmp == curtemp)
52283+ break;
52284+ tmp = tmp->real_parent;
52285+ }
52286+
52287+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
52288+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
52289+ read_unlock(&tasklist_lock);
52290+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52291+ return 1;
52292+ }
52293+ read_unlock(&tasklist_lock);
52294+
52295+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52296+ if (!(gr_status & GR_READY))
52297+ return 0;
52298+#endif
52299+
52300+ read_lock(&grsec_exec_file_lock);
52301+ if (unlikely(!task->exec_file)) {
52302+ read_unlock(&grsec_exec_file_lock);
52303+ return 0;
52304+ }
52305+
52306+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
52307+ read_unlock(&grsec_exec_file_lock);
52308+
52309+ if (retmode & GR_NOPTRACE) {
52310+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52311+ return 1;
52312+ }
52313+
52314+ if (retmode & GR_PTRACERD) {
52315+ switch (request) {
52316+ case PTRACE_SEIZE:
52317+ case PTRACE_POKETEXT:
52318+ case PTRACE_POKEDATA:
52319+ case PTRACE_POKEUSR:
52320+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
52321+ case PTRACE_SETREGS:
52322+ case PTRACE_SETFPREGS:
52323+#endif
52324+#ifdef CONFIG_X86
52325+ case PTRACE_SETFPXREGS:
52326+#endif
52327+#ifdef CONFIG_ALTIVEC
52328+ case PTRACE_SETVRREGS:
52329+#endif
52330+ return 1;
52331+ default:
52332+ return 0;
52333+ }
52334+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
52335+ !(current->role->roletype & GR_ROLE_GOD) &&
52336+ (current->acl != task->acl)) {
52337+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52338+ return 1;
52339+ }
52340+
52341+ return 0;
52342+}
52343+
52344+static int is_writable_mmap(const struct file *filp)
52345+{
52346+ struct task_struct *task = current;
52347+ struct acl_object_label *obj, *obj2;
52348+
52349+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
52350+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
52351+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52352+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
52353+ task->role->root_label);
52354+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
52355+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
52356+ return 1;
52357+ }
52358+ }
52359+ return 0;
52360+}
52361+
52362+int
52363+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
52364+{
52365+ __u32 mode;
52366+
52367+ if (unlikely(!file || !(prot & PROT_EXEC)))
52368+ return 1;
52369+
52370+ if (is_writable_mmap(file))
52371+ return 0;
52372+
52373+ mode =
52374+ gr_search_file(file->f_path.dentry,
52375+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
52376+ file->f_path.mnt);
52377+
52378+ if (!gr_tpe_allow(file))
52379+ return 0;
52380+
52381+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
52382+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52383+ return 0;
52384+ } else if (unlikely(!(mode & GR_EXEC))) {
52385+ return 0;
52386+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
52387+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52388+ return 1;
52389+ }
52390+
52391+ return 1;
52392+}
52393+
52394+int
52395+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
52396+{
52397+ __u32 mode;
52398+
52399+ if (unlikely(!file || !(prot & PROT_EXEC)))
52400+ return 1;
52401+
52402+ if (is_writable_mmap(file))
52403+ return 0;
52404+
52405+ mode =
52406+ gr_search_file(file->f_path.dentry,
52407+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
52408+ file->f_path.mnt);
52409+
52410+ if (!gr_tpe_allow(file))
52411+ return 0;
52412+
52413+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
52414+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52415+ return 0;
52416+ } else if (unlikely(!(mode & GR_EXEC))) {
52417+ return 0;
52418+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
52419+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52420+ return 1;
52421+ }
52422+
52423+ return 1;
52424+}
52425+
52426+void
52427+gr_acl_handle_psacct(struct task_struct *task, const long code)
52428+{
52429+ unsigned long runtime;
52430+ unsigned long cputime;
52431+ unsigned int wday, cday;
52432+ __u8 whr, chr;
52433+ __u8 wmin, cmin;
52434+ __u8 wsec, csec;
52435+ struct timespec timeval;
52436+
52437+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
52438+ !(task->acl->mode & GR_PROCACCT)))
52439+ return;
52440+
52441+ do_posix_clock_monotonic_gettime(&timeval);
52442+ runtime = timeval.tv_sec - task->start_time.tv_sec;
52443+ wday = runtime / (3600 * 24);
52444+ runtime -= wday * (3600 * 24);
52445+ whr = runtime / 3600;
52446+ runtime -= whr * 3600;
52447+ wmin = runtime / 60;
52448+ runtime -= wmin * 60;
52449+ wsec = runtime;
52450+
52451+ cputime = (task->utime + task->stime) / HZ;
52452+ cday = cputime / (3600 * 24);
52453+ cputime -= cday * (3600 * 24);
52454+ chr = cputime / 3600;
52455+ cputime -= chr * 3600;
52456+ cmin = cputime / 60;
52457+ cputime -= cmin * 60;
52458+ csec = cputime;
52459+
52460+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
52461+
52462+ return;
52463+}
52464+
52465+void gr_set_kernel_label(struct task_struct *task)
52466+{
52467+ if (gr_status & GR_READY) {
52468+ task->role = kernel_role;
52469+ task->acl = kernel_role->root_label;
52470+ }
52471+ return;
52472+}
52473+
52474+#ifdef CONFIG_TASKSTATS
52475+int gr_is_taskstats_denied(int pid)
52476+{
52477+ struct task_struct *task;
52478+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52479+ const struct cred *cred;
52480+#endif
52481+ int ret = 0;
52482+
52483+ /* restrict taskstats viewing to un-chrooted root users
52484+ who have the 'view' subject flag if the RBAC system is enabled
52485+ */
52486+
52487+ rcu_read_lock();
52488+ read_lock(&tasklist_lock);
52489+ task = find_task_by_vpid(pid);
52490+ if (task) {
52491+#ifdef CONFIG_GRKERNSEC_CHROOT
52492+ if (proc_is_chrooted(task))
52493+ ret = -EACCES;
52494+#endif
52495+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52496+ cred = __task_cred(task);
52497+#ifdef CONFIG_GRKERNSEC_PROC_USER
52498+ if (cred->uid != 0)
52499+ ret = -EACCES;
52500+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52501+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
52502+ ret = -EACCES;
52503+#endif
52504+#endif
52505+ if (gr_status & GR_READY) {
52506+ if (!(task->acl->mode & GR_VIEW))
52507+ ret = -EACCES;
52508+ }
52509+ } else
52510+ ret = -ENOENT;
52511+
52512+ read_unlock(&tasklist_lock);
52513+ rcu_read_unlock();
52514+
52515+ return ret;
52516+}
52517+#endif
52518+
52519+/* AUXV entries are filled via a descendant of search_binary_handler
52520+ after we've already applied the subject for the target
52521+*/
52522+int gr_acl_enable_at_secure(void)
52523+{
52524+ if (unlikely(!(gr_status & GR_READY)))
52525+ return 0;
52526+
52527+ if (current->acl->mode & GR_ATSECURE)
52528+ return 1;
52529+
52530+ return 0;
52531+}
52532+
52533+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
52534+{
52535+ struct task_struct *task = current;
52536+ struct dentry *dentry = file->f_path.dentry;
52537+ struct vfsmount *mnt = file->f_path.mnt;
52538+ struct acl_object_label *obj, *tmp;
52539+ struct acl_subject_label *subj;
52540+ unsigned int bufsize;
52541+ int is_not_root;
52542+ char *path;
52543+ dev_t dev = __get_dev(dentry);
52544+
52545+ if (unlikely(!(gr_status & GR_READY)))
52546+ return 1;
52547+
52548+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52549+ return 1;
52550+
52551+ /* ignore Eric Biederman */
52552+ if (IS_PRIVATE(dentry->d_inode))
52553+ return 1;
52554+
52555+ subj = task->acl;
52556+ do {
52557+ obj = lookup_acl_obj_label(ino, dev, subj);
52558+ if (obj != NULL)
52559+ return (obj->mode & GR_FIND) ? 1 : 0;
52560+ } while ((subj = subj->parent_subject));
52561+
52562+ /* this is purely an optimization since we're looking for an object
52563+ for the directory we're doing a readdir on
52564+ if it's possible for any globbed object to match the entry we're
52565+ filling into the directory, then the object we find here will be
52566+ an anchor point with attached globbed objects
52567+ */
52568+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
52569+ if (obj->globbed == NULL)
52570+ return (obj->mode & GR_FIND) ? 1 : 0;
52571+
52572+ is_not_root = ((obj->filename[0] == '/') &&
52573+ (obj->filename[1] == '\0')) ? 0 : 1;
52574+ bufsize = PAGE_SIZE - namelen - is_not_root;
52575+
52576+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
52577+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
52578+ return 1;
52579+
52580+ preempt_disable();
52581+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
52582+ bufsize);
52583+
52584+ bufsize = strlen(path);
52585+
52586+ /* if base is "/", don't append an additional slash */
52587+ if (is_not_root)
52588+ *(path + bufsize) = '/';
52589+ memcpy(path + bufsize + is_not_root, name, namelen);
52590+ *(path + bufsize + namelen + is_not_root) = '\0';
52591+
52592+ tmp = obj->globbed;
52593+ while (tmp) {
52594+ if (!glob_match(tmp->filename, path)) {
52595+ preempt_enable();
52596+ return (tmp->mode & GR_FIND) ? 1 : 0;
52597+ }
52598+ tmp = tmp->next;
52599+ }
52600+ preempt_enable();
52601+ return (obj->mode & GR_FIND) ? 1 : 0;
52602+}
52603+
52604+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
52605+EXPORT_SYMBOL(gr_acl_is_enabled);
52606+#endif
52607+EXPORT_SYMBOL(gr_learn_resource);
52608+EXPORT_SYMBOL(gr_set_kernel_label);
52609+#ifdef CONFIG_SECURITY
52610+EXPORT_SYMBOL(gr_check_user_change);
52611+EXPORT_SYMBOL(gr_check_group_change);
52612+#endif
52613+
52614diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
52615new file mode 100644
52616index 0000000..34fefda
52617--- /dev/null
52618+++ b/grsecurity/gracl_alloc.c
52619@@ -0,0 +1,105 @@
52620+#include <linux/kernel.h>
52621+#include <linux/mm.h>
52622+#include <linux/slab.h>
52623+#include <linux/vmalloc.h>
52624+#include <linux/gracl.h>
52625+#include <linux/grsecurity.h>
52626+
52627+static unsigned long alloc_stack_next = 1;
52628+static unsigned long alloc_stack_size = 1;
52629+static void **alloc_stack;
52630+
52631+static __inline__ int
52632+alloc_pop(void)
52633+{
52634+ if (alloc_stack_next == 1)
52635+ return 0;
52636+
52637+ kfree(alloc_stack[alloc_stack_next - 2]);
52638+
52639+ alloc_stack_next--;
52640+
52641+ return 1;
52642+}
52643+
52644+static __inline__ int
52645+alloc_push(void *buf)
52646+{
52647+ if (alloc_stack_next >= alloc_stack_size)
52648+ return 1;
52649+
52650+ alloc_stack[alloc_stack_next - 1] = buf;
52651+
52652+ alloc_stack_next++;
52653+
52654+ return 0;
52655+}
52656+
52657+void *
52658+acl_alloc(unsigned long len)
52659+{
52660+ void *ret = NULL;
52661+
52662+ if (!len || len > PAGE_SIZE)
52663+ goto out;
52664+
52665+ ret = kmalloc(len, GFP_KERNEL);
52666+
52667+ if (ret) {
52668+ if (alloc_push(ret)) {
52669+ kfree(ret);
52670+ ret = NULL;
52671+ }
52672+ }
52673+
52674+out:
52675+ return ret;
52676+}
52677+
52678+void *
52679+acl_alloc_num(unsigned long num, unsigned long len)
52680+{
52681+ if (!len || (num > (PAGE_SIZE / len)))
52682+ return NULL;
52683+
52684+ return acl_alloc(num * len);
52685+}
52686+
52687+void
52688+acl_free_all(void)
52689+{
52690+ if (gr_acl_is_enabled() || !alloc_stack)
52691+ return;
52692+
52693+ while (alloc_pop()) ;
52694+
52695+ if (alloc_stack) {
52696+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
52697+ kfree(alloc_stack);
52698+ else
52699+ vfree(alloc_stack);
52700+ }
52701+
52702+ alloc_stack = NULL;
52703+ alloc_stack_size = 1;
52704+ alloc_stack_next = 1;
52705+
52706+ return;
52707+}
52708+
52709+int
52710+acl_alloc_stack_init(unsigned long size)
52711+{
52712+ if ((size * sizeof (void *)) <= PAGE_SIZE)
52713+ alloc_stack =
52714+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
52715+ else
52716+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
52717+
52718+ alloc_stack_size = size;
52719+
52720+ if (!alloc_stack)
52721+ return 0;
52722+ else
52723+ return 1;
52724+}
52725diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
52726new file mode 100644
52727index 0000000..955ddfb
52728--- /dev/null
52729+++ b/grsecurity/gracl_cap.c
52730@@ -0,0 +1,101 @@
52731+#include <linux/kernel.h>
52732+#include <linux/module.h>
52733+#include <linux/sched.h>
52734+#include <linux/gracl.h>
52735+#include <linux/grsecurity.h>
52736+#include <linux/grinternal.h>
52737+
52738+extern const char *captab_log[];
52739+extern int captab_log_entries;
52740+
52741+int
52742+gr_acl_is_capable(const int cap)
52743+{
52744+ struct task_struct *task = current;
52745+ const struct cred *cred = current_cred();
52746+ struct acl_subject_label *curracl;
52747+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52748+ kernel_cap_t cap_audit = __cap_empty_set;
52749+
52750+ if (!gr_acl_is_enabled())
52751+ return 1;
52752+
52753+ curracl = task->acl;
52754+
52755+ cap_drop = curracl->cap_lower;
52756+ cap_mask = curracl->cap_mask;
52757+ cap_audit = curracl->cap_invert_audit;
52758+
52759+ while ((curracl = curracl->parent_subject)) {
52760+ /* if the cap isn't specified in the current computed mask but is specified in the
52761+ current level subject, and is lowered in the current level subject, then add
52762+ it to the set of dropped capabilities
52763+ otherwise, add the current level subject's mask to the current computed mask
52764+ */
52765+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52766+ cap_raise(cap_mask, cap);
52767+ if (cap_raised(curracl->cap_lower, cap))
52768+ cap_raise(cap_drop, cap);
52769+ if (cap_raised(curracl->cap_invert_audit, cap))
52770+ cap_raise(cap_audit, cap);
52771+ }
52772+ }
52773+
52774+ if (!cap_raised(cap_drop, cap)) {
52775+ if (cap_raised(cap_audit, cap))
52776+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
52777+ return 1;
52778+ }
52779+
52780+ curracl = task->acl;
52781+
52782+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
52783+ && cap_raised(cred->cap_effective, cap)) {
52784+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
52785+ task->role->roletype, cred->uid,
52786+ cred->gid, task->exec_file ?
52787+ gr_to_filename(task->exec_file->f_path.dentry,
52788+ task->exec_file->f_path.mnt) : curracl->filename,
52789+ curracl->filename, 0UL,
52790+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
52791+ return 1;
52792+ }
52793+
52794+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
52795+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
52796+ return 0;
52797+}
52798+
52799+int
52800+gr_acl_is_capable_nolog(const int cap)
52801+{
52802+ struct acl_subject_label *curracl;
52803+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52804+
52805+ if (!gr_acl_is_enabled())
52806+ return 1;
52807+
52808+ curracl = current->acl;
52809+
52810+ cap_drop = curracl->cap_lower;
52811+ cap_mask = curracl->cap_mask;
52812+
52813+ while ((curracl = curracl->parent_subject)) {
52814+ /* if the cap isn't specified in the current computed mask but is specified in the
52815+ current level subject, and is lowered in the current level subject, then add
52816+ it to the set of dropped capabilities
52817+ otherwise, add the current level subject's mask to the current computed mask
52818+ */
52819+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52820+ cap_raise(cap_mask, cap);
52821+ if (cap_raised(curracl->cap_lower, cap))
52822+ cap_raise(cap_drop, cap);
52823+ }
52824+ }
52825+
52826+ if (!cap_raised(cap_drop, cap))
52827+ return 1;
52828+
52829+ return 0;
52830+}
52831+
52832diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
52833new file mode 100644
52834index 0000000..88d0e87
52835--- /dev/null
52836+++ b/grsecurity/gracl_fs.c
52837@@ -0,0 +1,435 @@
52838+#include <linux/kernel.h>
52839+#include <linux/sched.h>
52840+#include <linux/types.h>
52841+#include <linux/fs.h>
52842+#include <linux/file.h>
52843+#include <linux/stat.h>
52844+#include <linux/grsecurity.h>
52845+#include <linux/grinternal.h>
52846+#include <linux/gracl.h>
52847+
52848+umode_t
52849+gr_acl_umask(void)
52850+{
52851+ if (unlikely(!gr_acl_is_enabled()))
52852+ return 0;
52853+
52854+ return current->role->umask;
52855+}
52856+
52857+__u32
52858+gr_acl_handle_hidden_file(const struct dentry * dentry,
52859+ const struct vfsmount * mnt)
52860+{
52861+ __u32 mode;
52862+
52863+ if (unlikely(!dentry->d_inode))
52864+ return GR_FIND;
52865+
52866+ mode =
52867+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
52868+
52869+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
52870+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52871+ return mode;
52872+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
52873+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52874+ return 0;
52875+ } else if (unlikely(!(mode & GR_FIND)))
52876+ return 0;
52877+
52878+ return GR_FIND;
52879+}
52880+
52881+__u32
52882+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
52883+ int acc_mode)
52884+{
52885+ __u32 reqmode = GR_FIND;
52886+ __u32 mode;
52887+
52888+ if (unlikely(!dentry->d_inode))
52889+ return reqmode;
52890+
52891+ if (acc_mode & MAY_APPEND)
52892+ reqmode |= GR_APPEND;
52893+ else if (acc_mode & MAY_WRITE)
52894+ reqmode |= GR_WRITE;
52895+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
52896+ reqmode |= GR_READ;
52897+
52898+ mode =
52899+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
52900+ mnt);
52901+
52902+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52903+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52904+ reqmode & GR_READ ? " reading" : "",
52905+ reqmode & GR_WRITE ? " writing" : reqmode &
52906+ GR_APPEND ? " appending" : "");
52907+ return reqmode;
52908+ } else
52909+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52910+ {
52911+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52912+ reqmode & GR_READ ? " reading" : "",
52913+ reqmode & GR_WRITE ? " writing" : reqmode &
52914+ GR_APPEND ? " appending" : "");
52915+ return 0;
52916+ } else if (unlikely((mode & reqmode) != reqmode))
52917+ return 0;
52918+
52919+ return reqmode;
52920+}
52921+
52922+__u32
52923+gr_acl_handle_creat(const struct dentry * dentry,
52924+ const struct dentry * p_dentry,
52925+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
52926+ const int imode)
52927+{
52928+ __u32 reqmode = GR_WRITE | GR_CREATE;
52929+ __u32 mode;
52930+
52931+ if (acc_mode & MAY_APPEND)
52932+ reqmode |= GR_APPEND;
52933+ // if a directory was required or the directory already exists, then
52934+ // don't count this open as a read
52935+ if ((acc_mode & MAY_READ) &&
52936+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
52937+ reqmode |= GR_READ;
52938+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
52939+ reqmode |= GR_SETID;
52940+
52941+ mode =
52942+ gr_check_create(dentry, p_dentry, p_mnt,
52943+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
52944+
52945+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52946+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52947+ reqmode & GR_READ ? " reading" : "",
52948+ reqmode & GR_WRITE ? " writing" : reqmode &
52949+ GR_APPEND ? " appending" : "");
52950+ return reqmode;
52951+ } else
52952+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52953+ {
52954+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52955+ reqmode & GR_READ ? " reading" : "",
52956+ reqmode & GR_WRITE ? " writing" : reqmode &
52957+ GR_APPEND ? " appending" : "");
52958+ return 0;
52959+ } else if (unlikely((mode & reqmode) != reqmode))
52960+ return 0;
52961+
52962+ return reqmode;
52963+}
52964+
52965+__u32
52966+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
52967+ const int fmode)
52968+{
52969+ __u32 mode, reqmode = GR_FIND;
52970+
52971+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
52972+ reqmode |= GR_EXEC;
52973+ if (fmode & S_IWOTH)
52974+ reqmode |= GR_WRITE;
52975+ if (fmode & S_IROTH)
52976+ reqmode |= GR_READ;
52977+
52978+ mode =
52979+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
52980+ mnt);
52981+
52982+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52983+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
52984+ reqmode & GR_READ ? " reading" : "",
52985+ reqmode & GR_WRITE ? " writing" : "",
52986+ reqmode & GR_EXEC ? " executing" : "");
52987+ return reqmode;
52988+ } else
52989+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52990+ {
52991+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
52992+ reqmode & GR_READ ? " reading" : "",
52993+ reqmode & GR_WRITE ? " writing" : "",
52994+ reqmode & GR_EXEC ? " executing" : "");
52995+ return 0;
52996+ } else if (unlikely((mode & reqmode) != reqmode))
52997+ return 0;
52998+
52999+ return reqmode;
53000+}
53001+
53002+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
53003+{
53004+ __u32 mode;
53005+
53006+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
53007+
53008+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
53009+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
53010+ return mode;
53011+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
53012+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
53013+ return 0;
53014+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
53015+ return 0;
53016+
53017+ return (reqmode);
53018+}
53019+
53020+__u32
53021+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
53022+{
53023+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
53024+}
53025+
53026+__u32
53027+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
53028+{
53029+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
53030+}
53031+
53032+__u32
53033+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
53034+{
53035+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
53036+}
53037+
53038+__u32
53039+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
53040+{
53041+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
53042+}
53043+
53044+__u32
53045+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
53046+ umode_t *modeptr)
53047+{
53048+ umode_t mode;
53049+
53050+ *modeptr &= ~gr_acl_umask();
53051+ mode = *modeptr;
53052+
53053+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
53054+ return 1;
53055+
53056+ if (unlikely(mode & (S_ISUID | S_ISGID))) {
53057+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
53058+ GR_CHMOD_ACL_MSG);
53059+ } else {
53060+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
53061+ }
53062+}
53063+
53064+__u32
53065+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
53066+{
53067+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
53068+}
53069+
53070+__u32
53071+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
53072+{
53073+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
53074+}
53075+
53076+__u32
53077+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
53078+{
53079+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
53080+}
53081+
53082+__u32
53083+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
53084+{
53085+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
53086+ GR_UNIXCONNECT_ACL_MSG);
53087+}
53088+
53089+/* hardlinks require at minimum create and link permission,
53090+ any additional privilege required is based on the
53091+ privilege of the file being linked to
53092+*/
53093+__u32
53094+gr_acl_handle_link(const struct dentry * new_dentry,
53095+ const struct dentry * parent_dentry,
53096+ const struct vfsmount * parent_mnt,
53097+ const struct dentry * old_dentry,
53098+ const struct vfsmount * old_mnt, const char *to)
53099+{
53100+ __u32 mode;
53101+ __u32 needmode = GR_CREATE | GR_LINK;
53102+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
53103+
53104+ mode =
53105+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
53106+ old_mnt);
53107+
53108+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
53109+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
53110+ return mode;
53111+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
53112+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
53113+ return 0;
53114+ } else if (unlikely((mode & needmode) != needmode))
53115+ return 0;
53116+
53117+ return 1;
53118+}
53119+
53120+__u32
53121+gr_acl_handle_symlink(const struct dentry * new_dentry,
53122+ const struct dentry * parent_dentry,
53123+ const struct vfsmount * parent_mnt, const char *from)
53124+{
53125+ __u32 needmode = GR_WRITE | GR_CREATE;
53126+ __u32 mode;
53127+
53128+ mode =
53129+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
53130+ GR_CREATE | GR_AUDIT_CREATE |
53131+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
53132+
53133+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
53134+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
53135+ return mode;
53136+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
53137+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
53138+ return 0;
53139+ } else if (unlikely((mode & needmode) != needmode))
53140+ return 0;
53141+
53142+ return (GR_WRITE | GR_CREATE);
53143+}
53144+
53145+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
53146+{
53147+ __u32 mode;
53148+
53149+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
53150+
53151+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
53152+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
53153+ return mode;
53154+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
53155+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
53156+ return 0;
53157+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
53158+ return 0;
53159+
53160+ return (reqmode);
53161+}
53162+
53163+__u32
53164+gr_acl_handle_mknod(const struct dentry * new_dentry,
53165+ const struct dentry * parent_dentry,
53166+ const struct vfsmount * parent_mnt,
53167+ const int mode)
53168+{
53169+ __u32 reqmode = GR_WRITE | GR_CREATE;
53170+ if (unlikely(mode & (S_ISUID | S_ISGID)))
53171+ reqmode |= GR_SETID;
53172+
53173+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
53174+ reqmode, GR_MKNOD_ACL_MSG);
53175+}
53176+
53177+__u32
53178+gr_acl_handle_mkdir(const struct dentry *new_dentry,
53179+ const struct dentry *parent_dentry,
53180+ const struct vfsmount *parent_mnt)
53181+{
53182+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
53183+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
53184+}
53185+
53186+#define RENAME_CHECK_SUCCESS(old, new) \
53187+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
53188+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
53189+
53190+int
53191+gr_acl_handle_rename(struct dentry *new_dentry,
53192+ struct dentry *parent_dentry,
53193+ const struct vfsmount *parent_mnt,
53194+ struct dentry *old_dentry,
53195+ struct inode *old_parent_inode,
53196+ struct vfsmount *old_mnt, const char *newname)
53197+{
53198+ __u32 comp1, comp2;
53199+ int error = 0;
53200+
53201+ if (unlikely(!gr_acl_is_enabled()))
53202+ return 0;
53203+
53204+ if (!new_dentry->d_inode) {
53205+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
53206+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
53207+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
53208+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
53209+ GR_DELETE | GR_AUDIT_DELETE |
53210+ GR_AUDIT_READ | GR_AUDIT_WRITE |
53211+ GR_SUPPRESS, old_mnt);
53212+ } else {
53213+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
53214+ GR_CREATE | GR_DELETE |
53215+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
53216+ GR_AUDIT_READ | GR_AUDIT_WRITE |
53217+ GR_SUPPRESS, parent_mnt);
53218+ comp2 =
53219+ gr_search_file(old_dentry,
53220+ GR_READ | GR_WRITE | GR_AUDIT_READ |
53221+ GR_DELETE | GR_AUDIT_DELETE |
53222+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
53223+ }
53224+
53225+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
53226+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
53227+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
53228+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
53229+ && !(comp2 & GR_SUPPRESS)) {
53230+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
53231+ error = -EACCES;
53232+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
53233+ error = -EACCES;
53234+
53235+ return error;
53236+}
53237+
53238+void
53239+gr_acl_handle_exit(void)
53240+{
53241+ u16 id;
53242+ char *rolename;
53243+ struct file *exec_file;
53244+
53245+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
53246+ !(current->role->roletype & GR_ROLE_PERSIST))) {
53247+ id = current->acl_role_id;
53248+ rolename = current->role->rolename;
53249+ gr_set_acls(1);
53250+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
53251+ }
53252+
53253+ write_lock(&grsec_exec_file_lock);
53254+ exec_file = current->exec_file;
53255+ current->exec_file = NULL;
53256+ write_unlock(&grsec_exec_file_lock);
53257+
53258+ if (exec_file)
53259+ fput(exec_file);
53260+}
53261+
53262+int
53263+gr_acl_handle_procpidmem(const struct task_struct *task)
53264+{
53265+ if (unlikely(!gr_acl_is_enabled()))
53266+ return 0;
53267+
53268+ if (task != current && task->acl->mode & GR_PROTPROCFD)
53269+ return -EACCES;
53270+
53271+ return 0;
53272+}
53273diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
53274new file mode 100644
53275index 0000000..17050ca
53276--- /dev/null
53277+++ b/grsecurity/gracl_ip.c
53278@@ -0,0 +1,381 @@
53279+#include <linux/kernel.h>
53280+#include <asm/uaccess.h>
53281+#include <asm/errno.h>
53282+#include <net/sock.h>
53283+#include <linux/file.h>
53284+#include <linux/fs.h>
53285+#include <linux/net.h>
53286+#include <linux/in.h>
53287+#include <linux/skbuff.h>
53288+#include <linux/ip.h>
53289+#include <linux/udp.h>
53290+#include <linux/types.h>
53291+#include <linux/sched.h>
53292+#include <linux/netdevice.h>
53293+#include <linux/inetdevice.h>
53294+#include <linux/gracl.h>
53295+#include <linux/grsecurity.h>
53296+#include <linux/grinternal.h>
53297+
53298+#define GR_BIND 0x01
53299+#define GR_CONNECT 0x02
53300+#define GR_INVERT 0x04
53301+#define GR_BINDOVERRIDE 0x08
53302+#define GR_CONNECTOVERRIDE 0x10
53303+#define GR_SOCK_FAMILY 0x20
53304+
53305+static const char * gr_protocols[IPPROTO_MAX] = {
53306+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
53307+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
53308+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
53309+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
53310+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
53311+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
53312+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
53313+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
53314+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
53315+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
53316+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
53317+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
53318+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
53319+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
53320+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
53321+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
53322+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
53323+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
53324+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
53325+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
53326+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
53327+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
53328+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
53329+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
53330+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
53331+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
53332+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
53333+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
53334+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
53335+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
53336+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
53337+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
53338+ };
53339+
53340+static const char * gr_socktypes[SOCK_MAX] = {
53341+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
53342+ "unknown:7", "unknown:8", "unknown:9", "packet"
53343+ };
53344+
53345+static const char * gr_sockfamilies[AF_MAX+1] = {
53346+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
53347+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
53348+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
53349+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
53350+ };
53351+
53352+const char *
53353+gr_proto_to_name(unsigned char proto)
53354+{
53355+ return gr_protocols[proto];
53356+}
53357+
53358+const char *
53359+gr_socktype_to_name(unsigned char type)
53360+{
53361+ return gr_socktypes[type];
53362+}
53363+
53364+const char *
53365+gr_sockfamily_to_name(unsigned char family)
53366+{
53367+ return gr_sockfamilies[family];
53368+}
53369+
53370+int
53371+gr_search_socket(const int domain, const int type, const int protocol)
53372+{
53373+ struct acl_subject_label *curr;
53374+ const struct cred *cred = current_cred();
53375+
53376+ if (unlikely(!gr_acl_is_enabled()))
53377+ goto exit;
53378+
53379+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
53380+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
53381+ goto exit; // let the kernel handle it
53382+
53383+ curr = current->acl;
53384+
53385+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
53386+ /* the family is allowed, if this is PF_INET allow it only if
53387+ the extra sock type/protocol checks pass */
53388+ if (domain == PF_INET)
53389+ goto inet_check;
53390+ goto exit;
53391+ } else {
53392+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53393+ __u32 fakeip = 0;
53394+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53395+ current->role->roletype, cred->uid,
53396+ cred->gid, current->exec_file ?
53397+ gr_to_filename(current->exec_file->f_path.dentry,
53398+ current->exec_file->f_path.mnt) :
53399+ curr->filename, curr->filename,
53400+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
53401+ &current->signal->saved_ip);
53402+ goto exit;
53403+ }
53404+ goto exit_fail;
53405+ }
53406+
53407+inet_check:
53408+ /* the rest of this checking is for IPv4 only */
53409+ if (!curr->ips)
53410+ goto exit;
53411+
53412+ if ((curr->ip_type & (1 << type)) &&
53413+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
53414+ goto exit;
53415+
53416+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53417+ /* we don't place acls on raw sockets , and sometimes
53418+ dgram/ip sockets are opened for ioctl and not
53419+ bind/connect, so we'll fake a bind learn log */
53420+ if (type == SOCK_RAW || type == SOCK_PACKET) {
53421+ __u32 fakeip = 0;
53422+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53423+ current->role->roletype, cred->uid,
53424+ cred->gid, current->exec_file ?
53425+ gr_to_filename(current->exec_file->f_path.dentry,
53426+ current->exec_file->f_path.mnt) :
53427+ curr->filename, curr->filename,
53428+ &fakeip, 0, type,
53429+ protocol, GR_CONNECT, &current->signal->saved_ip);
53430+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
53431+ __u32 fakeip = 0;
53432+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53433+ current->role->roletype, cred->uid,
53434+ cred->gid, current->exec_file ?
53435+ gr_to_filename(current->exec_file->f_path.dentry,
53436+ current->exec_file->f_path.mnt) :
53437+ curr->filename, curr->filename,
53438+ &fakeip, 0, type,
53439+ protocol, GR_BIND, &current->signal->saved_ip);
53440+ }
53441+ /* we'll log when they use connect or bind */
53442+ goto exit;
53443+ }
53444+
53445+exit_fail:
53446+ if (domain == PF_INET)
53447+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
53448+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
53449+ else
53450+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
53451+ gr_socktype_to_name(type), protocol);
53452+
53453+ return 0;
53454+exit:
53455+ return 1;
53456+}
53457+
53458+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
53459+{
53460+ if ((ip->mode & mode) &&
53461+ (ip_port >= ip->low) &&
53462+ (ip_port <= ip->high) &&
53463+ ((ntohl(ip_addr) & our_netmask) ==
53464+ (ntohl(our_addr) & our_netmask))
53465+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
53466+ && (ip->type & (1 << type))) {
53467+ if (ip->mode & GR_INVERT)
53468+ return 2; // specifically denied
53469+ else
53470+ return 1; // allowed
53471+ }
53472+
53473+ return 0; // not specifically allowed, may continue parsing
53474+}
53475+
53476+static int
53477+gr_search_connectbind(const int full_mode, struct sock *sk,
53478+ struct sockaddr_in *addr, const int type)
53479+{
53480+ char iface[IFNAMSIZ] = {0};
53481+ struct acl_subject_label *curr;
53482+ struct acl_ip_label *ip;
53483+ struct inet_sock *isk;
53484+ struct net_device *dev;
53485+ struct in_device *idev;
53486+ unsigned long i;
53487+ int ret;
53488+ int mode = full_mode & (GR_BIND | GR_CONNECT);
53489+ __u32 ip_addr = 0;
53490+ __u32 our_addr;
53491+ __u32 our_netmask;
53492+ char *p;
53493+ __u16 ip_port = 0;
53494+ const struct cred *cred = current_cred();
53495+
53496+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
53497+ return 0;
53498+
53499+ curr = current->acl;
53500+ isk = inet_sk(sk);
53501+
53502+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
53503+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
53504+ addr->sin_addr.s_addr = curr->inaddr_any_override;
53505+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
53506+ struct sockaddr_in saddr;
53507+ int err;
53508+
53509+ saddr.sin_family = AF_INET;
53510+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
53511+ saddr.sin_port = isk->inet_sport;
53512+
53513+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
53514+ if (err)
53515+ return err;
53516+
53517+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
53518+ if (err)
53519+ return err;
53520+ }
53521+
53522+ if (!curr->ips)
53523+ return 0;
53524+
53525+ ip_addr = addr->sin_addr.s_addr;
53526+ ip_port = ntohs(addr->sin_port);
53527+
53528+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53529+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53530+ current->role->roletype, cred->uid,
53531+ cred->gid, current->exec_file ?
53532+ gr_to_filename(current->exec_file->f_path.dentry,
53533+ current->exec_file->f_path.mnt) :
53534+ curr->filename, curr->filename,
53535+ &ip_addr, ip_port, type,
53536+ sk->sk_protocol, mode, &current->signal->saved_ip);
53537+ return 0;
53538+ }
53539+
53540+ for (i = 0; i < curr->ip_num; i++) {
53541+ ip = *(curr->ips + i);
53542+ if (ip->iface != NULL) {
53543+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
53544+ p = strchr(iface, ':');
53545+ if (p != NULL)
53546+ *p = '\0';
53547+ dev = dev_get_by_name(sock_net(sk), iface);
53548+ if (dev == NULL)
53549+ continue;
53550+ idev = in_dev_get(dev);
53551+ if (idev == NULL) {
53552+ dev_put(dev);
53553+ continue;
53554+ }
53555+ rcu_read_lock();
53556+ for_ifa(idev) {
53557+ if (!strcmp(ip->iface, ifa->ifa_label)) {
53558+ our_addr = ifa->ifa_address;
53559+ our_netmask = 0xffffffff;
53560+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
53561+ if (ret == 1) {
53562+ rcu_read_unlock();
53563+ in_dev_put(idev);
53564+ dev_put(dev);
53565+ return 0;
53566+ } else if (ret == 2) {
53567+ rcu_read_unlock();
53568+ in_dev_put(idev);
53569+ dev_put(dev);
53570+ goto denied;
53571+ }
53572+ }
53573+ } endfor_ifa(idev);
53574+ rcu_read_unlock();
53575+ in_dev_put(idev);
53576+ dev_put(dev);
53577+ } else {
53578+ our_addr = ip->addr;
53579+ our_netmask = ip->netmask;
53580+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
53581+ if (ret == 1)
53582+ return 0;
53583+ else if (ret == 2)
53584+ goto denied;
53585+ }
53586+ }
53587+
53588+denied:
53589+ if (mode == GR_BIND)
53590+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53591+ else if (mode == GR_CONNECT)
53592+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53593+
53594+ return -EACCES;
53595+}
53596+
53597+int
53598+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
53599+{
53600+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
53601+}
53602+
53603+int
53604+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
53605+{
53606+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
53607+}
53608+
53609+int gr_search_listen(struct socket *sock)
53610+{
53611+ struct sock *sk = sock->sk;
53612+ struct sockaddr_in addr;
53613+
53614+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53615+ addr.sin_port = inet_sk(sk)->inet_sport;
53616+
53617+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53618+}
53619+
53620+int gr_search_accept(struct socket *sock)
53621+{
53622+ struct sock *sk = sock->sk;
53623+ struct sockaddr_in addr;
53624+
53625+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53626+ addr.sin_port = inet_sk(sk)->inet_sport;
53627+
53628+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53629+}
53630+
53631+int
53632+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
53633+{
53634+ if (addr)
53635+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
53636+ else {
53637+ struct sockaddr_in sin;
53638+ const struct inet_sock *inet = inet_sk(sk);
53639+
53640+ sin.sin_addr.s_addr = inet->inet_daddr;
53641+ sin.sin_port = inet->inet_dport;
53642+
53643+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53644+ }
53645+}
53646+
53647+int
53648+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
53649+{
53650+ struct sockaddr_in sin;
53651+
53652+ if (unlikely(skb->len < sizeof (struct udphdr)))
53653+ return 0; // skip this packet
53654+
53655+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
53656+ sin.sin_port = udp_hdr(skb)->source;
53657+
53658+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53659+}
53660diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
53661new file mode 100644
53662index 0000000..25f54ef
53663--- /dev/null
53664+++ b/grsecurity/gracl_learn.c
53665@@ -0,0 +1,207 @@
53666+#include <linux/kernel.h>
53667+#include <linux/mm.h>
53668+#include <linux/sched.h>
53669+#include <linux/poll.h>
53670+#include <linux/string.h>
53671+#include <linux/file.h>
53672+#include <linux/types.h>
53673+#include <linux/vmalloc.h>
53674+#include <linux/grinternal.h>
53675+
53676+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
53677+ size_t count, loff_t *ppos);
53678+extern int gr_acl_is_enabled(void);
53679+
53680+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
53681+static int gr_learn_attached;
53682+
53683+/* use a 512k buffer */
53684+#define LEARN_BUFFER_SIZE (512 * 1024)
53685+
53686+static DEFINE_SPINLOCK(gr_learn_lock);
53687+static DEFINE_MUTEX(gr_learn_user_mutex);
53688+
53689+/* we need to maintain two buffers, so that the kernel context of grlearn
53690+ uses a semaphore around the userspace copying, and the other kernel contexts
53691+ use a spinlock when copying into the buffer, since they cannot sleep
53692+*/
53693+static char *learn_buffer;
53694+static char *learn_buffer_user;
53695+static int learn_buffer_len;
53696+static int learn_buffer_user_len;
53697+
53698+static ssize_t
53699+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
53700+{
53701+ DECLARE_WAITQUEUE(wait, current);
53702+ ssize_t retval = 0;
53703+
53704+ add_wait_queue(&learn_wait, &wait);
53705+ set_current_state(TASK_INTERRUPTIBLE);
53706+ do {
53707+ mutex_lock(&gr_learn_user_mutex);
53708+ spin_lock(&gr_learn_lock);
53709+ if (learn_buffer_len)
53710+ break;
53711+ spin_unlock(&gr_learn_lock);
53712+ mutex_unlock(&gr_learn_user_mutex);
53713+ if (file->f_flags & O_NONBLOCK) {
53714+ retval = -EAGAIN;
53715+ goto out;
53716+ }
53717+ if (signal_pending(current)) {
53718+ retval = -ERESTARTSYS;
53719+ goto out;
53720+ }
53721+
53722+ schedule();
53723+ } while (1);
53724+
53725+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
53726+ learn_buffer_user_len = learn_buffer_len;
53727+ retval = learn_buffer_len;
53728+ learn_buffer_len = 0;
53729+
53730+ spin_unlock(&gr_learn_lock);
53731+
53732+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
53733+ retval = -EFAULT;
53734+
53735+ mutex_unlock(&gr_learn_user_mutex);
53736+out:
53737+ set_current_state(TASK_RUNNING);
53738+ remove_wait_queue(&learn_wait, &wait);
53739+ return retval;
53740+}
53741+
53742+static unsigned int
53743+poll_learn(struct file * file, poll_table * wait)
53744+{
53745+ poll_wait(file, &learn_wait, wait);
53746+
53747+ if (learn_buffer_len)
53748+ return (POLLIN | POLLRDNORM);
53749+
53750+ return 0;
53751+}
53752+
53753+void
53754+gr_clear_learn_entries(void)
53755+{
53756+ char *tmp;
53757+
53758+ mutex_lock(&gr_learn_user_mutex);
53759+ spin_lock(&gr_learn_lock);
53760+ tmp = learn_buffer;
53761+ learn_buffer = NULL;
53762+ spin_unlock(&gr_learn_lock);
53763+ if (tmp)
53764+ vfree(tmp);
53765+ if (learn_buffer_user != NULL) {
53766+ vfree(learn_buffer_user);
53767+ learn_buffer_user = NULL;
53768+ }
53769+ learn_buffer_len = 0;
53770+ mutex_unlock(&gr_learn_user_mutex);
53771+
53772+ return;
53773+}
53774+
53775+void
53776+gr_add_learn_entry(const char *fmt, ...)
53777+{
53778+ va_list args;
53779+ unsigned int len;
53780+
53781+ if (!gr_learn_attached)
53782+ return;
53783+
53784+ spin_lock(&gr_learn_lock);
53785+
53786+ /* leave a gap at the end so we know when it's "full" but don't have to
53787+ compute the exact length of the string we're trying to append
53788+ */
53789+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
53790+ spin_unlock(&gr_learn_lock);
53791+ wake_up_interruptible(&learn_wait);
53792+ return;
53793+ }
53794+ if (learn_buffer == NULL) {
53795+ spin_unlock(&gr_learn_lock);
53796+ return;
53797+ }
53798+
53799+ va_start(args, fmt);
53800+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
53801+ va_end(args);
53802+
53803+ learn_buffer_len += len + 1;
53804+
53805+ spin_unlock(&gr_learn_lock);
53806+ wake_up_interruptible(&learn_wait);
53807+
53808+ return;
53809+}
53810+
53811+static int
53812+open_learn(struct inode *inode, struct file *file)
53813+{
53814+ if (file->f_mode & FMODE_READ && gr_learn_attached)
53815+ return -EBUSY;
53816+ if (file->f_mode & FMODE_READ) {
53817+ int retval = 0;
53818+ mutex_lock(&gr_learn_user_mutex);
53819+ if (learn_buffer == NULL)
53820+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
53821+ if (learn_buffer_user == NULL)
53822+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
53823+ if (learn_buffer == NULL) {
53824+ retval = -ENOMEM;
53825+ goto out_error;
53826+ }
53827+ if (learn_buffer_user == NULL) {
53828+ retval = -ENOMEM;
53829+ goto out_error;
53830+ }
53831+ learn_buffer_len = 0;
53832+ learn_buffer_user_len = 0;
53833+ gr_learn_attached = 1;
53834+out_error:
53835+ mutex_unlock(&gr_learn_user_mutex);
53836+ return retval;
53837+ }
53838+ return 0;
53839+}
53840+
53841+static int
53842+close_learn(struct inode *inode, struct file *file)
53843+{
53844+ if (file->f_mode & FMODE_READ) {
53845+ char *tmp = NULL;
53846+ mutex_lock(&gr_learn_user_mutex);
53847+ spin_lock(&gr_learn_lock);
53848+ tmp = learn_buffer;
53849+ learn_buffer = NULL;
53850+ spin_unlock(&gr_learn_lock);
53851+ if (tmp)
53852+ vfree(tmp);
53853+ if (learn_buffer_user != NULL) {
53854+ vfree(learn_buffer_user);
53855+ learn_buffer_user = NULL;
53856+ }
53857+ learn_buffer_len = 0;
53858+ learn_buffer_user_len = 0;
53859+ gr_learn_attached = 0;
53860+ mutex_unlock(&gr_learn_user_mutex);
53861+ }
53862+
53863+ return 0;
53864+}
53865+
53866+const struct file_operations grsec_fops = {
53867+ .read = read_learn,
53868+ .write = write_grsec_handler,
53869+ .open = open_learn,
53870+ .release = close_learn,
53871+ .poll = poll_learn,
53872+};
53873diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
53874new file mode 100644
53875index 0000000..39645c9
53876--- /dev/null
53877+++ b/grsecurity/gracl_res.c
53878@@ -0,0 +1,68 @@
53879+#include <linux/kernel.h>
53880+#include <linux/sched.h>
53881+#include <linux/gracl.h>
53882+#include <linux/grinternal.h>
53883+
53884+static const char *restab_log[] = {
53885+ [RLIMIT_CPU] = "RLIMIT_CPU",
53886+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
53887+ [RLIMIT_DATA] = "RLIMIT_DATA",
53888+ [RLIMIT_STACK] = "RLIMIT_STACK",
53889+ [RLIMIT_CORE] = "RLIMIT_CORE",
53890+ [RLIMIT_RSS] = "RLIMIT_RSS",
53891+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
53892+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
53893+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
53894+ [RLIMIT_AS] = "RLIMIT_AS",
53895+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
53896+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
53897+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
53898+ [RLIMIT_NICE] = "RLIMIT_NICE",
53899+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
53900+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
53901+ [GR_CRASH_RES] = "RLIMIT_CRASH"
53902+};
53903+
53904+void
53905+gr_log_resource(const struct task_struct *task,
53906+ const int res, const unsigned long wanted, const int gt)
53907+{
53908+ const struct cred *cred;
53909+ unsigned long rlim;
53910+
53911+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
53912+ return;
53913+
53914+ // not yet supported resource
53915+ if (unlikely(!restab_log[res]))
53916+ return;
53917+
53918+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
53919+ rlim = task_rlimit_max(task, res);
53920+ else
53921+ rlim = task_rlimit(task, res);
53922+
53923+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
53924+ return;
53925+
53926+ rcu_read_lock();
53927+ cred = __task_cred(task);
53928+
53929+ if (res == RLIMIT_NPROC &&
53930+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
53931+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
53932+ goto out_rcu_unlock;
53933+ else if (res == RLIMIT_MEMLOCK &&
53934+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
53935+ goto out_rcu_unlock;
53936+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
53937+ goto out_rcu_unlock;
53938+ rcu_read_unlock();
53939+
53940+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
53941+
53942+ return;
53943+out_rcu_unlock:
53944+ rcu_read_unlock();
53945+ return;
53946+}
53947diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
53948new file mode 100644
53949index 0000000..5556be3
53950--- /dev/null
53951+++ b/grsecurity/gracl_segv.c
53952@@ -0,0 +1,299 @@
53953+#include <linux/kernel.h>
53954+#include <linux/mm.h>
53955+#include <asm/uaccess.h>
53956+#include <asm/errno.h>
53957+#include <asm/mman.h>
53958+#include <net/sock.h>
53959+#include <linux/file.h>
53960+#include <linux/fs.h>
53961+#include <linux/net.h>
53962+#include <linux/in.h>
53963+#include <linux/slab.h>
53964+#include <linux/types.h>
53965+#include <linux/sched.h>
53966+#include <linux/timer.h>
53967+#include <linux/gracl.h>
53968+#include <linux/grsecurity.h>
53969+#include <linux/grinternal.h>
53970+
53971+static struct crash_uid *uid_set;
53972+static unsigned short uid_used;
53973+static DEFINE_SPINLOCK(gr_uid_lock);
53974+extern rwlock_t gr_inode_lock;
53975+extern struct acl_subject_label *
53976+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
53977+ struct acl_role_label *role);
53978+
53979+#ifdef CONFIG_BTRFS_FS
53980+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
53981+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
53982+#endif
53983+
53984+static inline dev_t __get_dev(const struct dentry *dentry)
53985+{
53986+#ifdef CONFIG_BTRFS_FS
53987+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
53988+ return get_btrfs_dev_from_inode(dentry->d_inode);
53989+ else
53990+#endif
53991+ return dentry->d_inode->i_sb->s_dev;
53992+}
53993+
53994+int
53995+gr_init_uidset(void)
53996+{
53997+ uid_set =
53998+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
53999+ uid_used = 0;
54000+
54001+ return uid_set ? 1 : 0;
54002+}
54003+
54004+void
54005+gr_free_uidset(void)
54006+{
54007+ if (uid_set)
54008+ kfree(uid_set);
54009+
54010+ return;
54011+}
54012+
54013+int
54014+gr_find_uid(const uid_t uid)
54015+{
54016+ struct crash_uid *tmp = uid_set;
54017+ uid_t buid;
54018+ int low = 0, high = uid_used - 1, mid;
54019+
54020+ while (high >= low) {
54021+ mid = (low + high) >> 1;
54022+ buid = tmp[mid].uid;
54023+ if (buid == uid)
54024+ return mid;
54025+ if (buid > uid)
54026+ high = mid - 1;
54027+ if (buid < uid)
54028+ low = mid + 1;
54029+ }
54030+
54031+ return -1;
54032+}
54033+
54034+static __inline__ void
54035+gr_insertsort(void)
54036+{
54037+ unsigned short i, j;
54038+ struct crash_uid index;
54039+
54040+ for (i = 1; i < uid_used; i++) {
54041+ index = uid_set[i];
54042+ j = i;
54043+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
54044+ uid_set[j] = uid_set[j - 1];
54045+ j--;
54046+ }
54047+ uid_set[j] = index;
54048+ }
54049+
54050+ return;
54051+}
54052+
54053+static __inline__ void
54054+gr_insert_uid(const uid_t uid, const unsigned long expires)
54055+{
54056+ int loc;
54057+
54058+ if (uid_used == GR_UIDTABLE_MAX)
54059+ return;
54060+
54061+ loc = gr_find_uid(uid);
54062+
54063+ if (loc >= 0) {
54064+ uid_set[loc].expires = expires;
54065+ return;
54066+ }
54067+
54068+ uid_set[uid_used].uid = uid;
54069+ uid_set[uid_used].expires = expires;
54070+ uid_used++;
54071+
54072+ gr_insertsort();
54073+
54074+ return;
54075+}
54076+
54077+void
54078+gr_remove_uid(const unsigned short loc)
54079+{
54080+ unsigned short i;
54081+
54082+ for (i = loc + 1; i < uid_used; i++)
54083+ uid_set[i - 1] = uid_set[i];
54084+
54085+ uid_used--;
54086+
54087+ return;
54088+}
54089+
54090+int
54091+gr_check_crash_uid(const uid_t uid)
54092+{
54093+ int loc;
54094+ int ret = 0;
54095+
54096+ if (unlikely(!gr_acl_is_enabled()))
54097+ return 0;
54098+
54099+ spin_lock(&gr_uid_lock);
54100+ loc = gr_find_uid(uid);
54101+
54102+ if (loc < 0)
54103+ goto out_unlock;
54104+
54105+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
54106+ gr_remove_uid(loc);
54107+ else
54108+ ret = 1;
54109+
54110+out_unlock:
54111+ spin_unlock(&gr_uid_lock);
54112+ return ret;
54113+}
54114+
54115+static __inline__ int
54116+proc_is_setxid(const struct cred *cred)
54117+{
54118+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
54119+ cred->uid != cred->fsuid)
54120+ return 1;
54121+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
54122+ cred->gid != cred->fsgid)
54123+ return 1;
54124+
54125+ return 0;
54126+}
54127+
54128+extern int gr_fake_force_sig(int sig, struct task_struct *t);
54129+
54130+void
54131+gr_handle_crash(struct task_struct *task, const int sig)
54132+{
54133+ struct acl_subject_label *curr;
54134+ struct task_struct *tsk, *tsk2;
54135+ const struct cred *cred;
54136+ const struct cred *cred2;
54137+
54138+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
54139+ return;
54140+
54141+ if (unlikely(!gr_acl_is_enabled()))
54142+ return;
54143+
54144+ curr = task->acl;
54145+
54146+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
54147+ return;
54148+
54149+ if (time_before_eq(curr->expires, get_seconds())) {
54150+ curr->expires = 0;
54151+ curr->crashes = 0;
54152+ }
54153+
54154+ curr->crashes++;
54155+
54156+ if (!curr->expires)
54157+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
54158+
54159+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
54160+ time_after(curr->expires, get_seconds())) {
54161+ rcu_read_lock();
54162+ cred = __task_cred(task);
54163+ if (cred->uid && proc_is_setxid(cred)) {
54164+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
54165+ spin_lock(&gr_uid_lock);
54166+ gr_insert_uid(cred->uid, curr->expires);
54167+ spin_unlock(&gr_uid_lock);
54168+ curr->expires = 0;
54169+ curr->crashes = 0;
54170+ read_lock(&tasklist_lock);
54171+ do_each_thread(tsk2, tsk) {
54172+ cred2 = __task_cred(tsk);
54173+ if (tsk != task && cred2->uid == cred->uid)
54174+ gr_fake_force_sig(SIGKILL, tsk);
54175+ } while_each_thread(tsk2, tsk);
54176+ read_unlock(&tasklist_lock);
54177+ } else {
54178+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
54179+ read_lock(&tasklist_lock);
54180+ read_lock(&grsec_exec_file_lock);
54181+ do_each_thread(tsk2, tsk) {
54182+ if (likely(tsk != task)) {
54183+ // if this thread has the same subject as the one that triggered
54184+ // RES_CRASH and it's the same binary, kill it
54185+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
54186+ gr_fake_force_sig(SIGKILL, tsk);
54187+ }
54188+ } while_each_thread(tsk2, tsk);
54189+ read_unlock(&grsec_exec_file_lock);
54190+ read_unlock(&tasklist_lock);
54191+ }
54192+ rcu_read_unlock();
54193+ }
54194+
54195+ return;
54196+}
54197+
54198+int
54199+gr_check_crash_exec(const struct file *filp)
54200+{
54201+ struct acl_subject_label *curr;
54202+
54203+ if (unlikely(!gr_acl_is_enabled()))
54204+ return 0;
54205+
54206+ read_lock(&gr_inode_lock);
54207+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
54208+ __get_dev(filp->f_path.dentry),
54209+ current->role);
54210+ read_unlock(&gr_inode_lock);
54211+
54212+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
54213+ (!curr->crashes && !curr->expires))
54214+ return 0;
54215+
54216+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
54217+ time_after(curr->expires, get_seconds()))
54218+ return 1;
54219+ else if (time_before_eq(curr->expires, get_seconds())) {
54220+ curr->crashes = 0;
54221+ curr->expires = 0;
54222+ }
54223+
54224+ return 0;
54225+}
54226+
54227+void
54228+gr_handle_alertkill(struct task_struct *task)
54229+{
54230+ struct acl_subject_label *curracl;
54231+ __u32 curr_ip;
54232+ struct task_struct *p, *p2;
54233+
54234+ if (unlikely(!gr_acl_is_enabled()))
54235+ return;
54236+
54237+ curracl = task->acl;
54238+ curr_ip = task->signal->curr_ip;
54239+
54240+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
54241+ read_lock(&tasklist_lock);
54242+ do_each_thread(p2, p) {
54243+ if (p->signal->curr_ip == curr_ip)
54244+ gr_fake_force_sig(SIGKILL, p);
54245+ } while_each_thread(p2, p);
54246+ read_unlock(&tasklist_lock);
54247+ } else if (curracl->mode & GR_KILLPROC)
54248+ gr_fake_force_sig(SIGKILL, task);
54249+
54250+ return;
54251+}
54252diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
54253new file mode 100644
54254index 0000000..9d83a69
54255--- /dev/null
54256+++ b/grsecurity/gracl_shm.c
54257@@ -0,0 +1,40 @@
54258+#include <linux/kernel.h>
54259+#include <linux/mm.h>
54260+#include <linux/sched.h>
54261+#include <linux/file.h>
54262+#include <linux/ipc.h>
54263+#include <linux/gracl.h>
54264+#include <linux/grsecurity.h>
54265+#include <linux/grinternal.h>
54266+
54267+int
54268+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54269+ const time_t shm_createtime, const uid_t cuid, const int shmid)
54270+{
54271+ struct task_struct *task;
54272+
54273+ if (!gr_acl_is_enabled())
54274+ return 1;
54275+
54276+ rcu_read_lock();
54277+ read_lock(&tasklist_lock);
54278+
54279+ task = find_task_by_vpid(shm_cprid);
54280+
54281+ if (unlikely(!task))
54282+ task = find_task_by_vpid(shm_lapid);
54283+
54284+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
54285+ (task->pid == shm_lapid)) &&
54286+ (task->acl->mode & GR_PROTSHM) &&
54287+ (task->acl != current->acl))) {
54288+ read_unlock(&tasklist_lock);
54289+ rcu_read_unlock();
54290+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
54291+ return 0;
54292+ }
54293+ read_unlock(&tasklist_lock);
54294+ rcu_read_unlock();
54295+
54296+ return 1;
54297+}
54298diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
54299new file mode 100644
54300index 0000000..bc0be01
54301--- /dev/null
54302+++ b/grsecurity/grsec_chdir.c
54303@@ -0,0 +1,19 @@
54304+#include <linux/kernel.h>
54305+#include <linux/sched.h>
54306+#include <linux/fs.h>
54307+#include <linux/file.h>
54308+#include <linux/grsecurity.h>
54309+#include <linux/grinternal.h>
54310+
54311+void
54312+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
54313+{
54314+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
54315+ if ((grsec_enable_chdir && grsec_enable_group &&
54316+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
54317+ !grsec_enable_group)) {
54318+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
54319+ }
54320+#endif
54321+ return;
54322+}
54323diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
54324new file mode 100644
54325index 0000000..a2dc675
54326--- /dev/null
54327+++ b/grsecurity/grsec_chroot.c
54328@@ -0,0 +1,351 @@
54329+#include <linux/kernel.h>
54330+#include <linux/module.h>
54331+#include <linux/sched.h>
54332+#include <linux/file.h>
54333+#include <linux/fs.h>
54334+#include <linux/mount.h>
54335+#include <linux/types.h>
54336+#include <linux/pid_namespace.h>
54337+#include <linux/grsecurity.h>
54338+#include <linux/grinternal.h>
54339+
54340+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
54341+{
54342+#ifdef CONFIG_GRKERNSEC
54343+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
54344+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
54345+ task->gr_is_chrooted = 1;
54346+ else
54347+ task->gr_is_chrooted = 0;
54348+
54349+ task->gr_chroot_dentry = path->dentry;
54350+#endif
54351+ return;
54352+}
54353+
54354+void gr_clear_chroot_entries(struct task_struct *task)
54355+{
54356+#ifdef CONFIG_GRKERNSEC
54357+ task->gr_is_chrooted = 0;
54358+ task->gr_chroot_dentry = NULL;
54359+#endif
54360+ return;
54361+}
54362+
54363+int
54364+gr_handle_chroot_unix(const pid_t pid)
54365+{
54366+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
54367+ struct task_struct *p;
54368+
54369+ if (unlikely(!grsec_enable_chroot_unix))
54370+ return 1;
54371+
54372+ if (likely(!proc_is_chrooted(current)))
54373+ return 1;
54374+
54375+ rcu_read_lock();
54376+ read_lock(&tasklist_lock);
54377+ p = find_task_by_vpid_unrestricted(pid);
54378+ if (unlikely(p && !have_same_root(current, p))) {
54379+ read_unlock(&tasklist_lock);
54380+ rcu_read_unlock();
54381+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
54382+ return 0;
54383+ }
54384+ read_unlock(&tasklist_lock);
54385+ rcu_read_unlock();
54386+#endif
54387+ return 1;
54388+}
54389+
54390+int
54391+gr_handle_chroot_nice(void)
54392+{
54393+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54394+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
54395+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
54396+ return -EPERM;
54397+ }
54398+#endif
54399+ return 0;
54400+}
54401+
54402+int
54403+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
54404+{
54405+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54406+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
54407+ && proc_is_chrooted(current)) {
54408+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
54409+ return -EACCES;
54410+ }
54411+#endif
54412+ return 0;
54413+}
54414+
54415+int
54416+gr_handle_chroot_rawio(const struct inode *inode)
54417+{
54418+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54419+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
54420+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
54421+ return 1;
54422+#endif
54423+ return 0;
54424+}
54425+
54426+int
54427+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
54428+{
54429+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54430+ struct task_struct *p;
54431+ int ret = 0;
54432+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
54433+ return ret;
54434+
54435+ read_lock(&tasklist_lock);
54436+ do_each_pid_task(pid, type, p) {
54437+ if (!have_same_root(current, p)) {
54438+ ret = 1;
54439+ goto out;
54440+ }
54441+ } while_each_pid_task(pid, type, p);
54442+out:
54443+ read_unlock(&tasklist_lock);
54444+ return ret;
54445+#endif
54446+ return 0;
54447+}
54448+
54449+int
54450+gr_pid_is_chrooted(struct task_struct *p)
54451+{
54452+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54453+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
54454+ return 0;
54455+
54456+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
54457+ !have_same_root(current, p)) {
54458+ return 1;
54459+ }
54460+#endif
54461+ return 0;
54462+}
54463+
54464+EXPORT_SYMBOL(gr_pid_is_chrooted);
54465+
54466+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
54467+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
54468+{
54469+ struct path path, currentroot;
54470+ int ret = 0;
54471+
54472+ path.dentry = (struct dentry *)u_dentry;
54473+ path.mnt = (struct vfsmount *)u_mnt;
54474+ get_fs_root(current->fs, &currentroot);
54475+ if (path_is_under(&path, &currentroot))
54476+ ret = 1;
54477+ path_put(&currentroot);
54478+
54479+ return ret;
54480+}
54481+#endif
54482+
54483+int
54484+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
54485+{
54486+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54487+ if (!grsec_enable_chroot_fchdir)
54488+ return 1;
54489+
54490+ if (!proc_is_chrooted(current))
54491+ return 1;
54492+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
54493+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
54494+ return 0;
54495+ }
54496+#endif
54497+ return 1;
54498+}
54499+
54500+int
54501+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54502+ const time_t shm_createtime)
54503+{
54504+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54505+ struct task_struct *p;
54506+ time_t starttime;
54507+
54508+ if (unlikely(!grsec_enable_chroot_shmat))
54509+ return 1;
54510+
54511+ if (likely(!proc_is_chrooted(current)))
54512+ return 1;
54513+
54514+ rcu_read_lock();
54515+ read_lock(&tasklist_lock);
54516+
54517+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
54518+ starttime = p->start_time.tv_sec;
54519+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
54520+ if (have_same_root(current, p)) {
54521+ goto allow;
54522+ } else {
54523+ read_unlock(&tasklist_lock);
54524+ rcu_read_unlock();
54525+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
54526+ return 0;
54527+ }
54528+ }
54529+ /* creator exited, pid reuse, fall through to next check */
54530+ }
54531+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
54532+ if (unlikely(!have_same_root(current, p))) {
54533+ read_unlock(&tasklist_lock);
54534+ rcu_read_unlock();
54535+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
54536+ return 0;
54537+ }
54538+ }
54539+
54540+allow:
54541+ read_unlock(&tasklist_lock);
54542+ rcu_read_unlock();
54543+#endif
54544+ return 1;
54545+}
54546+
54547+void
54548+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
54549+{
54550+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
54551+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
54552+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
54553+#endif
54554+ return;
54555+}
54556+
54557+int
54558+gr_handle_chroot_mknod(const struct dentry *dentry,
54559+ const struct vfsmount *mnt, const int mode)
54560+{
54561+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
54562+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
54563+ proc_is_chrooted(current)) {
54564+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
54565+ return -EPERM;
54566+ }
54567+#endif
54568+ return 0;
54569+}
54570+
54571+int
54572+gr_handle_chroot_mount(const struct dentry *dentry,
54573+ const struct vfsmount *mnt, const char *dev_name)
54574+{
54575+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54576+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
54577+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
54578+ return -EPERM;
54579+ }
54580+#endif
54581+ return 0;
54582+}
54583+
54584+int
54585+gr_handle_chroot_pivot(void)
54586+{
54587+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54588+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
54589+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
54590+ return -EPERM;
54591+ }
54592+#endif
54593+ return 0;
54594+}
54595+
54596+int
54597+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
54598+{
54599+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54600+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
54601+ !gr_is_outside_chroot(dentry, mnt)) {
54602+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
54603+ return -EPERM;
54604+ }
54605+#endif
54606+ return 0;
54607+}
54608+
54609+extern const char *captab_log[];
54610+extern int captab_log_entries;
54611+
54612+int
54613+gr_chroot_is_capable(const int cap)
54614+{
54615+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54616+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54617+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54618+ if (cap_raised(chroot_caps, cap)) {
54619+ const struct cred *creds = current_cred();
54620+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
54621+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
54622+ }
54623+ return 0;
54624+ }
54625+ }
54626+#endif
54627+ return 1;
54628+}
54629+
54630+int
54631+gr_chroot_is_capable_nolog(const int cap)
54632+{
54633+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54634+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54635+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54636+ if (cap_raised(chroot_caps, cap)) {
54637+ return 0;
54638+ }
54639+ }
54640+#endif
54641+ return 1;
54642+}
54643+
54644+int
54645+gr_handle_chroot_sysctl(const int op)
54646+{
54647+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
54648+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
54649+ proc_is_chrooted(current))
54650+ return -EACCES;
54651+#endif
54652+ return 0;
54653+}
54654+
54655+void
54656+gr_handle_chroot_chdir(struct path *path)
54657+{
54658+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54659+ if (grsec_enable_chroot_chdir)
54660+ set_fs_pwd(current->fs, path);
54661+#endif
54662+ return;
54663+}
54664+
54665+int
54666+gr_handle_chroot_chmod(const struct dentry *dentry,
54667+ const struct vfsmount *mnt, const int mode)
54668+{
54669+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54670+ /* allow chmod +s on directories, but not files */
54671+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
54672+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
54673+ proc_is_chrooted(current)) {
54674+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
54675+ return -EPERM;
54676+ }
54677+#endif
54678+ return 0;
54679+}
54680diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
54681new file mode 100644
54682index 0000000..213ad8b
54683--- /dev/null
54684+++ b/grsecurity/grsec_disabled.c
54685@@ -0,0 +1,437 @@
54686+#include <linux/kernel.h>
54687+#include <linux/module.h>
54688+#include <linux/sched.h>
54689+#include <linux/file.h>
54690+#include <linux/fs.h>
54691+#include <linux/kdev_t.h>
54692+#include <linux/net.h>
54693+#include <linux/in.h>
54694+#include <linux/ip.h>
54695+#include <linux/skbuff.h>
54696+#include <linux/sysctl.h>
54697+
54698+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54699+void
54700+pax_set_initial_flags(struct linux_binprm *bprm)
54701+{
54702+ return;
54703+}
54704+#endif
54705+
54706+#ifdef CONFIG_SYSCTL
54707+__u32
54708+gr_handle_sysctl(const struct ctl_table * table, const int op)
54709+{
54710+ return 0;
54711+}
54712+#endif
54713+
54714+#ifdef CONFIG_TASKSTATS
54715+int gr_is_taskstats_denied(int pid)
54716+{
54717+ return 0;
54718+}
54719+#endif
54720+
54721+int
54722+gr_acl_is_enabled(void)
54723+{
54724+ return 0;
54725+}
54726+
54727+void
54728+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
54729+{
54730+ return;
54731+}
54732+
54733+int
54734+gr_handle_rawio(const struct inode *inode)
54735+{
54736+ return 0;
54737+}
54738+
54739+void
54740+gr_acl_handle_psacct(struct task_struct *task, const long code)
54741+{
54742+ return;
54743+}
54744+
54745+int
54746+gr_handle_ptrace(struct task_struct *task, const long request)
54747+{
54748+ return 0;
54749+}
54750+
54751+int
54752+gr_handle_proc_ptrace(struct task_struct *task)
54753+{
54754+ return 0;
54755+}
54756+
54757+void
54758+gr_learn_resource(const struct task_struct *task,
54759+ const int res, const unsigned long wanted, const int gt)
54760+{
54761+ return;
54762+}
54763+
54764+int
54765+gr_set_acls(const int type)
54766+{
54767+ return 0;
54768+}
54769+
54770+int
54771+gr_check_hidden_task(const struct task_struct *tsk)
54772+{
54773+ return 0;
54774+}
54775+
54776+int
54777+gr_check_protected_task(const struct task_struct *task)
54778+{
54779+ return 0;
54780+}
54781+
54782+int
54783+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
54784+{
54785+ return 0;
54786+}
54787+
54788+void
54789+gr_copy_label(struct task_struct *tsk)
54790+{
54791+ return;
54792+}
54793+
54794+void
54795+gr_set_pax_flags(struct task_struct *task)
54796+{
54797+ return;
54798+}
54799+
54800+int
54801+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
54802+ const int unsafe_share)
54803+{
54804+ return 0;
54805+}
54806+
54807+void
54808+gr_handle_delete(const ino_t ino, const dev_t dev)
54809+{
54810+ return;
54811+}
54812+
54813+void
54814+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
54815+{
54816+ return;
54817+}
54818+
54819+void
54820+gr_handle_crash(struct task_struct *task, const int sig)
54821+{
54822+ return;
54823+}
54824+
54825+int
54826+gr_check_crash_exec(const struct file *filp)
54827+{
54828+ return 0;
54829+}
54830+
54831+int
54832+gr_check_crash_uid(const uid_t uid)
54833+{
54834+ return 0;
54835+}
54836+
54837+void
54838+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
54839+ struct dentry *old_dentry,
54840+ struct dentry *new_dentry,
54841+ struct vfsmount *mnt, const __u8 replace)
54842+{
54843+ return;
54844+}
54845+
54846+int
54847+gr_search_socket(const int family, const int type, const int protocol)
54848+{
54849+ return 1;
54850+}
54851+
54852+int
54853+gr_search_connectbind(const int mode, const struct socket *sock,
54854+ const struct sockaddr_in *addr)
54855+{
54856+ return 0;
54857+}
54858+
54859+void
54860+gr_handle_alertkill(struct task_struct *task)
54861+{
54862+ return;
54863+}
54864+
54865+__u32
54866+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
54867+{
54868+ return 1;
54869+}
54870+
54871+__u32
54872+gr_acl_handle_hidden_file(const struct dentry * dentry,
54873+ const struct vfsmount * mnt)
54874+{
54875+ return 1;
54876+}
54877+
54878+__u32
54879+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54880+ int acc_mode)
54881+{
54882+ return 1;
54883+}
54884+
54885+__u32
54886+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
54887+{
54888+ return 1;
54889+}
54890+
54891+__u32
54892+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
54893+{
54894+ return 1;
54895+}
54896+
54897+int
54898+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
54899+ unsigned int *vm_flags)
54900+{
54901+ return 1;
54902+}
54903+
54904+__u32
54905+gr_acl_handle_truncate(const struct dentry * dentry,
54906+ const struct vfsmount * mnt)
54907+{
54908+ return 1;
54909+}
54910+
54911+__u32
54912+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
54913+{
54914+ return 1;
54915+}
54916+
54917+__u32
54918+gr_acl_handle_access(const struct dentry * dentry,
54919+ const struct vfsmount * mnt, const int fmode)
54920+{
54921+ return 1;
54922+}
54923+
54924+__u32
54925+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
54926+ umode_t *mode)
54927+{
54928+ return 1;
54929+}
54930+
54931+__u32
54932+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
54933+{
54934+ return 1;
54935+}
54936+
54937+__u32
54938+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
54939+{
54940+ return 1;
54941+}
54942+
54943+void
54944+grsecurity_init(void)
54945+{
54946+ return;
54947+}
54948+
54949+umode_t gr_acl_umask(void)
54950+{
54951+ return 0;
54952+}
54953+
54954+__u32
54955+gr_acl_handle_mknod(const struct dentry * new_dentry,
54956+ const struct dentry * parent_dentry,
54957+ const struct vfsmount * parent_mnt,
54958+ const int mode)
54959+{
54960+ return 1;
54961+}
54962+
54963+__u32
54964+gr_acl_handle_mkdir(const struct dentry * new_dentry,
54965+ const struct dentry * parent_dentry,
54966+ const struct vfsmount * parent_mnt)
54967+{
54968+ return 1;
54969+}
54970+
54971+__u32
54972+gr_acl_handle_symlink(const struct dentry * new_dentry,
54973+ const struct dentry * parent_dentry,
54974+ const struct vfsmount * parent_mnt, const char *from)
54975+{
54976+ return 1;
54977+}
54978+
54979+__u32
54980+gr_acl_handle_link(const struct dentry * new_dentry,
54981+ const struct dentry * parent_dentry,
54982+ const struct vfsmount * parent_mnt,
54983+ const struct dentry * old_dentry,
54984+ const struct vfsmount * old_mnt, const char *to)
54985+{
54986+ return 1;
54987+}
54988+
54989+int
54990+gr_acl_handle_rename(const struct dentry *new_dentry,
54991+ const struct dentry *parent_dentry,
54992+ const struct vfsmount *parent_mnt,
54993+ const struct dentry *old_dentry,
54994+ const struct inode *old_parent_inode,
54995+ const struct vfsmount *old_mnt, const char *newname)
54996+{
54997+ return 0;
54998+}
54999+
55000+int
55001+gr_acl_handle_filldir(const struct file *file, const char *name,
55002+ const int namelen, const ino_t ino)
55003+{
55004+ return 1;
55005+}
55006+
55007+int
55008+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55009+ const time_t shm_createtime, const uid_t cuid, const int shmid)
55010+{
55011+ return 1;
55012+}
55013+
55014+int
55015+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
55016+{
55017+ return 0;
55018+}
55019+
55020+int
55021+gr_search_accept(const struct socket *sock)
55022+{
55023+ return 0;
55024+}
55025+
55026+int
55027+gr_search_listen(const struct socket *sock)
55028+{
55029+ return 0;
55030+}
55031+
55032+int
55033+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
55034+{
55035+ return 0;
55036+}
55037+
55038+__u32
55039+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
55040+{
55041+ return 1;
55042+}
55043+
55044+__u32
55045+gr_acl_handle_creat(const struct dentry * dentry,
55046+ const struct dentry * p_dentry,
55047+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
55048+ const int imode)
55049+{
55050+ return 1;
55051+}
55052+
55053+void
55054+gr_acl_handle_exit(void)
55055+{
55056+ return;
55057+}
55058+
55059+int
55060+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
55061+{
55062+ return 1;
55063+}
55064+
55065+void
55066+gr_set_role_label(const uid_t uid, const gid_t gid)
55067+{
55068+ return;
55069+}
55070+
55071+int
55072+gr_acl_handle_procpidmem(const struct task_struct *task)
55073+{
55074+ return 0;
55075+}
55076+
55077+int
55078+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
55079+{
55080+ return 0;
55081+}
55082+
55083+int
55084+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
55085+{
55086+ return 0;
55087+}
55088+
55089+void
55090+gr_set_kernel_label(struct task_struct *task)
55091+{
55092+ return;
55093+}
55094+
55095+int
55096+gr_check_user_change(int real, int effective, int fs)
55097+{
55098+ return 0;
55099+}
55100+
55101+int
55102+gr_check_group_change(int real, int effective, int fs)
55103+{
55104+ return 0;
55105+}
55106+
55107+int gr_acl_enable_at_secure(void)
55108+{
55109+ return 0;
55110+}
55111+
55112+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
55113+{
55114+ return dentry->d_inode->i_sb->s_dev;
55115+}
55116+
55117+EXPORT_SYMBOL(gr_learn_resource);
55118+EXPORT_SYMBOL(gr_set_kernel_label);
55119+#ifdef CONFIG_SECURITY
55120+EXPORT_SYMBOL(gr_check_user_change);
55121+EXPORT_SYMBOL(gr_check_group_change);
55122+#endif
55123diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
55124new file mode 100644
55125index 0000000..2b05ada
55126--- /dev/null
55127+++ b/grsecurity/grsec_exec.c
55128@@ -0,0 +1,146 @@
55129+#include <linux/kernel.h>
55130+#include <linux/sched.h>
55131+#include <linux/file.h>
55132+#include <linux/binfmts.h>
55133+#include <linux/fs.h>
55134+#include <linux/types.h>
55135+#include <linux/grdefs.h>
55136+#include <linux/grsecurity.h>
55137+#include <linux/grinternal.h>
55138+#include <linux/capability.h>
55139+#include <linux/module.h>
55140+
55141+#include <asm/uaccess.h>
55142+
55143+#ifdef CONFIG_GRKERNSEC_EXECLOG
55144+static char gr_exec_arg_buf[132];
55145+static DEFINE_MUTEX(gr_exec_arg_mutex);
55146+#endif
55147+
55148+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
55149+
55150+void
55151+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
55152+{
55153+#ifdef CONFIG_GRKERNSEC_EXECLOG
55154+ char *grarg = gr_exec_arg_buf;
55155+ unsigned int i, x, execlen = 0;
55156+ char c;
55157+
55158+ if (!((grsec_enable_execlog && grsec_enable_group &&
55159+ in_group_p(grsec_audit_gid))
55160+ || (grsec_enable_execlog && !grsec_enable_group)))
55161+ return;
55162+
55163+ mutex_lock(&gr_exec_arg_mutex);
55164+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
55165+
55166+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
55167+ const char __user *p;
55168+ unsigned int len;
55169+
55170+ p = get_user_arg_ptr(argv, i);
55171+ if (IS_ERR(p))
55172+ goto log;
55173+
55174+ len = strnlen_user(p, 128 - execlen);
55175+ if (len > 128 - execlen)
55176+ len = 128 - execlen;
55177+ else if (len > 0)
55178+ len--;
55179+ if (copy_from_user(grarg + execlen, p, len))
55180+ goto log;
55181+
55182+ /* rewrite unprintable characters */
55183+ for (x = 0; x < len; x++) {
55184+ c = *(grarg + execlen + x);
55185+ if (c < 32 || c > 126)
55186+ *(grarg + execlen + x) = ' ';
55187+ }
55188+
55189+ execlen += len;
55190+ *(grarg + execlen) = ' ';
55191+ *(grarg + execlen + 1) = '\0';
55192+ execlen++;
55193+ }
55194+
55195+ log:
55196+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
55197+ bprm->file->f_path.mnt, grarg);
55198+ mutex_unlock(&gr_exec_arg_mutex);
55199+#endif
55200+ return;
55201+}
55202+
55203+#ifdef CONFIG_GRKERNSEC
55204+extern int gr_acl_is_capable(const int cap);
55205+extern int gr_acl_is_capable_nolog(const int cap);
55206+extern int gr_chroot_is_capable(const int cap);
55207+extern int gr_chroot_is_capable_nolog(const int cap);
55208+#endif
55209+
55210+const char *captab_log[] = {
55211+ "CAP_CHOWN",
55212+ "CAP_DAC_OVERRIDE",
55213+ "CAP_DAC_READ_SEARCH",
55214+ "CAP_FOWNER",
55215+ "CAP_FSETID",
55216+ "CAP_KILL",
55217+ "CAP_SETGID",
55218+ "CAP_SETUID",
55219+ "CAP_SETPCAP",
55220+ "CAP_LINUX_IMMUTABLE",
55221+ "CAP_NET_BIND_SERVICE",
55222+ "CAP_NET_BROADCAST",
55223+ "CAP_NET_ADMIN",
55224+ "CAP_NET_RAW",
55225+ "CAP_IPC_LOCK",
55226+ "CAP_IPC_OWNER",
55227+ "CAP_SYS_MODULE",
55228+ "CAP_SYS_RAWIO",
55229+ "CAP_SYS_CHROOT",
55230+ "CAP_SYS_PTRACE",
55231+ "CAP_SYS_PACCT",
55232+ "CAP_SYS_ADMIN",
55233+ "CAP_SYS_BOOT",
55234+ "CAP_SYS_NICE",
55235+ "CAP_SYS_RESOURCE",
55236+ "CAP_SYS_TIME",
55237+ "CAP_SYS_TTY_CONFIG",
55238+ "CAP_MKNOD",
55239+ "CAP_LEASE",
55240+ "CAP_AUDIT_WRITE",
55241+ "CAP_AUDIT_CONTROL",
55242+ "CAP_SETFCAP",
55243+ "CAP_MAC_OVERRIDE",
55244+ "CAP_MAC_ADMIN",
55245+ "CAP_SYSLOG",
55246+ "CAP_WAKE_ALARM"
55247+};
55248+
55249+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
55250+
55251+int gr_is_capable(const int cap)
55252+{
55253+#ifdef CONFIG_GRKERNSEC
55254+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
55255+ return 1;
55256+ return 0;
55257+#else
55258+ return 1;
55259+#endif
55260+}
55261+
55262+int gr_is_capable_nolog(const int cap)
55263+{
55264+#ifdef CONFIG_GRKERNSEC
55265+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
55266+ return 1;
55267+ return 0;
55268+#else
55269+ return 1;
55270+#endif
55271+}
55272+
55273+EXPORT_SYMBOL(gr_is_capable);
55274+EXPORT_SYMBOL(gr_is_capable_nolog);
55275diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
55276new file mode 100644
55277index 0000000..d3ee748
55278--- /dev/null
55279+++ b/grsecurity/grsec_fifo.c
55280@@ -0,0 +1,24 @@
55281+#include <linux/kernel.h>
55282+#include <linux/sched.h>
55283+#include <linux/fs.h>
55284+#include <linux/file.h>
55285+#include <linux/grinternal.h>
55286+
55287+int
55288+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
55289+ const struct dentry *dir, const int flag, const int acc_mode)
55290+{
55291+#ifdef CONFIG_GRKERNSEC_FIFO
55292+ const struct cred *cred = current_cred();
55293+
55294+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
55295+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
55296+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
55297+ (cred->fsuid != dentry->d_inode->i_uid)) {
55298+ if (!inode_permission(dentry->d_inode, acc_mode))
55299+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
55300+ return -EACCES;
55301+ }
55302+#endif
55303+ return 0;
55304+}
55305diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
55306new file mode 100644
55307index 0000000..8ca18bf
55308--- /dev/null
55309+++ b/grsecurity/grsec_fork.c
55310@@ -0,0 +1,23 @@
55311+#include <linux/kernel.h>
55312+#include <linux/sched.h>
55313+#include <linux/grsecurity.h>
55314+#include <linux/grinternal.h>
55315+#include <linux/errno.h>
55316+
55317+void
55318+gr_log_forkfail(const int retval)
55319+{
55320+#ifdef CONFIG_GRKERNSEC_FORKFAIL
55321+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
55322+ switch (retval) {
55323+ case -EAGAIN:
55324+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
55325+ break;
55326+ case -ENOMEM:
55327+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
55328+ break;
55329+ }
55330+ }
55331+#endif
55332+ return;
55333+}
55334diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
55335new file mode 100644
55336index 0000000..01ddde4
55337--- /dev/null
55338+++ b/grsecurity/grsec_init.c
55339@@ -0,0 +1,277 @@
55340+#include <linux/kernel.h>
55341+#include <linux/sched.h>
55342+#include <linux/mm.h>
55343+#include <linux/gracl.h>
55344+#include <linux/slab.h>
55345+#include <linux/vmalloc.h>
55346+#include <linux/percpu.h>
55347+#include <linux/module.h>
55348+
55349+int grsec_enable_ptrace_readexec;
55350+int grsec_enable_setxid;
55351+int grsec_enable_brute;
55352+int grsec_enable_link;
55353+int grsec_enable_dmesg;
55354+int grsec_enable_harden_ptrace;
55355+int grsec_enable_fifo;
55356+int grsec_enable_execlog;
55357+int grsec_enable_signal;
55358+int grsec_enable_forkfail;
55359+int grsec_enable_audit_ptrace;
55360+int grsec_enable_time;
55361+int grsec_enable_audit_textrel;
55362+int grsec_enable_group;
55363+int grsec_audit_gid;
55364+int grsec_enable_chdir;
55365+int grsec_enable_mount;
55366+int grsec_enable_rofs;
55367+int grsec_enable_chroot_findtask;
55368+int grsec_enable_chroot_mount;
55369+int grsec_enable_chroot_shmat;
55370+int grsec_enable_chroot_fchdir;
55371+int grsec_enable_chroot_double;
55372+int grsec_enable_chroot_pivot;
55373+int grsec_enable_chroot_chdir;
55374+int grsec_enable_chroot_chmod;
55375+int grsec_enable_chroot_mknod;
55376+int grsec_enable_chroot_nice;
55377+int grsec_enable_chroot_execlog;
55378+int grsec_enable_chroot_caps;
55379+int grsec_enable_chroot_sysctl;
55380+int grsec_enable_chroot_unix;
55381+int grsec_enable_tpe;
55382+int grsec_tpe_gid;
55383+int grsec_enable_blackhole;
55384+#ifdef CONFIG_IPV6_MODULE
55385+EXPORT_SYMBOL(grsec_enable_blackhole);
55386+#endif
55387+int grsec_lastack_retries;
55388+int grsec_enable_tpe_all;
55389+int grsec_enable_tpe_invert;
55390+int grsec_enable_socket_all;
55391+int grsec_socket_all_gid;
55392+int grsec_enable_socket_client;
55393+int grsec_socket_client_gid;
55394+int grsec_enable_socket_server;
55395+int grsec_socket_server_gid;
55396+int grsec_resource_logging;
55397+int grsec_disable_privio;
55398+int grsec_enable_log_rwxmaps;
55399+int grsec_lock;
55400+
55401+DEFINE_SPINLOCK(grsec_alert_lock);
55402+unsigned long grsec_alert_wtime = 0;
55403+unsigned long grsec_alert_fyet = 0;
55404+
55405+DEFINE_SPINLOCK(grsec_audit_lock);
55406+
55407+DEFINE_RWLOCK(grsec_exec_file_lock);
55408+
55409+char *gr_shared_page[4];
55410+
55411+char *gr_alert_log_fmt;
55412+char *gr_audit_log_fmt;
55413+char *gr_alert_log_buf;
55414+char *gr_audit_log_buf;
55415+
55416+extern struct gr_arg *gr_usermode;
55417+extern unsigned char *gr_system_salt;
55418+extern unsigned char *gr_system_sum;
55419+
55420+void __init
55421+grsecurity_init(void)
55422+{
55423+ int j;
55424+ /* create the per-cpu shared pages */
55425+
55426+#ifdef CONFIG_X86
55427+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
55428+#endif
55429+
55430+ for (j = 0; j < 4; j++) {
55431+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
55432+ if (gr_shared_page[j] == NULL) {
55433+ panic("Unable to allocate grsecurity shared page");
55434+ return;
55435+ }
55436+ }
55437+
55438+ /* allocate log buffers */
55439+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
55440+ if (!gr_alert_log_fmt) {
55441+ panic("Unable to allocate grsecurity alert log format buffer");
55442+ return;
55443+ }
55444+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
55445+ if (!gr_audit_log_fmt) {
55446+ panic("Unable to allocate grsecurity audit log format buffer");
55447+ return;
55448+ }
55449+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
55450+ if (!gr_alert_log_buf) {
55451+ panic("Unable to allocate grsecurity alert log buffer");
55452+ return;
55453+ }
55454+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
55455+ if (!gr_audit_log_buf) {
55456+ panic("Unable to allocate grsecurity audit log buffer");
55457+ return;
55458+ }
55459+
55460+ /* allocate memory for authentication structure */
55461+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
55462+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
55463+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
55464+
55465+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
55466+ panic("Unable to allocate grsecurity authentication structure");
55467+ return;
55468+ }
55469+
55470+
55471+#ifdef CONFIG_GRKERNSEC_IO
55472+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
55473+ grsec_disable_privio = 1;
55474+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
55475+ grsec_disable_privio = 1;
55476+#else
55477+ grsec_disable_privio = 0;
55478+#endif
55479+#endif
55480+
55481+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55482+ /* for backward compatibility, tpe_invert always defaults to on if
55483+ enabled in the kernel
55484+ */
55485+ grsec_enable_tpe_invert = 1;
55486+#endif
55487+
55488+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
55489+#ifndef CONFIG_GRKERNSEC_SYSCTL
55490+ grsec_lock = 1;
55491+#endif
55492+
55493+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55494+ grsec_enable_audit_textrel = 1;
55495+#endif
55496+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55497+ grsec_enable_log_rwxmaps = 1;
55498+#endif
55499+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55500+ grsec_enable_group = 1;
55501+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
55502+#endif
55503+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
55504+ grsec_enable_ptrace_readexec = 1;
55505+#endif
55506+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55507+ grsec_enable_chdir = 1;
55508+#endif
55509+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55510+ grsec_enable_harden_ptrace = 1;
55511+#endif
55512+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55513+ grsec_enable_mount = 1;
55514+#endif
55515+#ifdef CONFIG_GRKERNSEC_LINK
55516+ grsec_enable_link = 1;
55517+#endif
55518+#ifdef CONFIG_GRKERNSEC_BRUTE
55519+ grsec_enable_brute = 1;
55520+#endif
55521+#ifdef CONFIG_GRKERNSEC_DMESG
55522+ grsec_enable_dmesg = 1;
55523+#endif
55524+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
55525+ grsec_enable_blackhole = 1;
55526+ grsec_lastack_retries = 4;
55527+#endif
55528+#ifdef CONFIG_GRKERNSEC_FIFO
55529+ grsec_enable_fifo = 1;
55530+#endif
55531+#ifdef CONFIG_GRKERNSEC_EXECLOG
55532+ grsec_enable_execlog = 1;
55533+#endif
55534+#ifdef CONFIG_GRKERNSEC_SETXID
55535+ grsec_enable_setxid = 1;
55536+#endif
55537+#ifdef CONFIG_GRKERNSEC_SIGNAL
55538+ grsec_enable_signal = 1;
55539+#endif
55540+#ifdef CONFIG_GRKERNSEC_FORKFAIL
55541+ grsec_enable_forkfail = 1;
55542+#endif
55543+#ifdef CONFIG_GRKERNSEC_TIME
55544+ grsec_enable_time = 1;
55545+#endif
55546+#ifdef CONFIG_GRKERNSEC_RESLOG
55547+ grsec_resource_logging = 1;
55548+#endif
55549+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55550+ grsec_enable_chroot_findtask = 1;
55551+#endif
55552+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55553+ grsec_enable_chroot_unix = 1;
55554+#endif
55555+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55556+ grsec_enable_chroot_mount = 1;
55557+#endif
55558+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55559+ grsec_enable_chroot_fchdir = 1;
55560+#endif
55561+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55562+ grsec_enable_chroot_shmat = 1;
55563+#endif
55564+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55565+ grsec_enable_audit_ptrace = 1;
55566+#endif
55567+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55568+ grsec_enable_chroot_double = 1;
55569+#endif
55570+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55571+ grsec_enable_chroot_pivot = 1;
55572+#endif
55573+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55574+ grsec_enable_chroot_chdir = 1;
55575+#endif
55576+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55577+ grsec_enable_chroot_chmod = 1;
55578+#endif
55579+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55580+ grsec_enable_chroot_mknod = 1;
55581+#endif
55582+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55583+ grsec_enable_chroot_nice = 1;
55584+#endif
55585+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55586+ grsec_enable_chroot_execlog = 1;
55587+#endif
55588+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55589+ grsec_enable_chroot_caps = 1;
55590+#endif
55591+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55592+ grsec_enable_chroot_sysctl = 1;
55593+#endif
55594+#ifdef CONFIG_GRKERNSEC_TPE
55595+ grsec_enable_tpe = 1;
55596+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
55597+#ifdef CONFIG_GRKERNSEC_TPE_ALL
55598+ grsec_enable_tpe_all = 1;
55599+#endif
55600+#endif
55601+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55602+ grsec_enable_socket_all = 1;
55603+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
55604+#endif
55605+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55606+ grsec_enable_socket_client = 1;
55607+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
55608+#endif
55609+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55610+ grsec_enable_socket_server = 1;
55611+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
55612+#endif
55613+#endif
55614+
55615+ return;
55616+}
55617diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
55618new file mode 100644
55619index 0000000..3efe141
55620--- /dev/null
55621+++ b/grsecurity/grsec_link.c
55622@@ -0,0 +1,43 @@
55623+#include <linux/kernel.h>
55624+#include <linux/sched.h>
55625+#include <linux/fs.h>
55626+#include <linux/file.h>
55627+#include <linux/grinternal.h>
55628+
55629+int
55630+gr_handle_follow_link(const struct inode *parent,
55631+ const struct inode *inode,
55632+ const struct dentry *dentry, const struct vfsmount *mnt)
55633+{
55634+#ifdef CONFIG_GRKERNSEC_LINK
55635+ const struct cred *cred = current_cred();
55636+
55637+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
55638+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
55639+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
55640+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
55641+ return -EACCES;
55642+ }
55643+#endif
55644+ return 0;
55645+}
55646+
55647+int
55648+gr_handle_hardlink(const struct dentry *dentry,
55649+ const struct vfsmount *mnt,
55650+ struct inode *inode, const int mode, const char *to)
55651+{
55652+#ifdef CONFIG_GRKERNSEC_LINK
55653+ const struct cred *cred = current_cred();
55654+
55655+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
55656+ (!S_ISREG(mode) || (mode & S_ISUID) ||
55657+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
55658+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
55659+ !capable(CAP_FOWNER) && cred->uid) {
55660+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
55661+ return -EPERM;
55662+ }
55663+#endif
55664+ return 0;
55665+}
55666diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
55667new file mode 100644
55668index 0000000..a45d2e9
55669--- /dev/null
55670+++ b/grsecurity/grsec_log.c
55671@@ -0,0 +1,322 @@
55672+#include <linux/kernel.h>
55673+#include <linux/sched.h>
55674+#include <linux/file.h>
55675+#include <linux/tty.h>
55676+#include <linux/fs.h>
55677+#include <linux/grinternal.h>
55678+
55679+#ifdef CONFIG_TREE_PREEMPT_RCU
55680+#define DISABLE_PREEMPT() preempt_disable()
55681+#define ENABLE_PREEMPT() preempt_enable()
55682+#else
55683+#define DISABLE_PREEMPT()
55684+#define ENABLE_PREEMPT()
55685+#endif
55686+
55687+#define BEGIN_LOCKS(x) \
55688+ DISABLE_PREEMPT(); \
55689+ rcu_read_lock(); \
55690+ read_lock(&tasklist_lock); \
55691+ read_lock(&grsec_exec_file_lock); \
55692+ if (x != GR_DO_AUDIT) \
55693+ spin_lock(&grsec_alert_lock); \
55694+ else \
55695+ spin_lock(&grsec_audit_lock)
55696+
55697+#define END_LOCKS(x) \
55698+ if (x != GR_DO_AUDIT) \
55699+ spin_unlock(&grsec_alert_lock); \
55700+ else \
55701+ spin_unlock(&grsec_audit_lock); \
55702+ read_unlock(&grsec_exec_file_lock); \
55703+ read_unlock(&tasklist_lock); \
55704+ rcu_read_unlock(); \
55705+ ENABLE_PREEMPT(); \
55706+ if (x == GR_DONT_AUDIT) \
55707+ gr_handle_alertkill(current)
55708+
55709+enum {
55710+ FLOODING,
55711+ NO_FLOODING
55712+};
55713+
55714+extern char *gr_alert_log_fmt;
55715+extern char *gr_audit_log_fmt;
55716+extern char *gr_alert_log_buf;
55717+extern char *gr_audit_log_buf;
55718+
55719+static int gr_log_start(int audit)
55720+{
55721+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
55722+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
55723+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55724+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
55725+ unsigned long curr_secs = get_seconds();
55726+
55727+ if (audit == GR_DO_AUDIT)
55728+ goto set_fmt;
55729+
55730+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
55731+ grsec_alert_wtime = curr_secs;
55732+ grsec_alert_fyet = 0;
55733+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
55734+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
55735+ grsec_alert_fyet++;
55736+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
55737+ grsec_alert_wtime = curr_secs;
55738+ grsec_alert_fyet++;
55739+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
55740+ return FLOODING;
55741+ }
55742+ else return FLOODING;
55743+
55744+set_fmt:
55745+#endif
55746+ memset(buf, 0, PAGE_SIZE);
55747+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
55748+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
55749+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55750+ } else if (current->signal->curr_ip) {
55751+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
55752+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
55753+ } else if (gr_acl_is_enabled()) {
55754+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
55755+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55756+ } else {
55757+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
55758+ strcpy(buf, fmt);
55759+ }
55760+
55761+ return NO_FLOODING;
55762+}
55763+
55764+static void gr_log_middle(int audit, const char *msg, va_list ap)
55765+ __attribute__ ((format (printf, 2, 0)));
55766+
55767+static void gr_log_middle(int audit, const char *msg, va_list ap)
55768+{
55769+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55770+ unsigned int len = strlen(buf);
55771+
55772+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55773+
55774+ return;
55775+}
55776+
55777+static void gr_log_middle_varargs(int audit, const char *msg, ...)
55778+ __attribute__ ((format (printf, 2, 3)));
55779+
55780+static void gr_log_middle_varargs(int audit, const char *msg, ...)
55781+{
55782+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55783+ unsigned int len = strlen(buf);
55784+ va_list ap;
55785+
55786+ va_start(ap, msg);
55787+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55788+ va_end(ap);
55789+
55790+ return;
55791+}
55792+
55793+static void gr_log_end(int audit, int append_default)
55794+{
55795+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55796+
55797+ if (append_default) {
55798+ unsigned int len = strlen(buf);
55799+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
55800+ }
55801+
55802+ printk("%s\n", buf);
55803+
55804+ return;
55805+}
55806+
55807+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
55808+{
55809+ int logtype;
55810+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
55811+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
55812+ void *voidptr = NULL;
55813+ int num1 = 0, num2 = 0;
55814+ unsigned long ulong1 = 0, ulong2 = 0;
55815+ struct dentry *dentry = NULL;
55816+ struct vfsmount *mnt = NULL;
55817+ struct file *file = NULL;
55818+ struct task_struct *task = NULL;
55819+ const struct cred *cred, *pcred;
55820+ va_list ap;
55821+
55822+ BEGIN_LOCKS(audit);
55823+ logtype = gr_log_start(audit);
55824+ if (logtype == FLOODING) {
55825+ END_LOCKS(audit);
55826+ return;
55827+ }
55828+ va_start(ap, argtypes);
55829+ switch (argtypes) {
55830+ case GR_TTYSNIFF:
55831+ task = va_arg(ap, struct task_struct *);
55832+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
55833+ break;
55834+ case GR_SYSCTL_HIDDEN:
55835+ str1 = va_arg(ap, char *);
55836+ gr_log_middle_varargs(audit, msg, result, str1);
55837+ break;
55838+ case GR_RBAC:
55839+ dentry = va_arg(ap, struct dentry *);
55840+ mnt = va_arg(ap, struct vfsmount *);
55841+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
55842+ break;
55843+ case GR_RBAC_STR:
55844+ dentry = va_arg(ap, struct dentry *);
55845+ mnt = va_arg(ap, struct vfsmount *);
55846+ str1 = va_arg(ap, char *);
55847+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
55848+ break;
55849+ case GR_STR_RBAC:
55850+ str1 = va_arg(ap, char *);
55851+ dentry = va_arg(ap, struct dentry *);
55852+ mnt = va_arg(ap, struct vfsmount *);
55853+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
55854+ break;
55855+ case GR_RBAC_MODE2:
55856+ dentry = va_arg(ap, struct dentry *);
55857+ mnt = va_arg(ap, struct vfsmount *);
55858+ str1 = va_arg(ap, char *);
55859+ str2 = va_arg(ap, char *);
55860+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
55861+ break;
55862+ case GR_RBAC_MODE3:
55863+ dentry = va_arg(ap, struct dentry *);
55864+ mnt = va_arg(ap, struct vfsmount *);
55865+ str1 = va_arg(ap, char *);
55866+ str2 = va_arg(ap, char *);
55867+ str3 = va_arg(ap, char *);
55868+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
55869+ break;
55870+ case GR_FILENAME:
55871+ dentry = va_arg(ap, struct dentry *);
55872+ mnt = va_arg(ap, struct vfsmount *);
55873+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
55874+ break;
55875+ case GR_STR_FILENAME:
55876+ str1 = va_arg(ap, char *);
55877+ dentry = va_arg(ap, struct dentry *);
55878+ mnt = va_arg(ap, struct vfsmount *);
55879+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
55880+ break;
55881+ case GR_FILENAME_STR:
55882+ dentry = va_arg(ap, struct dentry *);
55883+ mnt = va_arg(ap, struct vfsmount *);
55884+ str1 = va_arg(ap, char *);
55885+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
55886+ break;
55887+ case GR_FILENAME_TWO_INT:
55888+ dentry = va_arg(ap, struct dentry *);
55889+ mnt = va_arg(ap, struct vfsmount *);
55890+ num1 = va_arg(ap, int);
55891+ num2 = va_arg(ap, int);
55892+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
55893+ break;
55894+ case GR_FILENAME_TWO_INT_STR:
55895+ dentry = va_arg(ap, struct dentry *);
55896+ mnt = va_arg(ap, struct vfsmount *);
55897+ num1 = va_arg(ap, int);
55898+ num2 = va_arg(ap, int);
55899+ str1 = va_arg(ap, char *);
55900+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
55901+ break;
55902+ case GR_TEXTREL:
55903+ file = va_arg(ap, struct file *);
55904+ ulong1 = va_arg(ap, unsigned long);
55905+ ulong2 = va_arg(ap, unsigned long);
55906+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
55907+ break;
55908+ case GR_PTRACE:
55909+ task = va_arg(ap, struct task_struct *);
55910+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
55911+ break;
55912+ case GR_RESOURCE:
55913+ task = va_arg(ap, struct task_struct *);
55914+ cred = __task_cred(task);
55915+ pcred = __task_cred(task->real_parent);
55916+ ulong1 = va_arg(ap, unsigned long);
55917+ str1 = va_arg(ap, char *);
55918+ ulong2 = va_arg(ap, unsigned long);
55919+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55920+ break;
55921+ case GR_CAP:
55922+ task = va_arg(ap, struct task_struct *);
55923+ cred = __task_cred(task);
55924+ pcred = __task_cred(task->real_parent);
55925+ str1 = va_arg(ap, char *);
55926+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55927+ break;
55928+ case GR_SIG:
55929+ str1 = va_arg(ap, char *);
55930+ voidptr = va_arg(ap, void *);
55931+ gr_log_middle_varargs(audit, msg, str1, voidptr);
55932+ break;
55933+ case GR_SIG2:
55934+ task = va_arg(ap, struct task_struct *);
55935+ cred = __task_cred(task);
55936+ pcred = __task_cred(task->real_parent);
55937+ num1 = va_arg(ap, int);
55938+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55939+ break;
55940+ case GR_CRASH1:
55941+ task = va_arg(ap, struct task_struct *);
55942+ cred = __task_cred(task);
55943+ pcred = __task_cred(task->real_parent);
55944+ ulong1 = va_arg(ap, unsigned long);
55945+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
55946+ break;
55947+ case GR_CRASH2:
55948+ task = va_arg(ap, struct task_struct *);
55949+ cred = __task_cred(task);
55950+ pcred = __task_cred(task->real_parent);
55951+ ulong1 = va_arg(ap, unsigned long);
55952+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
55953+ break;
55954+ case GR_RWXMAP:
55955+ file = va_arg(ap, struct file *);
55956+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
55957+ break;
55958+ case GR_PSACCT:
55959+ {
55960+ unsigned int wday, cday;
55961+ __u8 whr, chr;
55962+ __u8 wmin, cmin;
55963+ __u8 wsec, csec;
55964+ char cur_tty[64] = { 0 };
55965+ char parent_tty[64] = { 0 };
55966+
55967+ task = va_arg(ap, struct task_struct *);
55968+ wday = va_arg(ap, unsigned int);
55969+ cday = va_arg(ap, unsigned int);
55970+ whr = va_arg(ap, int);
55971+ chr = va_arg(ap, int);
55972+ wmin = va_arg(ap, int);
55973+ cmin = va_arg(ap, int);
55974+ wsec = va_arg(ap, int);
55975+ csec = va_arg(ap, int);
55976+ ulong1 = va_arg(ap, unsigned long);
55977+ cred = __task_cred(task);
55978+ pcred = __task_cred(task->real_parent);
55979+
55980+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55981+ }
55982+ break;
55983+ default:
55984+ gr_log_middle(audit, msg, ap);
55985+ }
55986+ va_end(ap);
55987+ // these don't need DEFAULTSECARGS printed on the end
55988+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
55989+ gr_log_end(audit, 0);
55990+ else
55991+ gr_log_end(audit, 1);
55992+ END_LOCKS(audit);
55993+}
55994diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
55995new file mode 100644
55996index 0000000..f536303
55997--- /dev/null
55998+++ b/grsecurity/grsec_mem.c
55999@@ -0,0 +1,40 @@
56000+#include <linux/kernel.h>
56001+#include <linux/sched.h>
56002+#include <linux/mm.h>
56003+#include <linux/mman.h>
56004+#include <linux/grinternal.h>
56005+
56006+void
56007+gr_handle_ioperm(void)
56008+{
56009+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
56010+ return;
56011+}
56012+
56013+void
56014+gr_handle_iopl(void)
56015+{
56016+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
56017+ return;
56018+}
56019+
56020+void
56021+gr_handle_mem_readwrite(u64 from, u64 to)
56022+{
56023+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
56024+ return;
56025+}
56026+
56027+void
56028+gr_handle_vm86(void)
56029+{
56030+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
56031+ return;
56032+}
56033+
56034+void
56035+gr_log_badprocpid(const char *entry)
56036+{
56037+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
56038+ return;
56039+}
56040diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
56041new file mode 100644
56042index 0000000..2131422
56043--- /dev/null
56044+++ b/grsecurity/grsec_mount.c
56045@@ -0,0 +1,62 @@
56046+#include <linux/kernel.h>
56047+#include <linux/sched.h>
56048+#include <linux/mount.h>
56049+#include <linux/grsecurity.h>
56050+#include <linux/grinternal.h>
56051+
56052+void
56053+gr_log_remount(const char *devname, const int retval)
56054+{
56055+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56056+ if (grsec_enable_mount && (retval >= 0))
56057+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
56058+#endif
56059+ return;
56060+}
56061+
56062+void
56063+gr_log_unmount(const char *devname, const int retval)
56064+{
56065+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56066+ if (grsec_enable_mount && (retval >= 0))
56067+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
56068+#endif
56069+ return;
56070+}
56071+
56072+void
56073+gr_log_mount(const char *from, const char *to, const int retval)
56074+{
56075+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56076+ if (grsec_enable_mount && (retval >= 0))
56077+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
56078+#endif
56079+ return;
56080+}
56081+
56082+int
56083+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
56084+{
56085+#ifdef CONFIG_GRKERNSEC_ROFS
56086+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
56087+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
56088+ return -EPERM;
56089+ } else
56090+ return 0;
56091+#endif
56092+ return 0;
56093+}
56094+
56095+int
56096+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
56097+{
56098+#ifdef CONFIG_GRKERNSEC_ROFS
56099+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
56100+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
56101+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
56102+ return -EPERM;
56103+ } else
56104+ return 0;
56105+#endif
56106+ return 0;
56107+}
56108diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
56109new file mode 100644
56110index 0000000..a3b12a0
56111--- /dev/null
56112+++ b/grsecurity/grsec_pax.c
56113@@ -0,0 +1,36 @@
56114+#include <linux/kernel.h>
56115+#include <linux/sched.h>
56116+#include <linux/mm.h>
56117+#include <linux/file.h>
56118+#include <linux/grinternal.h>
56119+#include <linux/grsecurity.h>
56120+
56121+void
56122+gr_log_textrel(struct vm_area_struct * vma)
56123+{
56124+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
56125+ if (grsec_enable_audit_textrel)
56126+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
56127+#endif
56128+ return;
56129+}
56130+
56131+void
56132+gr_log_rwxmmap(struct file *file)
56133+{
56134+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56135+ if (grsec_enable_log_rwxmaps)
56136+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
56137+#endif
56138+ return;
56139+}
56140+
56141+void
56142+gr_log_rwxmprotect(struct file *file)
56143+{
56144+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56145+ if (grsec_enable_log_rwxmaps)
56146+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
56147+#endif
56148+ return;
56149+}
56150diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
56151new file mode 100644
56152index 0000000..f7f29aa
56153--- /dev/null
56154+++ b/grsecurity/grsec_ptrace.c
56155@@ -0,0 +1,30 @@
56156+#include <linux/kernel.h>
56157+#include <linux/sched.h>
56158+#include <linux/grinternal.h>
56159+#include <linux/security.h>
56160+
56161+void
56162+gr_audit_ptrace(struct task_struct *task)
56163+{
56164+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
56165+ if (grsec_enable_audit_ptrace)
56166+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
56167+#endif
56168+ return;
56169+}
56170+
56171+int
56172+gr_ptrace_readexec(struct file *file, int unsafe_flags)
56173+{
56174+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
56175+ const struct dentry *dentry = file->f_path.dentry;
56176+ const struct vfsmount *mnt = file->f_path.mnt;
56177+
56178+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
56179+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
56180+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
56181+ return -EACCES;
56182+ }
56183+#endif
56184+ return 0;
56185+}
56186diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
56187new file mode 100644
56188index 0000000..7a5b2de
56189--- /dev/null
56190+++ b/grsecurity/grsec_sig.c
56191@@ -0,0 +1,207 @@
56192+#include <linux/kernel.h>
56193+#include <linux/sched.h>
56194+#include <linux/delay.h>
56195+#include <linux/grsecurity.h>
56196+#include <linux/grinternal.h>
56197+#include <linux/hardirq.h>
56198+
56199+char *signames[] = {
56200+ [SIGSEGV] = "Segmentation fault",
56201+ [SIGILL] = "Illegal instruction",
56202+ [SIGABRT] = "Abort",
56203+ [SIGBUS] = "Invalid alignment/Bus error"
56204+};
56205+
56206+void
56207+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
56208+{
56209+#ifdef CONFIG_GRKERNSEC_SIGNAL
56210+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
56211+ (sig == SIGABRT) || (sig == SIGBUS))) {
56212+ if (t->pid == current->pid) {
56213+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
56214+ } else {
56215+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
56216+ }
56217+ }
56218+#endif
56219+ return;
56220+}
56221+
56222+int
56223+gr_handle_signal(const struct task_struct *p, const int sig)
56224+{
56225+#ifdef CONFIG_GRKERNSEC
56226+ /* ignore the 0 signal for protected task checks */
56227+ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
56228+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
56229+ return -EPERM;
56230+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
56231+ return -EPERM;
56232+ }
56233+#endif
56234+ return 0;
56235+}
56236+
56237+#ifdef CONFIG_GRKERNSEC
56238+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
56239+
56240+int gr_fake_force_sig(int sig, struct task_struct *t)
56241+{
56242+ unsigned long int flags;
56243+ int ret, blocked, ignored;
56244+ struct k_sigaction *action;
56245+
56246+ spin_lock_irqsave(&t->sighand->siglock, flags);
56247+ action = &t->sighand->action[sig-1];
56248+ ignored = action->sa.sa_handler == SIG_IGN;
56249+ blocked = sigismember(&t->blocked, sig);
56250+ if (blocked || ignored) {
56251+ action->sa.sa_handler = SIG_DFL;
56252+ if (blocked) {
56253+ sigdelset(&t->blocked, sig);
56254+ recalc_sigpending_and_wake(t);
56255+ }
56256+ }
56257+ if (action->sa.sa_handler == SIG_DFL)
56258+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
56259+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
56260+
56261+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
56262+
56263+ return ret;
56264+}
56265+#endif
56266+
56267+#ifdef CONFIG_GRKERNSEC_BRUTE
56268+#define GR_USER_BAN_TIME (15 * 60)
56269+
56270+static int __get_dumpable(unsigned long mm_flags)
56271+{
56272+ int ret;
56273+
56274+ ret = mm_flags & MMF_DUMPABLE_MASK;
56275+ return (ret >= 2) ? 2 : ret;
56276+}
56277+#endif
56278+
56279+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
56280+{
56281+#ifdef CONFIG_GRKERNSEC_BRUTE
56282+ uid_t uid = 0;
56283+
56284+ if (!grsec_enable_brute)
56285+ return;
56286+
56287+ rcu_read_lock();
56288+ read_lock(&tasklist_lock);
56289+ read_lock(&grsec_exec_file_lock);
56290+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
56291+ p->real_parent->brute = 1;
56292+ else {
56293+ const struct cred *cred = __task_cred(p), *cred2;
56294+ struct task_struct *tsk, *tsk2;
56295+
56296+ if (!__get_dumpable(mm_flags) && cred->uid) {
56297+ struct user_struct *user;
56298+
56299+ uid = cred->uid;
56300+
56301+ /* this is put upon execution past expiration */
56302+ user = find_user(uid);
56303+ if (user == NULL)
56304+ goto unlock;
56305+ user->banned = 1;
56306+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
56307+ if (user->ban_expires == ~0UL)
56308+ user->ban_expires--;
56309+
56310+ do_each_thread(tsk2, tsk) {
56311+ cred2 = __task_cred(tsk);
56312+ if (tsk != p && cred2->uid == uid)
56313+ gr_fake_force_sig(SIGKILL, tsk);
56314+ } while_each_thread(tsk2, tsk);
56315+ }
56316+ }
56317+unlock:
56318+ read_unlock(&grsec_exec_file_lock);
56319+ read_unlock(&tasklist_lock);
56320+ rcu_read_unlock();
56321+
56322+ if (uid)
56323+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
56324+
56325+#endif
56326+ return;
56327+}
56328+
56329+void gr_handle_brute_check(void)
56330+{
56331+#ifdef CONFIG_GRKERNSEC_BRUTE
56332+ if (current->brute)
56333+ msleep(30 * 1000);
56334+#endif
56335+ return;
56336+}
56337+
56338+void gr_handle_kernel_exploit(void)
56339+{
56340+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
56341+ const struct cred *cred;
56342+ struct task_struct *tsk, *tsk2;
56343+ struct user_struct *user;
56344+ uid_t uid;
56345+
56346+ if (in_irq() || in_serving_softirq() || in_nmi())
56347+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
56348+
56349+ uid = current_uid();
56350+
56351+ if (uid == 0)
56352+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
56353+ else {
56354+ /* kill all the processes of this user, hold a reference
56355+ to their creds struct, and prevent them from creating
56356+ another process until system reset
56357+ */
56358+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
56359+ /* we intentionally leak this ref */
56360+ user = get_uid(current->cred->user);
56361+ if (user) {
56362+ user->banned = 1;
56363+ user->ban_expires = ~0UL;
56364+ }
56365+
56366+ read_lock(&tasklist_lock);
56367+ do_each_thread(tsk2, tsk) {
56368+ cred = __task_cred(tsk);
56369+ if (cred->uid == uid)
56370+ gr_fake_force_sig(SIGKILL, tsk);
56371+ } while_each_thread(tsk2, tsk);
56372+ read_unlock(&tasklist_lock);
56373+ }
56374+#endif
56375+}
56376+
56377+int __gr_process_user_ban(struct user_struct *user)
56378+{
56379+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56380+ if (unlikely(user->banned)) {
56381+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
56382+ user->banned = 0;
56383+ user->ban_expires = 0;
56384+ free_uid(user);
56385+ } else
56386+ return -EPERM;
56387+ }
56388+#endif
56389+ return 0;
56390+}
56391+
56392+int gr_process_user_ban(void)
56393+{
56394+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56395+ return __gr_process_user_ban(current->cred->user);
56396+#endif
56397+ return 0;
56398+}
56399diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
56400new file mode 100644
56401index 0000000..4030d57
56402--- /dev/null
56403+++ b/grsecurity/grsec_sock.c
56404@@ -0,0 +1,244 @@
56405+#include <linux/kernel.h>
56406+#include <linux/module.h>
56407+#include <linux/sched.h>
56408+#include <linux/file.h>
56409+#include <linux/net.h>
56410+#include <linux/in.h>
56411+#include <linux/ip.h>
56412+#include <net/sock.h>
56413+#include <net/inet_sock.h>
56414+#include <linux/grsecurity.h>
56415+#include <linux/grinternal.h>
56416+#include <linux/gracl.h>
56417+
56418+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
56419+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
56420+
56421+EXPORT_SYMBOL(gr_search_udp_recvmsg);
56422+EXPORT_SYMBOL(gr_search_udp_sendmsg);
56423+
56424+#ifdef CONFIG_UNIX_MODULE
56425+EXPORT_SYMBOL(gr_acl_handle_unix);
56426+EXPORT_SYMBOL(gr_acl_handle_mknod);
56427+EXPORT_SYMBOL(gr_handle_chroot_unix);
56428+EXPORT_SYMBOL(gr_handle_create);
56429+#endif
56430+
56431+#ifdef CONFIG_GRKERNSEC
56432+#define gr_conn_table_size 32749
56433+struct conn_table_entry {
56434+ struct conn_table_entry *next;
56435+ struct signal_struct *sig;
56436+};
56437+
56438+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
56439+DEFINE_SPINLOCK(gr_conn_table_lock);
56440+
56441+extern const char * gr_socktype_to_name(unsigned char type);
56442+extern const char * gr_proto_to_name(unsigned char proto);
56443+extern const char * gr_sockfamily_to_name(unsigned char family);
56444+
56445+static __inline__ int
56446+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
56447+{
56448+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
56449+}
56450+
56451+static __inline__ int
56452+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
56453+ __u16 sport, __u16 dport)
56454+{
56455+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
56456+ sig->gr_sport == sport && sig->gr_dport == dport))
56457+ return 1;
56458+ else
56459+ return 0;
56460+}
56461+
56462+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
56463+{
56464+ struct conn_table_entry **match;
56465+ unsigned int index;
56466+
56467+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
56468+ sig->gr_sport, sig->gr_dport,
56469+ gr_conn_table_size);
56470+
56471+ newent->sig = sig;
56472+
56473+ match = &gr_conn_table[index];
56474+ newent->next = *match;
56475+ *match = newent;
56476+
56477+ return;
56478+}
56479+
56480+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
56481+{
56482+ struct conn_table_entry *match, *last = NULL;
56483+ unsigned int index;
56484+
56485+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
56486+ sig->gr_sport, sig->gr_dport,
56487+ gr_conn_table_size);
56488+
56489+ match = gr_conn_table[index];
56490+ while (match && !conn_match(match->sig,
56491+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
56492+ sig->gr_dport)) {
56493+ last = match;
56494+ match = match->next;
56495+ }
56496+
56497+ if (match) {
56498+ if (last)
56499+ last->next = match->next;
56500+ else
56501+ gr_conn_table[index] = NULL;
56502+ kfree(match);
56503+ }
56504+
56505+ return;
56506+}
56507+
56508+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
56509+ __u16 sport, __u16 dport)
56510+{
56511+ struct conn_table_entry *match;
56512+ unsigned int index;
56513+
56514+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
56515+
56516+ match = gr_conn_table[index];
56517+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
56518+ match = match->next;
56519+
56520+ if (match)
56521+ return match->sig;
56522+ else
56523+ return NULL;
56524+}
56525+
56526+#endif
56527+
56528+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
56529+{
56530+#ifdef CONFIG_GRKERNSEC
56531+ struct signal_struct *sig = task->signal;
56532+ struct conn_table_entry *newent;
56533+
56534+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
56535+ if (newent == NULL)
56536+ return;
56537+ /* no bh lock needed since we are called with bh disabled */
56538+ spin_lock(&gr_conn_table_lock);
56539+ gr_del_task_from_ip_table_nolock(sig);
56540+ sig->gr_saddr = inet->inet_rcv_saddr;
56541+ sig->gr_daddr = inet->inet_daddr;
56542+ sig->gr_sport = inet->inet_sport;
56543+ sig->gr_dport = inet->inet_dport;
56544+ gr_add_to_task_ip_table_nolock(sig, newent);
56545+ spin_unlock(&gr_conn_table_lock);
56546+#endif
56547+ return;
56548+}
56549+
56550+void gr_del_task_from_ip_table(struct task_struct *task)
56551+{
56552+#ifdef CONFIG_GRKERNSEC
56553+ spin_lock_bh(&gr_conn_table_lock);
56554+ gr_del_task_from_ip_table_nolock(task->signal);
56555+ spin_unlock_bh(&gr_conn_table_lock);
56556+#endif
56557+ return;
56558+}
56559+
56560+void
56561+gr_attach_curr_ip(const struct sock *sk)
56562+{
56563+#ifdef CONFIG_GRKERNSEC
56564+ struct signal_struct *p, *set;
56565+ const struct inet_sock *inet = inet_sk(sk);
56566+
56567+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
56568+ return;
56569+
56570+ set = current->signal;
56571+
56572+ spin_lock_bh(&gr_conn_table_lock);
56573+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
56574+ inet->inet_dport, inet->inet_sport);
56575+ if (unlikely(p != NULL)) {
56576+ set->curr_ip = p->curr_ip;
56577+ set->used_accept = 1;
56578+ gr_del_task_from_ip_table_nolock(p);
56579+ spin_unlock_bh(&gr_conn_table_lock);
56580+ return;
56581+ }
56582+ spin_unlock_bh(&gr_conn_table_lock);
56583+
56584+ set->curr_ip = inet->inet_daddr;
56585+ set->used_accept = 1;
56586+#endif
56587+ return;
56588+}
56589+
56590+int
56591+gr_handle_sock_all(const int family, const int type, const int protocol)
56592+{
56593+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56594+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
56595+ (family != AF_UNIX)) {
56596+ if (family == AF_INET)
56597+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
56598+ else
56599+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
56600+ return -EACCES;
56601+ }
56602+#endif
56603+ return 0;
56604+}
56605+
56606+int
56607+gr_handle_sock_server(const struct sockaddr *sck)
56608+{
56609+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56610+ if (grsec_enable_socket_server &&
56611+ in_group_p(grsec_socket_server_gid) &&
56612+ sck && (sck->sa_family != AF_UNIX) &&
56613+ (sck->sa_family != AF_LOCAL)) {
56614+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56615+ return -EACCES;
56616+ }
56617+#endif
56618+ return 0;
56619+}
56620+
56621+int
56622+gr_handle_sock_server_other(const struct sock *sck)
56623+{
56624+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56625+ if (grsec_enable_socket_server &&
56626+ in_group_p(grsec_socket_server_gid) &&
56627+ sck && (sck->sk_family != AF_UNIX) &&
56628+ (sck->sk_family != AF_LOCAL)) {
56629+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56630+ return -EACCES;
56631+ }
56632+#endif
56633+ return 0;
56634+}
56635+
56636+int
56637+gr_handle_sock_client(const struct sockaddr *sck)
56638+{
56639+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56640+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
56641+ sck && (sck->sa_family != AF_UNIX) &&
56642+ (sck->sa_family != AF_LOCAL)) {
56643+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
56644+ return -EACCES;
56645+ }
56646+#endif
56647+ return 0;
56648+}
56649diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
56650new file mode 100644
56651index 0000000..a1aedd7
56652--- /dev/null
56653+++ b/grsecurity/grsec_sysctl.c
56654@@ -0,0 +1,451 @@
56655+#include <linux/kernel.h>
56656+#include <linux/sched.h>
56657+#include <linux/sysctl.h>
56658+#include <linux/grsecurity.h>
56659+#include <linux/grinternal.h>
56660+
56661+int
56662+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
56663+{
56664+#ifdef CONFIG_GRKERNSEC_SYSCTL
56665+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
56666+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
56667+ return -EACCES;
56668+ }
56669+#endif
56670+ return 0;
56671+}
56672+
56673+#ifdef CONFIG_GRKERNSEC_ROFS
56674+static int __maybe_unused one = 1;
56675+#endif
56676+
56677+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
56678+struct ctl_table grsecurity_table[] = {
56679+#ifdef CONFIG_GRKERNSEC_SYSCTL
56680+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
56681+#ifdef CONFIG_GRKERNSEC_IO
56682+ {
56683+ .procname = "disable_priv_io",
56684+ .data = &grsec_disable_privio,
56685+ .maxlen = sizeof(int),
56686+ .mode = 0600,
56687+ .proc_handler = &proc_dointvec,
56688+ },
56689+#endif
56690+#endif
56691+#ifdef CONFIG_GRKERNSEC_LINK
56692+ {
56693+ .procname = "linking_restrictions",
56694+ .data = &grsec_enable_link,
56695+ .maxlen = sizeof(int),
56696+ .mode = 0600,
56697+ .proc_handler = &proc_dointvec,
56698+ },
56699+#endif
56700+#ifdef CONFIG_GRKERNSEC_BRUTE
56701+ {
56702+ .procname = "deter_bruteforce",
56703+ .data = &grsec_enable_brute,
56704+ .maxlen = sizeof(int),
56705+ .mode = 0600,
56706+ .proc_handler = &proc_dointvec,
56707+ },
56708+#endif
56709+#ifdef CONFIG_GRKERNSEC_FIFO
56710+ {
56711+ .procname = "fifo_restrictions",
56712+ .data = &grsec_enable_fifo,
56713+ .maxlen = sizeof(int),
56714+ .mode = 0600,
56715+ .proc_handler = &proc_dointvec,
56716+ },
56717+#endif
56718+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
56719+ {
56720+ .procname = "ptrace_readexec",
56721+ .data = &grsec_enable_ptrace_readexec,
56722+ .maxlen = sizeof(int),
56723+ .mode = 0600,
56724+ .proc_handler = &proc_dointvec,
56725+ },
56726+#endif
56727+#ifdef CONFIG_GRKERNSEC_SETXID
56728+ {
56729+ .procname = "consistent_setxid",
56730+ .data = &grsec_enable_setxid,
56731+ .maxlen = sizeof(int),
56732+ .mode = 0600,
56733+ .proc_handler = &proc_dointvec,
56734+ },
56735+#endif
56736+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
56737+ {
56738+ .procname = "ip_blackhole",
56739+ .data = &grsec_enable_blackhole,
56740+ .maxlen = sizeof(int),
56741+ .mode = 0600,
56742+ .proc_handler = &proc_dointvec,
56743+ },
56744+ {
56745+ .procname = "lastack_retries",
56746+ .data = &grsec_lastack_retries,
56747+ .maxlen = sizeof(int),
56748+ .mode = 0600,
56749+ .proc_handler = &proc_dointvec,
56750+ },
56751+#endif
56752+#ifdef CONFIG_GRKERNSEC_EXECLOG
56753+ {
56754+ .procname = "exec_logging",
56755+ .data = &grsec_enable_execlog,
56756+ .maxlen = sizeof(int),
56757+ .mode = 0600,
56758+ .proc_handler = &proc_dointvec,
56759+ },
56760+#endif
56761+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56762+ {
56763+ .procname = "rwxmap_logging",
56764+ .data = &grsec_enable_log_rwxmaps,
56765+ .maxlen = sizeof(int),
56766+ .mode = 0600,
56767+ .proc_handler = &proc_dointvec,
56768+ },
56769+#endif
56770+#ifdef CONFIG_GRKERNSEC_SIGNAL
56771+ {
56772+ .procname = "signal_logging",
56773+ .data = &grsec_enable_signal,
56774+ .maxlen = sizeof(int),
56775+ .mode = 0600,
56776+ .proc_handler = &proc_dointvec,
56777+ },
56778+#endif
56779+#ifdef CONFIG_GRKERNSEC_FORKFAIL
56780+ {
56781+ .procname = "forkfail_logging",
56782+ .data = &grsec_enable_forkfail,
56783+ .maxlen = sizeof(int),
56784+ .mode = 0600,
56785+ .proc_handler = &proc_dointvec,
56786+ },
56787+#endif
56788+#ifdef CONFIG_GRKERNSEC_TIME
56789+ {
56790+ .procname = "timechange_logging",
56791+ .data = &grsec_enable_time,
56792+ .maxlen = sizeof(int),
56793+ .mode = 0600,
56794+ .proc_handler = &proc_dointvec,
56795+ },
56796+#endif
56797+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56798+ {
56799+ .procname = "chroot_deny_shmat",
56800+ .data = &grsec_enable_chroot_shmat,
56801+ .maxlen = sizeof(int),
56802+ .mode = 0600,
56803+ .proc_handler = &proc_dointvec,
56804+ },
56805+#endif
56806+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56807+ {
56808+ .procname = "chroot_deny_unix",
56809+ .data = &grsec_enable_chroot_unix,
56810+ .maxlen = sizeof(int),
56811+ .mode = 0600,
56812+ .proc_handler = &proc_dointvec,
56813+ },
56814+#endif
56815+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56816+ {
56817+ .procname = "chroot_deny_mount",
56818+ .data = &grsec_enable_chroot_mount,
56819+ .maxlen = sizeof(int),
56820+ .mode = 0600,
56821+ .proc_handler = &proc_dointvec,
56822+ },
56823+#endif
56824+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56825+ {
56826+ .procname = "chroot_deny_fchdir",
56827+ .data = &grsec_enable_chroot_fchdir,
56828+ .maxlen = sizeof(int),
56829+ .mode = 0600,
56830+ .proc_handler = &proc_dointvec,
56831+ },
56832+#endif
56833+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56834+ {
56835+ .procname = "chroot_deny_chroot",
56836+ .data = &grsec_enable_chroot_double,
56837+ .maxlen = sizeof(int),
56838+ .mode = 0600,
56839+ .proc_handler = &proc_dointvec,
56840+ },
56841+#endif
56842+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56843+ {
56844+ .procname = "chroot_deny_pivot",
56845+ .data = &grsec_enable_chroot_pivot,
56846+ .maxlen = sizeof(int),
56847+ .mode = 0600,
56848+ .proc_handler = &proc_dointvec,
56849+ },
56850+#endif
56851+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56852+ {
56853+ .procname = "chroot_enforce_chdir",
56854+ .data = &grsec_enable_chroot_chdir,
56855+ .maxlen = sizeof(int),
56856+ .mode = 0600,
56857+ .proc_handler = &proc_dointvec,
56858+ },
56859+#endif
56860+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56861+ {
56862+ .procname = "chroot_deny_chmod",
56863+ .data = &grsec_enable_chroot_chmod,
56864+ .maxlen = sizeof(int),
56865+ .mode = 0600,
56866+ .proc_handler = &proc_dointvec,
56867+ },
56868+#endif
56869+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56870+ {
56871+ .procname = "chroot_deny_mknod",
56872+ .data = &grsec_enable_chroot_mknod,
56873+ .maxlen = sizeof(int),
56874+ .mode = 0600,
56875+ .proc_handler = &proc_dointvec,
56876+ },
56877+#endif
56878+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56879+ {
56880+ .procname = "chroot_restrict_nice",
56881+ .data = &grsec_enable_chroot_nice,
56882+ .maxlen = sizeof(int),
56883+ .mode = 0600,
56884+ .proc_handler = &proc_dointvec,
56885+ },
56886+#endif
56887+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56888+ {
56889+ .procname = "chroot_execlog",
56890+ .data = &grsec_enable_chroot_execlog,
56891+ .maxlen = sizeof(int),
56892+ .mode = 0600,
56893+ .proc_handler = &proc_dointvec,
56894+ },
56895+#endif
56896+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56897+ {
56898+ .procname = "chroot_caps",
56899+ .data = &grsec_enable_chroot_caps,
56900+ .maxlen = sizeof(int),
56901+ .mode = 0600,
56902+ .proc_handler = &proc_dointvec,
56903+ },
56904+#endif
56905+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56906+ {
56907+ .procname = "chroot_deny_sysctl",
56908+ .data = &grsec_enable_chroot_sysctl,
56909+ .maxlen = sizeof(int),
56910+ .mode = 0600,
56911+ .proc_handler = &proc_dointvec,
56912+ },
56913+#endif
56914+#ifdef CONFIG_GRKERNSEC_TPE
56915+ {
56916+ .procname = "tpe",
56917+ .data = &grsec_enable_tpe,
56918+ .maxlen = sizeof(int),
56919+ .mode = 0600,
56920+ .proc_handler = &proc_dointvec,
56921+ },
56922+ {
56923+ .procname = "tpe_gid",
56924+ .data = &grsec_tpe_gid,
56925+ .maxlen = sizeof(int),
56926+ .mode = 0600,
56927+ .proc_handler = &proc_dointvec,
56928+ },
56929+#endif
56930+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56931+ {
56932+ .procname = "tpe_invert",
56933+ .data = &grsec_enable_tpe_invert,
56934+ .maxlen = sizeof(int),
56935+ .mode = 0600,
56936+ .proc_handler = &proc_dointvec,
56937+ },
56938+#endif
56939+#ifdef CONFIG_GRKERNSEC_TPE_ALL
56940+ {
56941+ .procname = "tpe_restrict_all",
56942+ .data = &grsec_enable_tpe_all,
56943+ .maxlen = sizeof(int),
56944+ .mode = 0600,
56945+ .proc_handler = &proc_dointvec,
56946+ },
56947+#endif
56948+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56949+ {
56950+ .procname = "socket_all",
56951+ .data = &grsec_enable_socket_all,
56952+ .maxlen = sizeof(int),
56953+ .mode = 0600,
56954+ .proc_handler = &proc_dointvec,
56955+ },
56956+ {
56957+ .procname = "socket_all_gid",
56958+ .data = &grsec_socket_all_gid,
56959+ .maxlen = sizeof(int),
56960+ .mode = 0600,
56961+ .proc_handler = &proc_dointvec,
56962+ },
56963+#endif
56964+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56965+ {
56966+ .procname = "socket_client",
56967+ .data = &grsec_enable_socket_client,
56968+ .maxlen = sizeof(int),
56969+ .mode = 0600,
56970+ .proc_handler = &proc_dointvec,
56971+ },
56972+ {
56973+ .procname = "socket_client_gid",
56974+ .data = &grsec_socket_client_gid,
56975+ .maxlen = sizeof(int),
56976+ .mode = 0600,
56977+ .proc_handler = &proc_dointvec,
56978+ },
56979+#endif
56980+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56981+ {
56982+ .procname = "socket_server",
56983+ .data = &grsec_enable_socket_server,
56984+ .maxlen = sizeof(int),
56985+ .mode = 0600,
56986+ .proc_handler = &proc_dointvec,
56987+ },
56988+ {
56989+ .procname = "socket_server_gid",
56990+ .data = &grsec_socket_server_gid,
56991+ .maxlen = sizeof(int),
56992+ .mode = 0600,
56993+ .proc_handler = &proc_dointvec,
56994+ },
56995+#endif
56996+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
56997+ {
56998+ .procname = "audit_group",
56999+ .data = &grsec_enable_group,
57000+ .maxlen = sizeof(int),
57001+ .mode = 0600,
57002+ .proc_handler = &proc_dointvec,
57003+ },
57004+ {
57005+ .procname = "audit_gid",
57006+ .data = &grsec_audit_gid,
57007+ .maxlen = sizeof(int),
57008+ .mode = 0600,
57009+ .proc_handler = &proc_dointvec,
57010+ },
57011+#endif
57012+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
57013+ {
57014+ .procname = "audit_chdir",
57015+ .data = &grsec_enable_chdir,
57016+ .maxlen = sizeof(int),
57017+ .mode = 0600,
57018+ .proc_handler = &proc_dointvec,
57019+ },
57020+#endif
57021+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57022+ {
57023+ .procname = "audit_mount",
57024+ .data = &grsec_enable_mount,
57025+ .maxlen = sizeof(int),
57026+ .mode = 0600,
57027+ .proc_handler = &proc_dointvec,
57028+ },
57029+#endif
57030+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57031+ {
57032+ .procname = "audit_textrel",
57033+ .data = &grsec_enable_audit_textrel,
57034+ .maxlen = sizeof(int),
57035+ .mode = 0600,
57036+ .proc_handler = &proc_dointvec,
57037+ },
57038+#endif
57039+#ifdef CONFIG_GRKERNSEC_DMESG
57040+ {
57041+ .procname = "dmesg",
57042+ .data = &grsec_enable_dmesg,
57043+ .maxlen = sizeof(int),
57044+ .mode = 0600,
57045+ .proc_handler = &proc_dointvec,
57046+ },
57047+#endif
57048+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57049+ {
57050+ .procname = "chroot_findtask",
57051+ .data = &grsec_enable_chroot_findtask,
57052+ .maxlen = sizeof(int),
57053+ .mode = 0600,
57054+ .proc_handler = &proc_dointvec,
57055+ },
57056+#endif
57057+#ifdef CONFIG_GRKERNSEC_RESLOG
57058+ {
57059+ .procname = "resource_logging",
57060+ .data = &grsec_resource_logging,
57061+ .maxlen = sizeof(int),
57062+ .mode = 0600,
57063+ .proc_handler = &proc_dointvec,
57064+ },
57065+#endif
57066+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57067+ {
57068+ .procname = "audit_ptrace",
57069+ .data = &grsec_enable_audit_ptrace,
57070+ .maxlen = sizeof(int),
57071+ .mode = 0600,
57072+ .proc_handler = &proc_dointvec,
57073+ },
57074+#endif
57075+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
57076+ {
57077+ .procname = "harden_ptrace",
57078+ .data = &grsec_enable_harden_ptrace,
57079+ .maxlen = sizeof(int),
57080+ .mode = 0600,
57081+ .proc_handler = &proc_dointvec,
57082+ },
57083+#endif
57084+ {
57085+ .procname = "grsec_lock",
57086+ .data = &grsec_lock,
57087+ .maxlen = sizeof(int),
57088+ .mode = 0600,
57089+ .proc_handler = &proc_dointvec,
57090+ },
57091+#endif
57092+#ifdef CONFIG_GRKERNSEC_ROFS
57093+ {
57094+ .procname = "romount_protect",
57095+ .data = &grsec_enable_rofs,
57096+ .maxlen = sizeof(int),
57097+ .mode = 0600,
57098+ .proc_handler = &proc_dointvec_minmax,
57099+ .extra1 = &one,
57100+ .extra2 = &one,
57101+ },
57102+#endif
57103+ { }
57104+};
57105+#endif
57106diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
57107new file mode 100644
57108index 0000000..0dc13c3
57109--- /dev/null
57110+++ b/grsecurity/grsec_time.c
57111@@ -0,0 +1,16 @@
57112+#include <linux/kernel.h>
57113+#include <linux/sched.h>
57114+#include <linux/grinternal.h>
57115+#include <linux/module.h>
57116+
57117+void
57118+gr_log_timechange(void)
57119+{
57120+#ifdef CONFIG_GRKERNSEC_TIME
57121+ if (grsec_enable_time)
57122+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
57123+#endif
57124+ return;
57125+}
57126+
57127+EXPORT_SYMBOL(gr_log_timechange);
57128diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
57129new file mode 100644
57130index 0000000..07e0dc0
57131--- /dev/null
57132+++ b/grsecurity/grsec_tpe.c
57133@@ -0,0 +1,73 @@
57134+#include <linux/kernel.h>
57135+#include <linux/sched.h>
57136+#include <linux/file.h>
57137+#include <linux/fs.h>
57138+#include <linux/grinternal.h>
57139+
57140+extern int gr_acl_tpe_check(void);
57141+
57142+int
57143+gr_tpe_allow(const struct file *file)
57144+{
57145+#ifdef CONFIG_GRKERNSEC
57146+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
57147+ const struct cred *cred = current_cred();
57148+ char *msg = NULL;
57149+ char *msg2 = NULL;
57150+
57151+ // never restrict root
57152+ if (!cred->uid)
57153+ return 1;
57154+
57155+ if (grsec_enable_tpe) {
57156+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
57157+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
57158+ msg = "not being in trusted group";
57159+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
57160+ msg = "being in untrusted group";
57161+#else
57162+ if (in_group_p(grsec_tpe_gid))
57163+ msg = "being in untrusted group";
57164+#endif
57165+ }
57166+ if (!msg && gr_acl_tpe_check())
57167+ msg = "being in untrusted role";
57168+
57169+ // not in any affected group/role
57170+ if (!msg)
57171+ goto next_check;
57172+
57173+ if (inode->i_uid)
57174+ msg2 = "file in non-root-owned directory";
57175+ else if (inode->i_mode & S_IWOTH)
57176+ msg2 = "file in world-writable directory";
57177+ else if (inode->i_mode & S_IWGRP)
57178+ msg2 = "file in group-writable directory";
57179+
57180+ if (msg && msg2) {
57181+ char fullmsg[70] = {0};
57182+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
57183+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
57184+ return 0;
57185+ }
57186+ msg = NULL;
57187+next_check:
57188+#ifdef CONFIG_GRKERNSEC_TPE_ALL
57189+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
57190+ return 1;
57191+
57192+ if (inode->i_uid && (inode->i_uid != cred->uid))
57193+ msg = "directory not owned by user";
57194+ else if (inode->i_mode & S_IWOTH)
57195+ msg = "file in world-writable directory";
57196+ else if (inode->i_mode & S_IWGRP)
57197+ msg = "file in group-writable directory";
57198+
57199+ if (msg) {
57200+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
57201+ return 0;
57202+ }
57203+#endif
57204+#endif
57205+ return 1;
57206+}
57207diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
57208new file mode 100644
57209index 0000000..9f7b1ac
57210--- /dev/null
57211+++ b/grsecurity/grsum.c
57212@@ -0,0 +1,61 @@
57213+#include <linux/err.h>
57214+#include <linux/kernel.h>
57215+#include <linux/sched.h>
57216+#include <linux/mm.h>
57217+#include <linux/scatterlist.h>
57218+#include <linux/crypto.h>
57219+#include <linux/gracl.h>
57220+
57221+
57222+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
57223+#error "crypto and sha256 must be built into the kernel"
57224+#endif
57225+
57226+int
57227+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
57228+{
57229+ char *p;
57230+ struct crypto_hash *tfm;
57231+ struct hash_desc desc;
57232+ struct scatterlist sg;
57233+ unsigned char temp_sum[GR_SHA_LEN];
57234+ volatile int retval = 0;
57235+ volatile int dummy = 0;
57236+ unsigned int i;
57237+
57238+ sg_init_table(&sg, 1);
57239+
57240+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
57241+ if (IS_ERR(tfm)) {
57242+ /* should never happen, since sha256 should be built in */
57243+ return 1;
57244+ }
57245+
57246+ desc.tfm = tfm;
57247+ desc.flags = 0;
57248+
57249+ crypto_hash_init(&desc);
57250+
57251+ p = salt;
57252+ sg_set_buf(&sg, p, GR_SALT_LEN);
57253+ crypto_hash_update(&desc, &sg, sg.length);
57254+
57255+ p = entry->pw;
57256+ sg_set_buf(&sg, p, strlen(p));
57257+
57258+ crypto_hash_update(&desc, &sg, sg.length);
57259+
57260+ crypto_hash_final(&desc, temp_sum);
57261+
57262+ memset(entry->pw, 0, GR_PW_LEN);
57263+
57264+ for (i = 0; i < GR_SHA_LEN; i++)
57265+ if (sum[i] != temp_sum[i])
57266+ retval = 1;
57267+ else
57268+ dummy = 1; // waste a cycle
57269+
57270+ crypto_free_hash(tfm);
57271+
57272+ return retval;
57273+}
57274diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
57275index 6cd5b64..f620d2d 100644
57276--- a/include/acpi/acpi_bus.h
57277+++ b/include/acpi/acpi_bus.h
57278@@ -107,7 +107,7 @@ struct acpi_device_ops {
57279 acpi_op_bind bind;
57280 acpi_op_unbind unbind;
57281 acpi_op_notify notify;
57282-};
57283+} __no_const;
57284
57285 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
57286
57287diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
57288index b7babf0..71e4e74 100644
57289--- a/include/asm-generic/atomic-long.h
57290+++ b/include/asm-generic/atomic-long.h
57291@@ -22,6 +22,12 @@
57292
57293 typedef atomic64_t atomic_long_t;
57294
57295+#ifdef CONFIG_PAX_REFCOUNT
57296+typedef atomic64_unchecked_t atomic_long_unchecked_t;
57297+#else
57298+typedef atomic64_t atomic_long_unchecked_t;
57299+#endif
57300+
57301 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
57302
57303 static inline long atomic_long_read(atomic_long_t *l)
57304@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
57305 return (long)atomic64_read(v);
57306 }
57307
57308+#ifdef CONFIG_PAX_REFCOUNT
57309+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57310+{
57311+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57312+
57313+ return (long)atomic64_read_unchecked(v);
57314+}
57315+#endif
57316+
57317 static inline void atomic_long_set(atomic_long_t *l, long i)
57318 {
57319 atomic64_t *v = (atomic64_t *)l;
57320@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
57321 atomic64_set(v, i);
57322 }
57323
57324+#ifdef CONFIG_PAX_REFCOUNT
57325+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57326+{
57327+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57328+
57329+ atomic64_set_unchecked(v, i);
57330+}
57331+#endif
57332+
57333 static inline void atomic_long_inc(atomic_long_t *l)
57334 {
57335 atomic64_t *v = (atomic64_t *)l;
57336@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
57337 atomic64_inc(v);
57338 }
57339
57340+#ifdef CONFIG_PAX_REFCOUNT
57341+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57342+{
57343+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57344+
57345+ atomic64_inc_unchecked(v);
57346+}
57347+#endif
57348+
57349 static inline void atomic_long_dec(atomic_long_t *l)
57350 {
57351 atomic64_t *v = (atomic64_t *)l;
57352@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
57353 atomic64_dec(v);
57354 }
57355
57356+#ifdef CONFIG_PAX_REFCOUNT
57357+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57358+{
57359+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57360+
57361+ atomic64_dec_unchecked(v);
57362+}
57363+#endif
57364+
57365 static inline void atomic_long_add(long i, atomic_long_t *l)
57366 {
57367 atomic64_t *v = (atomic64_t *)l;
57368@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
57369 atomic64_add(i, v);
57370 }
57371
57372+#ifdef CONFIG_PAX_REFCOUNT
57373+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57374+{
57375+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57376+
57377+ atomic64_add_unchecked(i, v);
57378+}
57379+#endif
57380+
57381 static inline void atomic_long_sub(long i, atomic_long_t *l)
57382 {
57383 atomic64_t *v = (atomic64_t *)l;
57384@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
57385 atomic64_sub(i, v);
57386 }
57387
57388+#ifdef CONFIG_PAX_REFCOUNT
57389+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57390+{
57391+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57392+
57393+ atomic64_sub_unchecked(i, v);
57394+}
57395+#endif
57396+
57397 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57398 {
57399 atomic64_t *v = (atomic64_t *)l;
57400@@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
57401 return (long)atomic64_inc_return(v);
57402 }
57403
57404+#ifdef CONFIG_PAX_REFCOUNT
57405+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57406+{
57407+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57408+
57409+ return (long)atomic64_inc_return_unchecked(v);
57410+}
57411+#endif
57412+
57413 static inline long atomic_long_dec_return(atomic_long_t *l)
57414 {
57415 atomic64_t *v = (atomic64_t *)l;
57416@@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
57417
57418 typedef atomic_t atomic_long_t;
57419
57420+#ifdef CONFIG_PAX_REFCOUNT
57421+typedef atomic_unchecked_t atomic_long_unchecked_t;
57422+#else
57423+typedef atomic_t atomic_long_unchecked_t;
57424+#endif
57425+
57426 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
57427 static inline long atomic_long_read(atomic_long_t *l)
57428 {
57429@@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
57430 return (long)atomic_read(v);
57431 }
57432
57433+#ifdef CONFIG_PAX_REFCOUNT
57434+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57435+{
57436+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57437+
57438+ return (long)atomic_read_unchecked(v);
57439+}
57440+#endif
57441+
57442 static inline void atomic_long_set(atomic_long_t *l, long i)
57443 {
57444 atomic_t *v = (atomic_t *)l;
57445@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
57446 atomic_set(v, i);
57447 }
57448
57449+#ifdef CONFIG_PAX_REFCOUNT
57450+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57451+{
57452+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57453+
57454+ atomic_set_unchecked(v, i);
57455+}
57456+#endif
57457+
57458 static inline void atomic_long_inc(atomic_long_t *l)
57459 {
57460 atomic_t *v = (atomic_t *)l;
57461@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
57462 atomic_inc(v);
57463 }
57464
57465+#ifdef CONFIG_PAX_REFCOUNT
57466+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57467+{
57468+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57469+
57470+ atomic_inc_unchecked(v);
57471+}
57472+#endif
57473+
57474 static inline void atomic_long_dec(atomic_long_t *l)
57475 {
57476 atomic_t *v = (atomic_t *)l;
57477@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
57478 atomic_dec(v);
57479 }
57480
57481+#ifdef CONFIG_PAX_REFCOUNT
57482+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57483+{
57484+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57485+
57486+ atomic_dec_unchecked(v);
57487+}
57488+#endif
57489+
57490 static inline void atomic_long_add(long i, atomic_long_t *l)
57491 {
57492 atomic_t *v = (atomic_t *)l;
57493@@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
57494 atomic_add(i, v);
57495 }
57496
57497+#ifdef CONFIG_PAX_REFCOUNT
57498+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57499+{
57500+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57501+
57502+ atomic_add_unchecked(i, v);
57503+}
57504+#endif
57505+
57506 static inline void atomic_long_sub(long i, atomic_long_t *l)
57507 {
57508 atomic_t *v = (atomic_t *)l;
57509@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
57510 atomic_sub(i, v);
57511 }
57512
57513+#ifdef CONFIG_PAX_REFCOUNT
57514+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57515+{
57516+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57517+
57518+ atomic_sub_unchecked(i, v);
57519+}
57520+#endif
57521+
57522 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57523 {
57524 atomic_t *v = (atomic_t *)l;
57525@@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
57526 return (long)atomic_inc_return(v);
57527 }
57528
57529+#ifdef CONFIG_PAX_REFCOUNT
57530+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57531+{
57532+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57533+
57534+ return (long)atomic_inc_return_unchecked(v);
57535+}
57536+#endif
57537+
57538 static inline long atomic_long_dec_return(atomic_long_t *l)
57539 {
57540 atomic_t *v = (atomic_t *)l;
57541@@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
57542
57543 #endif /* BITS_PER_LONG == 64 */
57544
57545+#ifdef CONFIG_PAX_REFCOUNT
57546+static inline void pax_refcount_needs_these_functions(void)
57547+{
57548+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
57549+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
57550+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
57551+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
57552+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
57553+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
57554+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
57555+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
57556+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
57557+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
57558+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
57559+
57560+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
57561+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
57562+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
57563+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
57564+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
57565+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
57566+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
57567+}
57568+#else
57569+#define atomic_read_unchecked(v) atomic_read(v)
57570+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
57571+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
57572+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
57573+#define atomic_inc_unchecked(v) atomic_inc(v)
57574+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
57575+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
57576+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
57577+#define atomic_dec_unchecked(v) atomic_dec(v)
57578+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
57579+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
57580+
57581+#define atomic_long_read_unchecked(v) atomic_long_read(v)
57582+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
57583+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
57584+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
57585+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
57586+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
57587+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
57588+#endif
57589+
57590 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
57591diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
57592index b18ce4f..2ee2843 100644
57593--- a/include/asm-generic/atomic64.h
57594+++ b/include/asm-generic/atomic64.h
57595@@ -16,6 +16,8 @@ typedef struct {
57596 long long counter;
57597 } atomic64_t;
57598
57599+typedef atomic64_t atomic64_unchecked_t;
57600+
57601 #define ATOMIC64_INIT(i) { (i) }
57602
57603 extern long long atomic64_read(const atomic64_t *v);
57604@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
57605 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
57606 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
57607
57608+#define atomic64_read_unchecked(v) atomic64_read(v)
57609+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
57610+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
57611+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
57612+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
57613+#define atomic64_inc_unchecked(v) atomic64_inc(v)
57614+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
57615+#define atomic64_dec_unchecked(v) atomic64_dec(v)
57616+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
57617+
57618 #endif /* _ASM_GENERIC_ATOMIC64_H */
57619diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
57620index 1bfcfe5..e04c5c9 100644
57621--- a/include/asm-generic/cache.h
57622+++ b/include/asm-generic/cache.h
57623@@ -6,7 +6,7 @@
57624 * cache lines need to provide their own cache.h.
57625 */
57626
57627-#define L1_CACHE_SHIFT 5
57628-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
57629+#define L1_CACHE_SHIFT 5UL
57630+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
57631
57632 #endif /* __ASM_GENERIC_CACHE_H */
57633diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
57634index 1ca3efc..e3dc852 100644
57635--- a/include/asm-generic/int-l64.h
57636+++ b/include/asm-generic/int-l64.h
57637@@ -46,6 +46,8 @@ typedef unsigned int u32;
57638 typedef signed long s64;
57639 typedef unsigned long u64;
57640
57641+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
57642+
57643 #define S8_C(x) x
57644 #define U8_C(x) x ## U
57645 #define S16_C(x) x
57646diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
57647index f394147..b6152b9 100644
57648--- a/include/asm-generic/int-ll64.h
57649+++ b/include/asm-generic/int-ll64.h
57650@@ -51,6 +51,8 @@ typedef unsigned int u32;
57651 typedef signed long long s64;
57652 typedef unsigned long long u64;
57653
57654+typedef unsigned long long intoverflow_t;
57655+
57656 #define S8_C(x) x
57657 #define U8_C(x) x ## U
57658 #define S16_C(x) x
57659diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
57660index 0232ccb..13d9165 100644
57661--- a/include/asm-generic/kmap_types.h
57662+++ b/include/asm-generic/kmap_types.h
57663@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
57664 KMAP_D(17) KM_NMI,
57665 KMAP_D(18) KM_NMI_PTE,
57666 KMAP_D(19) KM_KDB,
57667+KMAP_D(20) KM_CLEARPAGE,
57668 /*
57669 * Remember to update debug_kmap_atomic() when adding new kmap types!
57670 */
57671-KMAP_D(20) KM_TYPE_NR
57672+KMAP_D(21) KM_TYPE_NR
57673 };
57674
57675 #undef KMAP_D
57676diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
57677index 725612b..9cc513a 100644
57678--- a/include/asm-generic/pgtable-nopmd.h
57679+++ b/include/asm-generic/pgtable-nopmd.h
57680@@ -1,14 +1,19 @@
57681 #ifndef _PGTABLE_NOPMD_H
57682 #define _PGTABLE_NOPMD_H
57683
57684-#ifndef __ASSEMBLY__
57685-
57686 #include <asm-generic/pgtable-nopud.h>
57687
57688-struct mm_struct;
57689-
57690 #define __PAGETABLE_PMD_FOLDED
57691
57692+#define PMD_SHIFT PUD_SHIFT
57693+#define PTRS_PER_PMD 1
57694+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
57695+#define PMD_MASK (~(PMD_SIZE-1))
57696+
57697+#ifndef __ASSEMBLY__
57698+
57699+struct mm_struct;
57700+
57701 /*
57702 * Having the pmd type consist of a pud gets the size right, and allows
57703 * us to conceptually access the pud entry that this pmd is folded into
57704@@ -16,11 +21,6 @@ struct mm_struct;
57705 */
57706 typedef struct { pud_t pud; } pmd_t;
57707
57708-#define PMD_SHIFT PUD_SHIFT
57709-#define PTRS_PER_PMD 1
57710-#define PMD_SIZE (1UL << PMD_SHIFT)
57711-#define PMD_MASK (~(PMD_SIZE-1))
57712-
57713 /*
57714 * The "pud_xxx()" functions here are trivial for a folded two-level
57715 * setup: the pmd is never bad, and a pmd always exists (as it's folded
57716diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
57717index 810431d..ccc3638 100644
57718--- a/include/asm-generic/pgtable-nopud.h
57719+++ b/include/asm-generic/pgtable-nopud.h
57720@@ -1,10 +1,15 @@
57721 #ifndef _PGTABLE_NOPUD_H
57722 #define _PGTABLE_NOPUD_H
57723
57724-#ifndef __ASSEMBLY__
57725-
57726 #define __PAGETABLE_PUD_FOLDED
57727
57728+#define PUD_SHIFT PGDIR_SHIFT
57729+#define PTRS_PER_PUD 1
57730+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
57731+#define PUD_MASK (~(PUD_SIZE-1))
57732+
57733+#ifndef __ASSEMBLY__
57734+
57735 /*
57736 * Having the pud type consist of a pgd gets the size right, and allows
57737 * us to conceptually access the pgd entry that this pud is folded into
57738@@ -12,11 +17,6 @@
57739 */
57740 typedef struct { pgd_t pgd; } pud_t;
57741
57742-#define PUD_SHIFT PGDIR_SHIFT
57743-#define PTRS_PER_PUD 1
57744-#define PUD_SIZE (1UL << PUD_SHIFT)
57745-#define PUD_MASK (~(PUD_SIZE-1))
57746-
57747 /*
57748 * The "pgd_xxx()" functions here are trivial for a folded two-level
57749 * setup: the pud is never bad, and a pud always exists (as it's folded
57750diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
57751index 76bff2b..c7a14e2 100644
57752--- a/include/asm-generic/pgtable.h
57753+++ b/include/asm-generic/pgtable.h
57754@@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
57755 #endif /* __HAVE_ARCH_PMD_WRITE */
57756 #endif
57757
57758+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
57759+static inline unsigned long pax_open_kernel(void) { return 0; }
57760+#endif
57761+
57762+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
57763+static inline unsigned long pax_close_kernel(void) { return 0; }
57764+#endif
57765+
57766 #endif /* !__ASSEMBLY__ */
57767
57768 #endif /* _ASM_GENERIC_PGTABLE_H */
57769diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
57770index b5e2e4c..6a5373e 100644
57771--- a/include/asm-generic/vmlinux.lds.h
57772+++ b/include/asm-generic/vmlinux.lds.h
57773@@ -217,6 +217,7 @@
57774 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
57775 VMLINUX_SYMBOL(__start_rodata) = .; \
57776 *(.rodata) *(.rodata.*) \
57777+ *(.data..read_only) \
57778 *(__vermagic) /* Kernel version magic */ \
57779 . = ALIGN(8); \
57780 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
57781@@ -722,17 +723,18 @@
57782 * section in the linker script will go there too. @phdr should have
57783 * a leading colon.
57784 *
57785- * Note that this macros defines __per_cpu_load as an absolute symbol.
57786+ * Note that this macros defines per_cpu_load as an absolute symbol.
57787 * If there is no need to put the percpu section at a predetermined
57788 * address, use PERCPU_SECTION.
57789 */
57790 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
57791- VMLINUX_SYMBOL(__per_cpu_load) = .; \
57792- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
57793+ per_cpu_load = .; \
57794+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
57795 - LOAD_OFFSET) { \
57796+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
57797 PERCPU_INPUT(cacheline) \
57798 } phdr \
57799- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
57800+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
57801
57802 /**
57803 * PERCPU_SECTION - define output section for percpu area, simple version
57804diff --git a/include/drm/drmP.h b/include/drm/drmP.h
57805index bf4b2dc..2d0762f 100644
57806--- a/include/drm/drmP.h
57807+++ b/include/drm/drmP.h
57808@@ -72,6 +72,7 @@
57809 #include <linux/workqueue.h>
57810 #include <linux/poll.h>
57811 #include <asm/pgalloc.h>
57812+#include <asm/local.h>
57813 #include "drm.h"
57814
57815 #include <linux/idr.h>
57816@@ -1038,7 +1039,7 @@ struct drm_device {
57817
57818 /** \name Usage Counters */
57819 /*@{ */
57820- int open_count; /**< Outstanding files open */
57821+ local_t open_count; /**< Outstanding files open */
57822 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57823 atomic_t vma_count; /**< Outstanding vma areas open */
57824 int buf_use; /**< Buffers in use -- cannot alloc */
57825@@ -1049,7 +1050,7 @@ struct drm_device {
57826 /*@{ */
57827 unsigned long counters;
57828 enum drm_stat_type types[15];
57829- atomic_t counts[15];
57830+ atomic_unchecked_t counts[15];
57831 /*@} */
57832
57833 struct list_head filelist;
57834diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
57835index 73b0712..0b7ef2f 100644
57836--- a/include/drm/drm_crtc_helper.h
57837+++ b/include/drm/drm_crtc_helper.h
57838@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
57839
57840 /* disable crtc when not in use - more explicit than dpms off */
57841 void (*disable)(struct drm_crtc *crtc);
57842-};
57843+} __no_const;
57844
57845 struct drm_encoder_helper_funcs {
57846 void (*dpms)(struct drm_encoder *encoder, int mode);
57847@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
57848 struct drm_connector *connector);
57849 /* disable encoder when not in use - more explicit than dpms off */
57850 void (*disable)(struct drm_encoder *encoder);
57851-};
57852+} __no_const;
57853
57854 struct drm_connector_helper_funcs {
57855 int (*get_modes)(struct drm_connector *connector);
57856diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
57857index 26c1f78..6722682 100644
57858--- a/include/drm/ttm/ttm_memory.h
57859+++ b/include/drm/ttm/ttm_memory.h
57860@@ -47,7 +47,7 @@
57861
57862 struct ttm_mem_shrink {
57863 int (*do_shrink) (struct ttm_mem_shrink *);
57864-};
57865+} __no_const;
57866
57867 /**
57868 * struct ttm_mem_global - Global memory accounting structure.
57869diff --git a/include/linux/a.out.h b/include/linux/a.out.h
57870index e86dfca..40cc55f 100644
57871--- a/include/linux/a.out.h
57872+++ b/include/linux/a.out.h
57873@@ -39,6 +39,14 @@ enum machine_type {
57874 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57875 };
57876
57877+/* Constants for the N_FLAGS field */
57878+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57879+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57880+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57881+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57882+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57883+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57884+
57885 #if !defined (N_MAGIC)
57886 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57887 #endif
57888diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
57889index 49a83ca..df96b54 100644
57890--- a/include/linux/atmdev.h
57891+++ b/include/linux/atmdev.h
57892@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57893 #endif
57894
57895 struct k_atm_aal_stats {
57896-#define __HANDLE_ITEM(i) atomic_t i
57897+#define __HANDLE_ITEM(i) atomic_unchecked_t i
57898 __AAL_STAT_ITEMS
57899 #undef __HANDLE_ITEM
57900 };
57901diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
57902index fd88a39..8a801b4 100644
57903--- a/include/linux/binfmts.h
57904+++ b/include/linux/binfmts.h
57905@@ -18,7 +18,7 @@ struct pt_regs;
57906 #define BINPRM_BUF_SIZE 128
57907
57908 #ifdef __KERNEL__
57909-#include <linux/list.h>
57910+#include <linux/sched.h>
57911
57912 #define CORENAME_MAX_SIZE 128
57913
57914@@ -58,6 +58,7 @@ struct linux_binprm {
57915 unsigned interp_flags;
57916 unsigned interp_data;
57917 unsigned long loader, exec;
57918+ char tcomm[TASK_COMM_LEN];
57919 };
57920
57921 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
57922@@ -88,6 +89,7 @@ struct linux_binfmt {
57923 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57924 int (*load_shlib)(struct file *);
57925 int (*core_dump)(struct coredump_params *cprm);
57926+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57927 unsigned long min_coredump; /* minimal dump size */
57928 };
57929
57930diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
57931index 0ed1eb0..3ab569b 100644
57932--- a/include/linux/blkdev.h
57933+++ b/include/linux/blkdev.h
57934@@ -1315,7 +1315,7 @@ struct block_device_operations {
57935 /* this callback is with swap_lock and sometimes page table lock held */
57936 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
57937 struct module *owner;
57938-};
57939+} __do_const;
57940
57941 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57942 unsigned long);
57943diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
57944index 4d1a074..88f929a 100644
57945--- a/include/linux/blktrace_api.h
57946+++ b/include/linux/blktrace_api.h
57947@@ -162,7 +162,7 @@ struct blk_trace {
57948 struct dentry *dir;
57949 struct dentry *dropped_file;
57950 struct dentry *msg_file;
57951- atomic_t dropped;
57952+ atomic_unchecked_t dropped;
57953 };
57954
57955 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57956diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
57957index 83195fb..0b0f77d 100644
57958--- a/include/linux/byteorder/little_endian.h
57959+++ b/include/linux/byteorder/little_endian.h
57960@@ -42,51 +42,51 @@
57961
57962 static inline __le64 __cpu_to_le64p(const __u64 *p)
57963 {
57964- return (__force __le64)*p;
57965+ return (__force const __le64)*p;
57966 }
57967 static inline __u64 __le64_to_cpup(const __le64 *p)
57968 {
57969- return (__force __u64)*p;
57970+ return (__force const __u64)*p;
57971 }
57972 static inline __le32 __cpu_to_le32p(const __u32 *p)
57973 {
57974- return (__force __le32)*p;
57975+ return (__force const __le32)*p;
57976 }
57977 static inline __u32 __le32_to_cpup(const __le32 *p)
57978 {
57979- return (__force __u32)*p;
57980+ return (__force const __u32)*p;
57981 }
57982 static inline __le16 __cpu_to_le16p(const __u16 *p)
57983 {
57984- return (__force __le16)*p;
57985+ return (__force const __le16)*p;
57986 }
57987 static inline __u16 __le16_to_cpup(const __le16 *p)
57988 {
57989- return (__force __u16)*p;
57990+ return (__force const __u16)*p;
57991 }
57992 static inline __be64 __cpu_to_be64p(const __u64 *p)
57993 {
57994- return (__force __be64)__swab64p(p);
57995+ return (__force const __be64)__swab64p(p);
57996 }
57997 static inline __u64 __be64_to_cpup(const __be64 *p)
57998 {
57999- return __swab64p((__u64 *)p);
58000+ return __swab64p((const __u64 *)p);
58001 }
58002 static inline __be32 __cpu_to_be32p(const __u32 *p)
58003 {
58004- return (__force __be32)__swab32p(p);
58005+ return (__force const __be32)__swab32p(p);
58006 }
58007 static inline __u32 __be32_to_cpup(const __be32 *p)
58008 {
58009- return __swab32p((__u32 *)p);
58010+ return __swab32p((const __u32 *)p);
58011 }
58012 static inline __be16 __cpu_to_be16p(const __u16 *p)
58013 {
58014- return (__force __be16)__swab16p(p);
58015+ return (__force const __be16)__swab16p(p);
58016 }
58017 static inline __u16 __be16_to_cpup(const __be16 *p)
58018 {
58019- return __swab16p((__u16 *)p);
58020+ return __swab16p((const __u16 *)p);
58021 }
58022 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
58023 #define __le64_to_cpus(x) do { (void)(x); } while (0)
58024diff --git a/include/linux/cache.h b/include/linux/cache.h
58025index 4c57065..4307975 100644
58026--- a/include/linux/cache.h
58027+++ b/include/linux/cache.h
58028@@ -16,6 +16,10 @@
58029 #define __read_mostly
58030 #endif
58031
58032+#ifndef __read_only
58033+#define __read_only __read_mostly
58034+#endif
58035+
58036 #ifndef ____cacheline_aligned
58037 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
58038 #endif
58039diff --git a/include/linux/capability.h b/include/linux/capability.h
58040index a63d13d..069bfd5 100644
58041--- a/include/linux/capability.h
58042+++ b/include/linux/capability.h
58043@@ -548,6 +548,9 @@ extern bool capable(int cap);
58044 extern bool ns_capable(struct user_namespace *ns, int cap);
58045 extern bool task_ns_capable(struct task_struct *t, int cap);
58046 extern bool nsown_capable(int cap);
58047+extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
58048+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
58049+extern bool capable_nolog(int cap);
58050
58051 /* audit system wants to get cap info from files as well */
58052 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
58053diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
58054index 04ffb2e..6799180 100644
58055--- a/include/linux/cleancache.h
58056+++ b/include/linux/cleancache.h
58057@@ -31,7 +31,7 @@ struct cleancache_ops {
58058 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
58059 void (*flush_inode)(int, struct cleancache_filekey);
58060 void (*flush_fs)(int);
58061-};
58062+} __no_const;
58063
58064 extern struct cleancache_ops
58065 cleancache_register_ops(struct cleancache_ops *ops);
58066diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
58067index dfadc96..c0e70c1 100644
58068--- a/include/linux/compiler-gcc4.h
58069+++ b/include/linux/compiler-gcc4.h
58070@@ -31,6 +31,12 @@
58071
58072
58073 #if __GNUC_MINOR__ >= 5
58074+
58075+#ifdef CONSTIFY_PLUGIN
58076+#define __no_const __attribute__((no_const))
58077+#define __do_const __attribute__((do_const))
58078+#endif
58079+
58080 /*
58081 * Mark a position in code as unreachable. This can be used to
58082 * suppress control flow warnings after asm blocks that transfer
58083@@ -46,6 +52,11 @@
58084 #define __noclone __attribute__((__noclone__))
58085
58086 #endif
58087+
58088+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
58089+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
58090+#define __bos0(ptr) __bos((ptr), 0)
58091+#define __bos1(ptr) __bos((ptr), 1)
58092 #endif
58093
58094 #if __GNUC_MINOR__ > 0
58095diff --git a/include/linux/compiler.h b/include/linux/compiler.h
58096index 320d6c9..8573a1c 100644
58097--- a/include/linux/compiler.h
58098+++ b/include/linux/compiler.h
58099@@ -5,31 +5,62 @@
58100
58101 #ifdef __CHECKER__
58102 # define __user __attribute__((noderef, address_space(1)))
58103+# define __force_user __force __user
58104 # define __kernel __attribute__((address_space(0)))
58105+# define __force_kernel __force __kernel
58106 # define __safe __attribute__((safe))
58107 # define __force __attribute__((force))
58108 # define __nocast __attribute__((nocast))
58109 # define __iomem __attribute__((noderef, address_space(2)))
58110+# define __force_iomem __force __iomem
58111 # define __acquires(x) __attribute__((context(x,0,1)))
58112 # define __releases(x) __attribute__((context(x,1,0)))
58113 # define __acquire(x) __context__(x,1)
58114 # define __release(x) __context__(x,-1)
58115 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
58116 # define __percpu __attribute__((noderef, address_space(3)))
58117+# define __force_percpu __force __percpu
58118 #ifdef CONFIG_SPARSE_RCU_POINTER
58119 # define __rcu __attribute__((noderef, address_space(4)))
58120+# define __force_rcu __force __rcu
58121 #else
58122 # define __rcu
58123+# define __force_rcu
58124 #endif
58125 extern void __chk_user_ptr(const volatile void __user *);
58126 extern void __chk_io_ptr(const volatile void __iomem *);
58127+#elif defined(CHECKER_PLUGIN)
58128+//# define __user
58129+//# define __force_user
58130+//# define __kernel
58131+//# define __force_kernel
58132+# define __safe
58133+# define __force
58134+# define __nocast
58135+# define __iomem
58136+# define __force_iomem
58137+# define __chk_user_ptr(x) (void)0
58138+# define __chk_io_ptr(x) (void)0
58139+# define __builtin_warning(x, y...) (1)
58140+# define __acquires(x)
58141+# define __releases(x)
58142+# define __acquire(x) (void)0
58143+# define __release(x) (void)0
58144+# define __cond_lock(x,c) (c)
58145+# define __percpu
58146+# define __force_percpu
58147+# define __rcu
58148+# define __force_rcu
58149 #else
58150 # define __user
58151+# define __force_user
58152 # define __kernel
58153+# define __force_kernel
58154 # define __safe
58155 # define __force
58156 # define __nocast
58157 # define __iomem
58158+# define __force_iomem
58159 # define __chk_user_ptr(x) (void)0
58160 # define __chk_io_ptr(x) (void)0
58161 # define __builtin_warning(x, y...) (1)
58162@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
58163 # define __release(x) (void)0
58164 # define __cond_lock(x,c) (c)
58165 # define __percpu
58166+# define __force_percpu
58167 # define __rcu
58168+# define __force_rcu
58169 #endif
58170
58171 #ifdef __KERNEL__
58172@@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
58173 # define __attribute_const__ /* unimplemented */
58174 #endif
58175
58176+#ifndef __no_const
58177+# define __no_const
58178+#endif
58179+
58180+#ifndef __do_const
58181+# define __do_const
58182+#endif
58183+
58184 /*
58185 * Tell gcc if a function is cold. The compiler will assume any path
58186 * directly leading to the call is unlikely.
58187@@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
58188 #define __cold
58189 #endif
58190
58191+#ifndef __alloc_size
58192+#define __alloc_size(...)
58193+#endif
58194+
58195+#ifndef __bos
58196+#define __bos(ptr, arg)
58197+#endif
58198+
58199+#ifndef __bos0
58200+#define __bos0(ptr)
58201+#endif
58202+
58203+#ifndef __bos1
58204+#define __bos1(ptr)
58205+#endif
58206+
58207 /* Simple shorthand for a section definition */
58208 #ifndef __section
58209 # define __section(S) __attribute__ ((__section__(#S)))
58210@@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
58211 * use is to mediate communication between process-level code and irq/NMI
58212 * handlers, all running on the same CPU.
58213 */
58214-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
58215+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
58216+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
58217
58218 #endif /* __LINUX_COMPILER_H */
58219diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
58220index e9eaec5..bfeb9bb 100644
58221--- a/include/linux/cpuset.h
58222+++ b/include/linux/cpuset.h
58223@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void)
58224 * nodemask.
58225 */
58226 smp_mb();
58227- --ACCESS_ONCE(current->mems_allowed_change_disable);
58228+ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
58229 }
58230
58231 static inline void set_mems_allowed(nodemask_t nodemask)
58232diff --git a/include/linux/cred.h b/include/linux/cred.h
58233index 4030896..8d6f342 100644
58234--- a/include/linux/cred.h
58235+++ b/include/linux/cred.h
58236@@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
58237 static inline void validate_process_creds(void)
58238 {
58239 }
58240+static inline void validate_task_creds(struct task_struct *task)
58241+{
58242+}
58243 #endif
58244
58245 /**
58246diff --git a/include/linux/crypto.h b/include/linux/crypto.h
58247index 8a94217..15d49e3 100644
58248--- a/include/linux/crypto.h
58249+++ b/include/linux/crypto.h
58250@@ -365,7 +365,7 @@ struct cipher_tfm {
58251 const u8 *key, unsigned int keylen);
58252 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
58253 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
58254-};
58255+} __no_const;
58256
58257 struct hash_tfm {
58258 int (*init)(struct hash_desc *desc);
58259@@ -386,13 +386,13 @@ struct compress_tfm {
58260 int (*cot_decompress)(struct crypto_tfm *tfm,
58261 const u8 *src, unsigned int slen,
58262 u8 *dst, unsigned int *dlen);
58263-};
58264+} __no_const;
58265
58266 struct rng_tfm {
58267 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
58268 unsigned int dlen);
58269 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
58270-};
58271+} __no_const;
58272
58273 #define crt_ablkcipher crt_u.ablkcipher
58274 #define crt_aead crt_u.aead
58275diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
58276index 7925bf0..d5143d2 100644
58277--- a/include/linux/decompress/mm.h
58278+++ b/include/linux/decompress/mm.h
58279@@ -77,7 +77,7 @@ static void free(void *where)
58280 * warnings when not needed (indeed large_malloc / large_free are not
58281 * needed by inflate */
58282
58283-#define malloc(a) kmalloc(a, GFP_KERNEL)
58284+#define malloc(a) kmalloc((a), GFP_KERNEL)
58285 #define free(a) kfree(a)
58286
58287 #define large_malloc(a) vmalloc(a)
58288diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
58289index e13117c..e9fc938 100644
58290--- a/include/linux/dma-mapping.h
58291+++ b/include/linux/dma-mapping.h
58292@@ -46,7 +46,7 @@ struct dma_map_ops {
58293 u64 (*get_required_mask)(struct device *dev);
58294 #endif
58295 int is_phys;
58296-};
58297+} __do_const;
58298
58299 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
58300
58301diff --git a/include/linux/efi.h b/include/linux/efi.h
58302index 2362a0b..cfaf8fcc 100644
58303--- a/include/linux/efi.h
58304+++ b/include/linux/efi.h
58305@@ -446,7 +446,7 @@ struct efivar_operations {
58306 efi_get_variable_t *get_variable;
58307 efi_get_next_variable_t *get_next_variable;
58308 efi_set_variable_t *set_variable;
58309-};
58310+} __no_const;
58311
58312 struct efivars {
58313 /*
58314diff --git a/include/linux/elf.h b/include/linux/elf.h
58315index 31f0508..5421c01 100644
58316--- a/include/linux/elf.h
58317+++ b/include/linux/elf.h
58318@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
58319 #define PT_GNU_EH_FRAME 0x6474e550
58320
58321 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
58322+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
58323+
58324+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
58325+
58326+/* Constants for the e_flags field */
58327+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
58328+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
58329+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
58330+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
58331+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
58332+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
58333
58334 /*
58335 * Extended Numbering
58336@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
58337 #define DT_DEBUG 21
58338 #define DT_TEXTREL 22
58339 #define DT_JMPREL 23
58340+#define DT_FLAGS 30
58341+ #define DF_TEXTREL 0x00000004
58342 #define DT_ENCODING 32
58343 #define OLD_DT_LOOS 0x60000000
58344 #define DT_LOOS 0x6000000d
58345@@ -252,6 +265,19 @@ typedef struct elf64_hdr {
58346 #define PF_W 0x2
58347 #define PF_X 0x1
58348
58349+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
58350+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
58351+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
58352+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
58353+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
58354+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
58355+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
58356+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
58357+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
58358+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
58359+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
58360+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
58361+
58362 typedef struct elf32_phdr{
58363 Elf32_Word p_type;
58364 Elf32_Off p_offset;
58365@@ -344,6 +370,8 @@ typedef struct elf64_shdr {
58366 #define EI_OSABI 7
58367 #define EI_PAD 8
58368
58369+#define EI_PAX 14
58370+
58371 #define ELFMAG0 0x7f /* EI_MAG */
58372 #define ELFMAG1 'E'
58373 #define ELFMAG2 'L'
58374@@ -423,6 +451,7 @@ extern Elf32_Dyn _DYNAMIC [];
58375 #define elf_note elf32_note
58376 #define elf_addr_t Elf32_Off
58377 #define Elf_Half Elf32_Half
58378+#define elf_dyn Elf32_Dyn
58379
58380 #else
58381
58382@@ -433,6 +462,7 @@ extern Elf64_Dyn _DYNAMIC [];
58383 #define elf_note elf64_note
58384 #define elf_addr_t Elf64_Off
58385 #define Elf_Half Elf64_Half
58386+#define elf_dyn Elf64_Dyn
58387
58388 #endif
58389
58390diff --git a/include/linux/filter.h b/include/linux/filter.h
58391index 8eeb205..d59bfa2 100644
58392--- a/include/linux/filter.h
58393+++ b/include/linux/filter.h
58394@@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
58395
58396 struct sk_buff;
58397 struct sock;
58398+struct bpf_jit_work;
58399
58400 struct sk_filter
58401 {
58402@@ -141,6 +142,9 @@ struct sk_filter
58403 unsigned int len; /* Number of filter blocks */
58404 unsigned int (*bpf_func)(const struct sk_buff *skb,
58405 const struct sock_filter *filter);
58406+#ifdef CONFIG_BPF_JIT
58407+ struct bpf_jit_work *work;
58408+#endif
58409 struct rcu_head rcu;
58410 struct sock_filter insns[0];
58411 };
58412diff --git a/include/linux/firewire.h b/include/linux/firewire.h
58413index 84ccf8e..2e9b14c 100644
58414--- a/include/linux/firewire.h
58415+++ b/include/linux/firewire.h
58416@@ -428,7 +428,7 @@ struct fw_iso_context {
58417 union {
58418 fw_iso_callback_t sc;
58419 fw_iso_mc_callback_t mc;
58420- } callback;
58421+ } __no_const callback;
58422 void *callback_data;
58423 };
58424
58425diff --git a/include/linux/fs.h b/include/linux/fs.h
58426index 10b2288..09180e4 100644
58427--- a/include/linux/fs.h
58428+++ b/include/linux/fs.h
58429@@ -1609,7 +1609,8 @@ struct file_operations {
58430 int (*setlease)(struct file *, long, struct file_lock **);
58431 long (*fallocate)(struct file *file, int mode, loff_t offset,
58432 loff_t len);
58433-};
58434+} __do_const;
58435+typedef struct file_operations __no_const file_operations_no_const;
58436
58437 struct inode_operations {
58438 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
58439diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
58440index 003dc0f..3c4ea97 100644
58441--- a/include/linux/fs_struct.h
58442+++ b/include/linux/fs_struct.h
58443@@ -6,7 +6,7 @@
58444 #include <linux/seqlock.h>
58445
58446 struct fs_struct {
58447- int users;
58448+ atomic_t users;
58449 spinlock_t lock;
58450 seqcount_t seq;
58451 int umask;
58452diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
58453index ce31408..b1ad003 100644
58454--- a/include/linux/fscache-cache.h
58455+++ b/include/linux/fscache-cache.h
58456@@ -102,7 +102,7 @@ struct fscache_operation {
58457 fscache_operation_release_t release;
58458 };
58459
58460-extern atomic_t fscache_op_debug_id;
58461+extern atomic_unchecked_t fscache_op_debug_id;
58462 extern void fscache_op_work_func(struct work_struct *work);
58463
58464 extern void fscache_enqueue_operation(struct fscache_operation *);
58465@@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
58466 {
58467 INIT_WORK(&op->work, fscache_op_work_func);
58468 atomic_set(&op->usage, 1);
58469- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
58470+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
58471 op->processor = processor;
58472 op->release = release;
58473 INIT_LIST_HEAD(&op->pend_link);
58474diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
58475index 2a53f10..0187fdf 100644
58476--- a/include/linux/fsnotify.h
58477+++ b/include/linux/fsnotify.h
58478@@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
58479 */
58480 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
58481 {
58482- return kstrdup(name, GFP_KERNEL);
58483+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
58484 }
58485
58486 /*
58487diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
58488index 91d0e0a3..035666b 100644
58489--- a/include/linux/fsnotify_backend.h
58490+++ b/include/linux/fsnotify_backend.h
58491@@ -105,6 +105,7 @@ struct fsnotify_ops {
58492 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
58493 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
58494 };
58495+typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
58496
58497 /*
58498 * A group is a "thing" that wants to receive notification about filesystem
58499diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
58500index c3da42d..c70e0df 100644
58501--- a/include/linux/ftrace_event.h
58502+++ b/include/linux/ftrace_event.h
58503@@ -97,7 +97,7 @@ struct trace_event_functions {
58504 trace_print_func raw;
58505 trace_print_func hex;
58506 trace_print_func binary;
58507-};
58508+} __no_const;
58509
58510 struct trace_event {
58511 struct hlist_node node;
58512@@ -254,7 +254,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
58513 extern int trace_add_event_call(struct ftrace_event_call *call);
58514 extern void trace_remove_event_call(struct ftrace_event_call *call);
58515
58516-#define is_signed_type(type) (((type)(-1)) < 0)
58517+#define is_signed_type(type) (((type)(-1)) < (type)1)
58518
58519 int trace_set_clr_event(const char *system, const char *event, int set);
58520
58521diff --git a/include/linux/genhd.h b/include/linux/genhd.h
58522index 6d18f35..ab71e2c 100644
58523--- a/include/linux/genhd.h
58524+++ b/include/linux/genhd.h
58525@@ -185,7 +185,7 @@ struct gendisk {
58526 struct kobject *slave_dir;
58527
58528 struct timer_rand_state *random;
58529- atomic_t sync_io; /* RAID */
58530+ atomic_unchecked_t sync_io; /* RAID */
58531 struct disk_events *ev;
58532 #ifdef CONFIG_BLK_DEV_INTEGRITY
58533 struct blk_integrity *integrity;
58534diff --git a/include/linux/gracl.h b/include/linux/gracl.h
58535new file mode 100644
58536index 0000000..8a130b6
58537--- /dev/null
58538+++ b/include/linux/gracl.h
58539@@ -0,0 +1,319 @@
58540+#ifndef GR_ACL_H
58541+#define GR_ACL_H
58542+
58543+#include <linux/grdefs.h>
58544+#include <linux/resource.h>
58545+#include <linux/capability.h>
58546+#include <linux/dcache.h>
58547+#include <asm/resource.h>
58548+
58549+/* Major status information */
58550+
58551+#define GR_VERSION "grsecurity 2.9"
58552+#define GRSECURITY_VERSION 0x2900
58553+
58554+enum {
58555+ GR_SHUTDOWN = 0,
58556+ GR_ENABLE = 1,
58557+ GR_SPROLE = 2,
58558+ GR_RELOAD = 3,
58559+ GR_SEGVMOD = 4,
58560+ GR_STATUS = 5,
58561+ GR_UNSPROLE = 6,
58562+ GR_PASSSET = 7,
58563+ GR_SPROLEPAM = 8,
58564+};
58565+
58566+/* Password setup definitions
58567+ * kernel/grhash.c */
58568+enum {
58569+ GR_PW_LEN = 128,
58570+ GR_SALT_LEN = 16,
58571+ GR_SHA_LEN = 32,
58572+};
58573+
58574+enum {
58575+ GR_SPROLE_LEN = 64,
58576+};
58577+
58578+enum {
58579+ GR_NO_GLOB = 0,
58580+ GR_REG_GLOB,
58581+ GR_CREATE_GLOB
58582+};
58583+
58584+#define GR_NLIMITS 32
58585+
58586+/* Begin Data Structures */
58587+
58588+struct sprole_pw {
58589+ unsigned char *rolename;
58590+ unsigned char salt[GR_SALT_LEN];
58591+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
58592+};
58593+
58594+struct name_entry {
58595+ __u32 key;
58596+ ino_t inode;
58597+ dev_t device;
58598+ char *name;
58599+ __u16 len;
58600+ __u8 deleted;
58601+ struct name_entry *prev;
58602+ struct name_entry *next;
58603+};
58604+
58605+struct inodev_entry {
58606+ struct name_entry *nentry;
58607+ struct inodev_entry *prev;
58608+ struct inodev_entry *next;
58609+};
58610+
58611+struct acl_role_db {
58612+ struct acl_role_label **r_hash;
58613+ __u32 r_size;
58614+};
58615+
58616+struct inodev_db {
58617+ struct inodev_entry **i_hash;
58618+ __u32 i_size;
58619+};
58620+
58621+struct name_db {
58622+ struct name_entry **n_hash;
58623+ __u32 n_size;
58624+};
58625+
58626+struct crash_uid {
58627+ uid_t uid;
58628+ unsigned long expires;
58629+};
58630+
58631+struct gr_hash_struct {
58632+ void **table;
58633+ void **nametable;
58634+ void *first;
58635+ __u32 table_size;
58636+ __u32 used_size;
58637+ int type;
58638+};
58639+
58640+/* Userspace Grsecurity ACL data structures */
58641+
58642+struct acl_subject_label {
58643+ char *filename;
58644+ ino_t inode;
58645+ dev_t device;
58646+ __u32 mode;
58647+ kernel_cap_t cap_mask;
58648+ kernel_cap_t cap_lower;
58649+ kernel_cap_t cap_invert_audit;
58650+
58651+ struct rlimit res[GR_NLIMITS];
58652+ __u32 resmask;
58653+
58654+ __u8 user_trans_type;
58655+ __u8 group_trans_type;
58656+ uid_t *user_transitions;
58657+ gid_t *group_transitions;
58658+ __u16 user_trans_num;
58659+ __u16 group_trans_num;
58660+
58661+ __u32 sock_families[2];
58662+ __u32 ip_proto[8];
58663+ __u32 ip_type;
58664+ struct acl_ip_label **ips;
58665+ __u32 ip_num;
58666+ __u32 inaddr_any_override;
58667+
58668+ __u32 crashes;
58669+ unsigned long expires;
58670+
58671+ struct acl_subject_label *parent_subject;
58672+ struct gr_hash_struct *hash;
58673+ struct acl_subject_label *prev;
58674+ struct acl_subject_label *next;
58675+
58676+ struct acl_object_label **obj_hash;
58677+ __u32 obj_hash_size;
58678+ __u16 pax_flags;
58679+};
58680+
58681+struct role_allowed_ip {
58682+ __u32 addr;
58683+ __u32 netmask;
58684+
58685+ struct role_allowed_ip *prev;
58686+ struct role_allowed_ip *next;
58687+};
58688+
58689+struct role_transition {
58690+ char *rolename;
58691+
58692+ struct role_transition *prev;
58693+ struct role_transition *next;
58694+};
58695+
58696+struct acl_role_label {
58697+ char *rolename;
58698+ uid_t uidgid;
58699+ __u16 roletype;
58700+
58701+ __u16 auth_attempts;
58702+ unsigned long expires;
58703+
58704+ struct acl_subject_label *root_label;
58705+ struct gr_hash_struct *hash;
58706+
58707+ struct acl_role_label *prev;
58708+ struct acl_role_label *next;
58709+
58710+ struct role_transition *transitions;
58711+ struct role_allowed_ip *allowed_ips;
58712+ uid_t *domain_children;
58713+ __u16 domain_child_num;
58714+
58715+ umode_t umask;
58716+
58717+ struct acl_subject_label **subj_hash;
58718+ __u32 subj_hash_size;
58719+};
58720+
58721+struct user_acl_role_db {
58722+ struct acl_role_label **r_table;
58723+ __u32 num_pointers; /* Number of allocations to track */
58724+ __u32 num_roles; /* Number of roles */
58725+ __u32 num_domain_children; /* Number of domain children */
58726+ __u32 num_subjects; /* Number of subjects */
58727+ __u32 num_objects; /* Number of objects */
58728+};
58729+
58730+struct acl_object_label {
58731+ char *filename;
58732+ ino_t inode;
58733+ dev_t device;
58734+ __u32 mode;
58735+
58736+ struct acl_subject_label *nested;
58737+ struct acl_object_label *globbed;
58738+
58739+ /* next two structures not used */
58740+
58741+ struct acl_object_label *prev;
58742+ struct acl_object_label *next;
58743+};
58744+
58745+struct acl_ip_label {
58746+ char *iface;
58747+ __u32 addr;
58748+ __u32 netmask;
58749+ __u16 low, high;
58750+ __u8 mode;
58751+ __u32 type;
58752+ __u32 proto[8];
58753+
58754+ /* next two structures not used */
58755+
58756+ struct acl_ip_label *prev;
58757+ struct acl_ip_label *next;
58758+};
58759+
58760+struct gr_arg {
58761+ struct user_acl_role_db role_db;
58762+ unsigned char pw[GR_PW_LEN];
58763+ unsigned char salt[GR_SALT_LEN];
58764+ unsigned char sum[GR_SHA_LEN];
58765+ unsigned char sp_role[GR_SPROLE_LEN];
58766+ struct sprole_pw *sprole_pws;
58767+ dev_t segv_device;
58768+ ino_t segv_inode;
58769+ uid_t segv_uid;
58770+ __u16 num_sprole_pws;
58771+ __u16 mode;
58772+};
58773+
58774+struct gr_arg_wrapper {
58775+ struct gr_arg *arg;
58776+ __u32 version;
58777+ __u32 size;
58778+};
58779+
58780+struct subject_map {
58781+ struct acl_subject_label *user;
58782+ struct acl_subject_label *kernel;
58783+ struct subject_map *prev;
58784+ struct subject_map *next;
58785+};
58786+
58787+struct acl_subj_map_db {
58788+ struct subject_map **s_hash;
58789+ __u32 s_size;
58790+};
58791+
58792+/* End Data Structures Section */
58793+
58794+/* Hash functions generated by empirical testing by Brad Spengler
58795+ Makes good use of the low bits of the inode. Generally 0-1 times
58796+ in loop for successful match. 0-3 for unsuccessful match.
58797+ Shift/add algorithm with modulus of table size and an XOR*/
58798+
58799+static __inline__ unsigned int
58800+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58801+{
58802+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
58803+}
58804+
58805+ static __inline__ unsigned int
58806+shash(const struct acl_subject_label *userp, const unsigned int sz)
58807+{
58808+ return ((const unsigned long)userp % sz);
58809+}
58810+
58811+static __inline__ unsigned int
58812+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58813+{
58814+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58815+}
58816+
58817+static __inline__ unsigned int
58818+nhash(const char *name, const __u16 len, const unsigned int sz)
58819+{
58820+ return full_name_hash((const unsigned char *)name, len) % sz;
58821+}
58822+
58823+#define FOR_EACH_ROLE_START(role) \
58824+ role = role_list; \
58825+ while (role) {
58826+
58827+#define FOR_EACH_ROLE_END(role) \
58828+ role = role->prev; \
58829+ }
58830+
58831+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58832+ subj = NULL; \
58833+ iter = 0; \
58834+ while (iter < role->subj_hash_size) { \
58835+ if (subj == NULL) \
58836+ subj = role->subj_hash[iter]; \
58837+ if (subj == NULL) { \
58838+ iter++; \
58839+ continue; \
58840+ }
58841+
58842+#define FOR_EACH_SUBJECT_END(subj,iter) \
58843+ subj = subj->next; \
58844+ if (subj == NULL) \
58845+ iter++; \
58846+ }
58847+
58848+
58849+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58850+ subj = role->hash->first; \
58851+ while (subj != NULL) {
58852+
58853+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58854+ subj = subj->next; \
58855+ }
58856+
58857+#endif
58858+
58859diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
58860new file mode 100644
58861index 0000000..323ecf2
58862--- /dev/null
58863+++ b/include/linux/gralloc.h
58864@@ -0,0 +1,9 @@
58865+#ifndef __GRALLOC_H
58866+#define __GRALLOC_H
58867+
58868+void acl_free_all(void);
58869+int acl_alloc_stack_init(unsigned long size);
58870+void *acl_alloc(unsigned long len);
58871+void *acl_alloc_num(unsigned long num, unsigned long len);
58872+
58873+#endif
58874diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
58875new file mode 100644
58876index 0000000..b30e9bc
58877--- /dev/null
58878+++ b/include/linux/grdefs.h
58879@@ -0,0 +1,140 @@
58880+#ifndef GRDEFS_H
58881+#define GRDEFS_H
58882+
58883+/* Begin grsecurity status declarations */
58884+
58885+enum {
58886+ GR_READY = 0x01,
58887+ GR_STATUS_INIT = 0x00 // disabled state
58888+};
58889+
58890+/* Begin ACL declarations */
58891+
58892+/* Role flags */
58893+
58894+enum {
58895+ GR_ROLE_USER = 0x0001,
58896+ GR_ROLE_GROUP = 0x0002,
58897+ GR_ROLE_DEFAULT = 0x0004,
58898+ GR_ROLE_SPECIAL = 0x0008,
58899+ GR_ROLE_AUTH = 0x0010,
58900+ GR_ROLE_NOPW = 0x0020,
58901+ GR_ROLE_GOD = 0x0040,
58902+ GR_ROLE_LEARN = 0x0080,
58903+ GR_ROLE_TPE = 0x0100,
58904+ GR_ROLE_DOMAIN = 0x0200,
58905+ GR_ROLE_PAM = 0x0400,
58906+ GR_ROLE_PERSIST = 0x0800
58907+};
58908+
58909+/* ACL Subject and Object mode flags */
58910+enum {
58911+ GR_DELETED = 0x80000000
58912+};
58913+
58914+/* ACL Object-only mode flags */
58915+enum {
58916+ GR_READ = 0x00000001,
58917+ GR_APPEND = 0x00000002,
58918+ GR_WRITE = 0x00000004,
58919+ GR_EXEC = 0x00000008,
58920+ GR_FIND = 0x00000010,
58921+ GR_INHERIT = 0x00000020,
58922+ GR_SETID = 0x00000040,
58923+ GR_CREATE = 0x00000080,
58924+ GR_DELETE = 0x00000100,
58925+ GR_LINK = 0x00000200,
58926+ GR_AUDIT_READ = 0x00000400,
58927+ GR_AUDIT_APPEND = 0x00000800,
58928+ GR_AUDIT_WRITE = 0x00001000,
58929+ GR_AUDIT_EXEC = 0x00002000,
58930+ GR_AUDIT_FIND = 0x00004000,
58931+ GR_AUDIT_INHERIT= 0x00008000,
58932+ GR_AUDIT_SETID = 0x00010000,
58933+ GR_AUDIT_CREATE = 0x00020000,
58934+ GR_AUDIT_DELETE = 0x00040000,
58935+ GR_AUDIT_LINK = 0x00080000,
58936+ GR_PTRACERD = 0x00100000,
58937+ GR_NOPTRACE = 0x00200000,
58938+ GR_SUPPRESS = 0x00400000,
58939+ GR_NOLEARN = 0x00800000,
58940+ GR_INIT_TRANSFER= 0x01000000
58941+};
58942+
58943+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58944+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58945+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58946+
58947+/* ACL subject-only mode flags */
58948+enum {
58949+ GR_KILL = 0x00000001,
58950+ GR_VIEW = 0x00000002,
58951+ GR_PROTECTED = 0x00000004,
58952+ GR_LEARN = 0x00000008,
58953+ GR_OVERRIDE = 0x00000010,
58954+ /* just a placeholder, this mode is only used in userspace */
58955+ GR_DUMMY = 0x00000020,
58956+ GR_PROTSHM = 0x00000040,
58957+ GR_KILLPROC = 0x00000080,
58958+ GR_KILLIPPROC = 0x00000100,
58959+ /* just a placeholder, this mode is only used in userspace */
58960+ GR_NOTROJAN = 0x00000200,
58961+ GR_PROTPROCFD = 0x00000400,
58962+ GR_PROCACCT = 0x00000800,
58963+ GR_RELAXPTRACE = 0x00001000,
58964+ GR_NESTED = 0x00002000,
58965+ GR_INHERITLEARN = 0x00004000,
58966+ GR_PROCFIND = 0x00008000,
58967+ GR_POVERRIDE = 0x00010000,
58968+ GR_KERNELAUTH = 0x00020000,
58969+ GR_ATSECURE = 0x00040000,
58970+ GR_SHMEXEC = 0x00080000
58971+};
58972+
58973+enum {
58974+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58975+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58976+ GR_PAX_ENABLE_MPROTECT = 0x0004,
58977+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
58978+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58979+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58980+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58981+ GR_PAX_DISABLE_MPROTECT = 0x0400,
58982+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
58983+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58984+};
58985+
58986+enum {
58987+ GR_ID_USER = 0x01,
58988+ GR_ID_GROUP = 0x02,
58989+};
58990+
58991+enum {
58992+ GR_ID_ALLOW = 0x01,
58993+ GR_ID_DENY = 0x02,
58994+};
58995+
58996+#define GR_CRASH_RES 31
58997+#define GR_UIDTABLE_MAX 500
58998+
58999+/* begin resource learning section */
59000+enum {
59001+ GR_RLIM_CPU_BUMP = 60,
59002+ GR_RLIM_FSIZE_BUMP = 50000,
59003+ GR_RLIM_DATA_BUMP = 10000,
59004+ GR_RLIM_STACK_BUMP = 1000,
59005+ GR_RLIM_CORE_BUMP = 10000,
59006+ GR_RLIM_RSS_BUMP = 500000,
59007+ GR_RLIM_NPROC_BUMP = 1,
59008+ GR_RLIM_NOFILE_BUMP = 5,
59009+ GR_RLIM_MEMLOCK_BUMP = 50000,
59010+ GR_RLIM_AS_BUMP = 500000,
59011+ GR_RLIM_LOCKS_BUMP = 2,
59012+ GR_RLIM_SIGPENDING_BUMP = 5,
59013+ GR_RLIM_MSGQUEUE_BUMP = 10000,
59014+ GR_RLIM_NICE_BUMP = 1,
59015+ GR_RLIM_RTPRIO_BUMP = 1,
59016+ GR_RLIM_RTTIME_BUMP = 1000000
59017+};
59018+
59019+#endif
59020diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
59021new file mode 100644
59022index 0000000..da390f1
59023--- /dev/null
59024+++ b/include/linux/grinternal.h
59025@@ -0,0 +1,221 @@
59026+#ifndef __GRINTERNAL_H
59027+#define __GRINTERNAL_H
59028+
59029+#ifdef CONFIG_GRKERNSEC
59030+
59031+#include <linux/fs.h>
59032+#include <linux/mnt_namespace.h>
59033+#include <linux/nsproxy.h>
59034+#include <linux/gracl.h>
59035+#include <linux/grdefs.h>
59036+#include <linux/grmsg.h>
59037+
59038+void gr_add_learn_entry(const char *fmt, ...)
59039+ __attribute__ ((format (printf, 1, 2)));
59040+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
59041+ const struct vfsmount *mnt);
59042+__u32 gr_check_create(const struct dentry *new_dentry,
59043+ const struct dentry *parent,
59044+ const struct vfsmount *mnt, const __u32 mode);
59045+int gr_check_protected_task(const struct task_struct *task);
59046+__u32 to_gr_audit(const __u32 reqmode);
59047+int gr_set_acls(const int type);
59048+int gr_apply_subject_to_task(struct task_struct *task);
59049+int gr_acl_is_enabled(void);
59050+char gr_roletype_to_char(void);
59051+
59052+void gr_handle_alertkill(struct task_struct *task);
59053+char *gr_to_filename(const struct dentry *dentry,
59054+ const struct vfsmount *mnt);
59055+char *gr_to_filename1(const struct dentry *dentry,
59056+ const struct vfsmount *mnt);
59057+char *gr_to_filename2(const struct dentry *dentry,
59058+ const struct vfsmount *mnt);
59059+char *gr_to_filename3(const struct dentry *dentry,
59060+ const struct vfsmount *mnt);
59061+
59062+extern int grsec_enable_ptrace_readexec;
59063+extern int grsec_enable_harden_ptrace;
59064+extern int grsec_enable_link;
59065+extern int grsec_enable_fifo;
59066+extern int grsec_enable_execve;
59067+extern int grsec_enable_shm;
59068+extern int grsec_enable_execlog;
59069+extern int grsec_enable_signal;
59070+extern int grsec_enable_audit_ptrace;
59071+extern int grsec_enable_forkfail;
59072+extern int grsec_enable_time;
59073+extern int grsec_enable_rofs;
59074+extern int grsec_enable_chroot_shmat;
59075+extern int grsec_enable_chroot_mount;
59076+extern int grsec_enable_chroot_double;
59077+extern int grsec_enable_chroot_pivot;
59078+extern int grsec_enable_chroot_chdir;
59079+extern int grsec_enable_chroot_chmod;
59080+extern int grsec_enable_chroot_mknod;
59081+extern int grsec_enable_chroot_fchdir;
59082+extern int grsec_enable_chroot_nice;
59083+extern int grsec_enable_chroot_execlog;
59084+extern int grsec_enable_chroot_caps;
59085+extern int grsec_enable_chroot_sysctl;
59086+extern int grsec_enable_chroot_unix;
59087+extern int grsec_enable_tpe;
59088+extern int grsec_tpe_gid;
59089+extern int grsec_enable_tpe_all;
59090+extern int grsec_enable_tpe_invert;
59091+extern int grsec_enable_socket_all;
59092+extern int grsec_socket_all_gid;
59093+extern int grsec_enable_socket_client;
59094+extern int grsec_socket_client_gid;
59095+extern int grsec_enable_socket_server;
59096+extern int grsec_socket_server_gid;
59097+extern int grsec_audit_gid;
59098+extern int grsec_enable_group;
59099+extern int grsec_enable_audit_textrel;
59100+extern int grsec_enable_log_rwxmaps;
59101+extern int grsec_enable_mount;
59102+extern int grsec_enable_chdir;
59103+extern int grsec_resource_logging;
59104+extern int grsec_enable_blackhole;
59105+extern int grsec_lastack_retries;
59106+extern int grsec_enable_brute;
59107+extern int grsec_lock;
59108+
59109+extern spinlock_t grsec_alert_lock;
59110+extern unsigned long grsec_alert_wtime;
59111+extern unsigned long grsec_alert_fyet;
59112+
59113+extern spinlock_t grsec_audit_lock;
59114+
59115+extern rwlock_t grsec_exec_file_lock;
59116+
59117+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
59118+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
59119+ (tsk)->exec_file->f_vfsmnt) : "/")
59120+
59121+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
59122+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
59123+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
59124+
59125+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
59126+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
59127+ (tsk)->exec_file->f_vfsmnt) : "/")
59128+
59129+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
59130+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
59131+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
59132+
59133+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
59134+
59135+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
59136+
59137+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
59138+ (task)->pid, (cred)->uid, \
59139+ (cred)->euid, (cred)->gid, (cred)->egid, \
59140+ gr_parent_task_fullpath(task), \
59141+ (task)->real_parent->comm, (task)->real_parent->pid, \
59142+ (pcred)->uid, (pcred)->euid, \
59143+ (pcred)->gid, (pcred)->egid
59144+
59145+#define GR_CHROOT_CAPS {{ \
59146+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
59147+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
59148+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
59149+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
59150+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
59151+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
59152+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
59153+
59154+#define security_learn(normal_msg,args...) \
59155+({ \
59156+ read_lock(&grsec_exec_file_lock); \
59157+ gr_add_learn_entry(normal_msg "\n", ## args); \
59158+ read_unlock(&grsec_exec_file_lock); \
59159+})
59160+
59161+enum {
59162+ GR_DO_AUDIT,
59163+ GR_DONT_AUDIT,
59164+ /* used for non-audit messages that we shouldn't kill the task on */
59165+ GR_DONT_AUDIT_GOOD
59166+};
59167+
59168+enum {
59169+ GR_TTYSNIFF,
59170+ GR_RBAC,
59171+ GR_RBAC_STR,
59172+ GR_STR_RBAC,
59173+ GR_RBAC_MODE2,
59174+ GR_RBAC_MODE3,
59175+ GR_FILENAME,
59176+ GR_SYSCTL_HIDDEN,
59177+ GR_NOARGS,
59178+ GR_ONE_INT,
59179+ GR_ONE_INT_TWO_STR,
59180+ GR_ONE_STR,
59181+ GR_STR_INT,
59182+ GR_TWO_STR_INT,
59183+ GR_TWO_INT,
59184+ GR_TWO_U64,
59185+ GR_THREE_INT,
59186+ GR_FIVE_INT_TWO_STR,
59187+ GR_TWO_STR,
59188+ GR_THREE_STR,
59189+ GR_FOUR_STR,
59190+ GR_STR_FILENAME,
59191+ GR_FILENAME_STR,
59192+ GR_FILENAME_TWO_INT,
59193+ GR_FILENAME_TWO_INT_STR,
59194+ GR_TEXTREL,
59195+ GR_PTRACE,
59196+ GR_RESOURCE,
59197+ GR_CAP,
59198+ GR_SIG,
59199+ GR_SIG2,
59200+ GR_CRASH1,
59201+ GR_CRASH2,
59202+ GR_PSACCT,
59203+ GR_RWXMAP
59204+};
59205+
59206+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
59207+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
59208+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
59209+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
59210+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
59211+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
59212+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
59213+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
59214+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
59215+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
59216+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
59217+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
59218+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
59219+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
59220+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
59221+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
59222+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
59223+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
59224+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
59225+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
59226+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
59227+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
59228+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
59229+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
59230+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
59231+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
59232+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
59233+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
59234+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
59235+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
59236+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
59237+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
59238+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
59239+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
59240+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
59241+
59242+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
59243+
59244+#endif
59245+
59246+#endif
59247diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
59248new file mode 100644
59249index 0000000..ae576a1
59250--- /dev/null
59251+++ b/include/linux/grmsg.h
59252@@ -0,0 +1,109 @@
59253+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
59254+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
59255+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
59256+#define GR_STOPMOD_MSG "denied modification of module state by "
59257+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
59258+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
59259+#define GR_IOPERM_MSG "denied use of ioperm() by "
59260+#define GR_IOPL_MSG "denied use of iopl() by "
59261+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
59262+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
59263+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
59264+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
59265+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
59266+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
59267+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
59268+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
59269+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
59270+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
59271+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
59272+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
59273+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
59274+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
59275+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
59276+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
59277+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
59278+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
59279+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
59280+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
59281+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
59282+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
59283+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
59284+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
59285+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
59286+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
59287+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
59288+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
59289+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
59290+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
59291+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
59292+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
59293+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
59294+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
59295+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
59296+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
59297+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
59298+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
59299+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
59300+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
59301+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
59302+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
59303+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
59304+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
59305+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
59306+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
59307+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
59308+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
59309+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
59310+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
59311+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
59312+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
59313+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
59314+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
59315+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
59316+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
59317+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
59318+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
59319+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
59320+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
59321+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
59322+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
59323+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
59324+#define GR_FAILFORK_MSG "failed fork with errno %s by "
59325+#define GR_NICE_CHROOT_MSG "denied priority change by "
59326+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
59327+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
59328+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
59329+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
59330+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
59331+#define GR_TIME_MSG "time set by "
59332+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
59333+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
59334+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
59335+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
59336+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
59337+#define GR_BIND_MSG "denied bind() by "
59338+#define GR_CONNECT_MSG "denied connect() by "
59339+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
59340+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
59341+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
59342+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
59343+#define GR_CAP_ACL_MSG "use of %s denied for "
59344+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
59345+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
59346+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
59347+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
59348+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
59349+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
59350+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
59351+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
59352+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
59353+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
59354+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
59355+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
59356+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
59357+#define GR_VM86_MSG "denied use of vm86 by "
59358+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
59359+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
59360+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
59361+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
59362diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
59363new file mode 100644
59364index 0000000..2ccf677
59365--- /dev/null
59366+++ b/include/linux/grsecurity.h
59367@@ -0,0 +1,229 @@
59368+#ifndef GR_SECURITY_H
59369+#define GR_SECURITY_H
59370+#include <linux/fs.h>
59371+#include <linux/fs_struct.h>
59372+#include <linux/binfmts.h>
59373+#include <linux/gracl.h>
59374+
59375+/* notify of brain-dead configs */
59376+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59377+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
59378+#endif
59379+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
59380+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
59381+#endif
59382+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
59383+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
59384+#endif
59385+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
59386+#error "CONFIG_PAX enabled, but no PaX options are enabled."
59387+#endif
59388+
59389+#include <linux/compat.h>
59390+
59391+struct user_arg_ptr {
59392+#ifdef CONFIG_COMPAT
59393+ bool is_compat;
59394+#endif
59395+ union {
59396+ const char __user *const __user *native;
59397+#ifdef CONFIG_COMPAT
59398+ compat_uptr_t __user *compat;
59399+#endif
59400+ } ptr;
59401+};
59402+
59403+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
59404+void gr_handle_brute_check(void);
59405+void gr_handle_kernel_exploit(void);
59406+int gr_process_user_ban(void);
59407+
59408+char gr_roletype_to_char(void);
59409+
59410+int gr_acl_enable_at_secure(void);
59411+
59412+int gr_check_user_change(int real, int effective, int fs);
59413+int gr_check_group_change(int real, int effective, int fs);
59414+
59415+void gr_del_task_from_ip_table(struct task_struct *p);
59416+
59417+int gr_pid_is_chrooted(struct task_struct *p);
59418+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
59419+int gr_handle_chroot_nice(void);
59420+int gr_handle_chroot_sysctl(const int op);
59421+int gr_handle_chroot_setpriority(struct task_struct *p,
59422+ const int niceval);
59423+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
59424+int gr_handle_chroot_chroot(const struct dentry *dentry,
59425+ const struct vfsmount *mnt);
59426+void gr_handle_chroot_chdir(struct path *path);
59427+int gr_handle_chroot_chmod(const struct dentry *dentry,
59428+ const struct vfsmount *mnt, const int mode);
59429+int gr_handle_chroot_mknod(const struct dentry *dentry,
59430+ const struct vfsmount *mnt, const int mode);
59431+int gr_handle_chroot_mount(const struct dentry *dentry,
59432+ const struct vfsmount *mnt,
59433+ const char *dev_name);
59434+int gr_handle_chroot_pivot(void);
59435+int gr_handle_chroot_unix(const pid_t pid);
59436+
59437+int gr_handle_rawio(const struct inode *inode);
59438+
59439+void gr_handle_ioperm(void);
59440+void gr_handle_iopl(void);
59441+
59442+umode_t gr_acl_umask(void);
59443+
59444+int gr_tpe_allow(const struct file *file);
59445+
59446+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
59447+void gr_clear_chroot_entries(struct task_struct *task);
59448+
59449+void gr_log_forkfail(const int retval);
59450+void gr_log_timechange(void);
59451+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
59452+void gr_log_chdir(const struct dentry *dentry,
59453+ const struct vfsmount *mnt);
59454+void gr_log_chroot_exec(const struct dentry *dentry,
59455+ const struct vfsmount *mnt);
59456+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
59457+void gr_log_remount(const char *devname, const int retval);
59458+void gr_log_unmount(const char *devname, const int retval);
59459+void gr_log_mount(const char *from, const char *to, const int retval);
59460+void gr_log_textrel(struct vm_area_struct *vma);
59461+void gr_log_rwxmmap(struct file *file);
59462+void gr_log_rwxmprotect(struct file *file);
59463+
59464+int gr_handle_follow_link(const struct inode *parent,
59465+ const struct inode *inode,
59466+ const struct dentry *dentry,
59467+ const struct vfsmount *mnt);
59468+int gr_handle_fifo(const struct dentry *dentry,
59469+ const struct vfsmount *mnt,
59470+ const struct dentry *dir, const int flag,
59471+ const int acc_mode);
59472+int gr_handle_hardlink(const struct dentry *dentry,
59473+ const struct vfsmount *mnt,
59474+ struct inode *inode,
59475+ const int mode, const char *to);
59476+
59477+int gr_is_capable(const int cap);
59478+int gr_is_capable_nolog(const int cap);
59479+void gr_learn_resource(const struct task_struct *task, const int limit,
59480+ const unsigned long wanted, const int gt);
59481+void gr_copy_label(struct task_struct *tsk);
59482+void gr_handle_crash(struct task_struct *task, const int sig);
59483+int gr_handle_signal(const struct task_struct *p, const int sig);
59484+int gr_check_crash_uid(const uid_t uid);
59485+int gr_check_protected_task(const struct task_struct *task);
59486+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
59487+int gr_acl_handle_mmap(const struct file *file,
59488+ const unsigned long prot);
59489+int gr_acl_handle_mprotect(const struct file *file,
59490+ const unsigned long prot);
59491+int gr_check_hidden_task(const struct task_struct *tsk);
59492+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
59493+ const struct vfsmount *mnt);
59494+__u32 gr_acl_handle_utime(const struct dentry *dentry,
59495+ const struct vfsmount *mnt);
59496+__u32 gr_acl_handle_access(const struct dentry *dentry,
59497+ const struct vfsmount *mnt, const int fmode);
59498+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
59499+ const struct vfsmount *mnt, umode_t *mode);
59500+__u32 gr_acl_handle_chown(const struct dentry *dentry,
59501+ const struct vfsmount *mnt);
59502+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
59503+ const struct vfsmount *mnt);
59504+int gr_handle_ptrace(struct task_struct *task, const long request);
59505+int gr_handle_proc_ptrace(struct task_struct *task);
59506+__u32 gr_acl_handle_execve(const struct dentry *dentry,
59507+ const struct vfsmount *mnt);
59508+int gr_check_crash_exec(const struct file *filp);
59509+int gr_acl_is_enabled(void);
59510+void gr_set_kernel_label(struct task_struct *task);
59511+void gr_set_role_label(struct task_struct *task, const uid_t uid,
59512+ const gid_t gid);
59513+int gr_set_proc_label(const struct dentry *dentry,
59514+ const struct vfsmount *mnt,
59515+ const int unsafe_flags);
59516+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
59517+ const struct vfsmount *mnt);
59518+__u32 gr_acl_handle_open(const struct dentry *dentry,
59519+ const struct vfsmount *mnt, int acc_mode);
59520+__u32 gr_acl_handle_creat(const struct dentry *dentry,
59521+ const struct dentry *p_dentry,
59522+ const struct vfsmount *p_mnt,
59523+ int open_flags, int acc_mode, const int imode);
59524+void gr_handle_create(const struct dentry *dentry,
59525+ const struct vfsmount *mnt);
59526+void gr_handle_proc_create(const struct dentry *dentry,
59527+ const struct inode *inode);
59528+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
59529+ const struct dentry *parent_dentry,
59530+ const struct vfsmount *parent_mnt,
59531+ const int mode);
59532+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
59533+ const struct dentry *parent_dentry,
59534+ const struct vfsmount *parent_mnt);
59535+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
59536+ const struct vfsmount *mnt);
59537+void gr_handle_delete(const ino_t ino, const dev_t dev);
59538+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
59539+ const struct vfsmount *mnt);
59540+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
59541+ const struct dentry *parent_dentry,
59542+ const struct vfsmount *parent_mnt,
59543+ const char *from);
59544+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
59545+ const struct dentry *parent_dentry,
59546+ const struct vfsmount *parent_mnt,
59547+ const struct dentry *old_dentry,
59548+ const struct vfsmount *old_mnt, const char *to);
59549+int gr_acl_handle_rename(struct dentry *new_dentry,
59550+ struct dentry *parent_dentry,
59551+ const struct vfsmount *parent_mnt,
59552+ struct dentry *old_dentry,
59553+ struct inode *old_parent_inode,
59554+ struct vfsmount *old_mnt, const char *newname);
59555+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59556+ struct dentry *old_dentry,
59557+ struct dentry *new_dentry,
59558+ struct vfsmount *mnt, const __u8 replace);
59559+__u32 gr_check_link(const struct dentry *new_dentry,
59560+ const struct dentry *parent_dentry,
59561+ const struct vfsmount *parent_mnt,
59562+ const struct dentry *old_dentry,
59563+ const struct vfsmount *old_mnt);
59564+int gr_acl_handle_filldir(const struct file *file, const char *name,
59565+ const unsigned int namelen, const ino_t ino);
59566+
59567+__u32 gr_acl_handle_unix(const struct dentry *dentry,
59568+ const struct vfsmount *mnt);
59569+void gr_acl_handle_exit(void);
59570+void gr_acl_handle_psacct(struct task_struct *task, const long code);
59571+int gr_acl_handle_procpidmem(const struct task_struct *task);
59572+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
59573+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
59574+void gr_audit_ptrace(struct task_struct *task);
59575+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
59576+
59577+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
59578+
59579+#ifdef CONFIG_GRKERNSEC
59580+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
59581+void gr_handle_vm86(void);
59582+void gr_handle_mem_readwrite(u64 from, u64 to);
59583+
59584+void gr_log_badprocpid(const char *entry);
59585+
59586+extern int grsec_enable_dmesg;
59587+extern int grsec_disable_privio;
59588+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59589+extern int grsec_enable_chroot_findtask;
59590+#endif
59591+#ifdef CONFIG_GRKERNSEC_SETXID
59592+extern int grsec_enable_setxid;
59593+#endif
59594+#endif
59595+
59596+#endif
59597diff --git a/include/linux/grsock.h b/include/linux/grsock.h
59598new file mode 100644
59599index 0000000..e7ffaaf
59600--- /dev/null
59601+++ b/include/linux/grsock.h
59602@@ -0,0 +1,19 @@
59603+#ifndef __GRSOCK_H
59604+#define __GRSOCK_H
59605+
59606+extern void gr_attach_curr_ip(const struct sock *sk);
59607+extern int gr_handle_sock_all(const int family, const int type,
59608+ const int protocol);
59609+extern int gr_handle_sock_server(const struct sockaddr *sck);
59610+extern int gr_handle_sock_server_other(const struct sock *sck);
59611+extern int gr_handle_sock_client(const struct sockaddr *sck);
59612+extern int gr_search_connect(struct socket * sock,
59613+ struct sockaddr_in * addr);
59614+extern int gr_search_bind(struct socket * sock,
59615+ struct sockaddr_in * addr);
59616+extern int gr_search_listen(struct socket * sock);
59617+extern int gr_search_accept(struct socket * sock);
59618+extern int gr_search_socket(const int domain, const int type,
59619+ const int protocol);
59620+
59621+#endif
59622diff --git a/include/linux/hid.h b/include/linux/hid.h
59623index c235e4e..f0cf7a0 100644
59624--- a/include/linux/hid.h
59625+++ b/include/linux/hid.h
59626@@ -679,7 +679,7 @@ struct hid_ll_driver {
59627 unsigned int code, int value);
59628
59629 int (*parse)(struct hid_device *hdev);
59630-};
59631+} __no_const;
59632
59633 #define PM_HINT_FULLON 1<<5
59634 #define PM_HINT_NORMAL 1<<1
59635diff --git a/include/linux/highmem.h b/include/linux/highmem.h
59636index 3a93f73..b19d0b3 100644
59637--- a/include/linux/highmem.h
59638+++ b/include/linux/highmem.h
59639@@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page)
59640 kunmap_atomic(kaddr, KM_USER0);
59641 }
59642
59643+static inline void sanitize_highpage(struct page *page)
59644+{
59645+ void *kaddr;
59646+ unsigned long flags;
59647+
59648+ local_irq_save(flags);
59649+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
59650+ clear_page(kaddr);
59651+ kunmap_atomic(kaddr, KM_CLEARPAGE);
59652+ local_irq_restore(flags);
59653+}
59654+
59655 static inline void zero_user_segments(struct page *page,
59656 unsigned start1, unsigned end1,
59657 unsigned start2, unsigned end2)
59658diff --git a/include/linux/i2c.h b/include/linux/i2c.h
59659index 07d103a..04ec65b 100644
59660--- a/include/linux/i2c.h
59661+++ b/include/linux/i2c.h
59662@@ -364,6 +364,7 @@ struct i2c_algorithm {
59663 /* To determine what the adapter supports */
59664 u32 (*functionality) (struct i2c_adapter *);
59665 };
59666+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
59667
59668 /*
59669 * i2c_adapter is the structure used to identify a physical i2c bus along
59670diff --git a/include/linux/i2o.h b/include/linux/i2o.h
59671index a6deef4..c56a7f2 100644
59672--- a/include/linux/i2o.h
59673+++ b/include/linux/i2o.h
59674@@ -564,7 +564,7 @@ struct i2o_controller {
59675 struct i2o_device *exec; /* Executive */
59676 #if BITS_PER_LONG == 64
59677 spinlock_t context_list_lock; /* lock for context_list */
59678- atomic_t context_list_counter; /* needed for unique contexts */
59679+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
59680 struct list_head context_list; /* list of context id's
59681 and pointers */
59682 #endif
59683diff --git a/include/linux/init.h b/include/linux/init.h
59684index 9146f39..885354d 100644
59685--- a/include/linux/init.h
59686+++ b/include/linux/init.h
59687@@ -293,13 +293,13 @@ void __init parse_early_options(char *cmdline);
59688
59689 /* Each module must use one module_init(). */
59690 #define module_init(initfn) \
59691- static inline initcall_t __inittest(void) \
59692+ static inline __used initcall_t __inittest(void) \
59693 { return initfn; } \
59694 int init_module(void) __attribute__((alias(#initfn)));
59695
59696 /* This is only required if you want to be unloadable. */
59697 #define module_exit(exitfn) \
59698- static inline exitcall_t __exittest(void) \
59699+ static inline __used exitcall_t __exittest(void) \
59700 { return exitfn; } \
59701 void cleanup_module(void) __attribute__((alias(#exitfn)));
59702
59703diff --git a/include/linux/init_task.h b/include/linux/init_task.h
59704index 32574ee..00d4ef1 100644
59705--- a/include/linux/init_task.h
59706+++ b/include/linux/init_task.h
59707@@ -128,6 +128,12 @@ extern struct cred init_cred;
59708
59709 #define INIT_TASK_COMM "swapper"
59710
59711+#ifdef CONFIG_X86
59712+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
59713+#else
59714+#define INIT_TASK_THREAD_INFO
59715+#endif
59716+
59717 /*
59718 * INIT_TASK is used to set up the first task table, touch at
59719 * your own risk!. Base=0, limit=0x1fffff (=2MB)
59720@@ -166,6 +172,7 @@ extern struct cred init_cred;
59721 RCU_INIT_POINTER(.cred, &init_cred), \
59722 .comm = INIT_TASK_COMM, \
59723 .thread = INIT_THREAD, \
59724+ INIT_TASK_THREAD_INFO \
59725 .fs = &init_fs, \
59726 .files = &init_files, \
59727 .signal = &init_signals, \
59728diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
59729index e6ca56d..8583707 100644
59730--- a/include/linux/intel-iommu.h
59731+++ b/include/linux/intel-iommu.h
59732@@ -296,7 +296,7 @@ struct iommu_flush {
59733 u8 fm, u64 type);
59734 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
59735 unsigned int size_order, u64 type);
59736-};
59737+} __no_const;
59738
59739 enum {
59740 SR_DMAR_FECTL_REG,
59741diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
59742index a64b00e..464d8bc 100644
59743--- a/include/linux/interrupt.h
59744+++ b/include/linux/interrupt.h
59745@@ -441,7 +441,7 @@ enum
59746 /* map softirq index to softirq name. update 'softirq_to_name' in
59747 * kernel/softirq.c when adding a new softirq.
59748 */
59749-extern char *softirq_to_name[NR_SOFTIRQS];
59750+extern const char * const softirq_to_name[NR_SOFTIRQS];
59751
59752 /* softirq mask and active fields moved to irq_cpustat_t in
59753 * asm/hardirq.h to get better cache usage. KAO
59754@@ -449,12 +449,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
59755
59756 struct softirq_action
59757 {
59758- void (*action)(struct softirq_action *);
59759+ void (*action)(void);
59760 };
59761
59762 asmlinkage void do_softirq(void);
59763 asmlinkage void __do_softirq(void);
59764-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
59765+extern void open_softirq(int nr, void (*action)(void));
59766 extern void softirq_init(void);
59767 static inline void __raise_softirq_irqoff(unsigned int nr)
59768 {
59769diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
59770index 3875719..4cd454c 100644
59771--- a/include/linux/kallsyms.h
59772+++ b/include/linux/kallsyms.h
59773@@ -15,7 +15,8 @@
59774
59775 struct module;
59776
59777-#ifdef CONFIG_KALLSYMS
59778+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
59779+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59780 /* Lookup the address for a symbol. Returns 0 if not found. */
59781 unsigned long kallsyms_lookup_name(const char *name);
59782
59783@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
59784 /* Stupid that this does nothing, but I didn't create this mess. */
59785 #define __print_symbol(fmt, addr)
59786 #endif /*CONFIG_KALLSYMS*/
59787+#else /* when included by kallsyms.c, vsnprintf.c, or
59788+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
59789+extern void __print_symbol(const char *fmt, unsigned long address);
59790+extern int sprint_backtrace(char *buffer, unsigned long address);
59791+extern int sprint_symbol(char *buffer, unsigned long address);
59792+const char *kallsyms_lookup(unsigned long addr,
59793+ unsigned long *symbolsize,
59794+ unsigned long *offset,
59795+ char **modname, char *namebuf);
59796+#endif
59797
59798 /* This macro allows us to keep printk typechecking */
59799 static __printf(1, 2)
59800diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
59801index fa39183..40160be 100644
59802--- a/include/linux/kgdb.h
59803+++ b/include/linux/kgdb.h
59804@@ -53,7 +53,7 @@ extern int kgdb_connected;
59805 extern int kgdb_io_module_registered;
59806
59807 extern atomic_t kgdb_setting_breakpoint;
59808-extern atomic_t kgdb_cpu_doing_single_step;
59809+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
59810
59811 extern struct task_struct *kgdb_usethread;
59812 extern struct task_struct *kgdb_contthread;
59813@@ -251,7 +251,7 @@ struct kgdb_arch {
59814 void (*disable_hw_break)(struct pt_regs *regs);
59815 void (*remove_all_hw_break)(void);
59816 void (*correct_hw_break)(void);
59817-};
59818+} __do_const;
59819
59820 /**
59821 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
59822@@ -276,7 +276,7 @@ struct kgdb_io {
59823 void (*pre_exception) (void);
59824 void (*post_exception) (void);
59825 int is_console;
59826-};
59827+} __do_const;
59828
59829 extern struct kgdb_arch arch_kgdb_ops;
59830
59831diff --git a/include/linux/kmod.h b/include/linux/kmod.h
59832index b16f653..eb908f4 100644
59833--- a/include/linux/kmod.h
59834+++ b/include/linux/kmod.h
59835@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
59836 * usually useless though. */
59837 extern __printf(2, 3)
59838 int __request_module(bool wait, const char *name, ...);
59839+extern __printf(3, 4)
59840+int ___request_module(bool wait, char *param_name, const char *name, ...);
59841 #define request_module(mod...) __request_module(true, mod)
59842 #define request_module_nowait(mod...) __request_module(false, mod)
59843 #define try_then_request_module(x, mod...) \
59844diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
59845index d526231..086e89b 100644
59846--- a/include/linux/kvm_host.h
59847+++ b/include/linux/kvm_host.h
59848@@ -308,7 +308,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
59849 void vcpu_load(struct kvm_vcpu *vcpu);
59850 void vcpu_put(struct kvm_vcpu *vcpu);
59851
59852-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59853+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59854 struct module *module);
59855 void kvm_exit(void);
59856
59857@@ -454,7 +454,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
59858 struct kvm_guest_debug *dbg);
59859 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59860
59861-int kvm_arch_init(void *opaque);
59862+int kvm_arch_init(const void *opaque);
59863 void kvm_arch_exit(void);
59864
59865 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59866diff --git a/include/linux/libata.h b/include/linux/libata.h
59867index cafc09a..d7e7829 100644
59868--- a/include/linux/libata.h
59869+++ b/include/linux/libata.h
59870@@ -909,7 +909,7 @@ struct ata_port_operations {
59871 * fields must be pointers.
59872 */
59873 const struct ata_port_operations *inherits;
59874-};
59875+} __do_const;
59876
59877 struct ata_port_info {
59878 unsigned long flags;
59879diff --git a/include/linux/mca.h b/include/linux/mca.h
59880index 3797270..7765ede 100644
59881--- a/include/linux/mca.h
59882+++ b/include/linux/mca.h
59883@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59884 int region);
59885 void * (*mca_transform_memory)(struct mca_device *,
59886 void *memory);
59887-};
59888+} __no_const;
59889
59890 struct mca_bus {
59891 u64 default_dma_mask;
59892diff --git a/include/linux/memory.h b/include/linux/memory.h
59893index 935699b..11042cc 100644
59894--- a/include/linux/memory.h
59895+++ b/include/linux/memory.h
59896@@ -144,7 +144,7 @@ struct memory_accessor {
59897 size_t count);
59898 ssize_t (*write)(struct memory_accessor *, const char *buf,
59899 off_t offset, size_t count);
59900-};
59901+} __no_const;
59902
59903 /*
59904 * Kernel text modification mutex, used for code patching. Users of this lock
59905diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
59906index 9970337..9444122 100644
59907--- a/include/linux/mfd/abx500.h
59908+++ b/include/linux/mfd/abx500.h
59909@@ -188,6 +188,7 @@ struct abx500_ops {
59910 int (*event_registers_startup_state_get) (struct device *, u8 *);
59911 int (*startup_irq_enabled) (struct device *, unsigned int);
59912 };
59913+typedef struct abx500_ops __no_const abx500_ops_no_const;
59914
59915 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
59916 void abx500_remove_ops(struct device *dev);
59917diff --git a/include/linux/mm.h b/include/linux/mm.h
59918index 4baadd1..2e0b45e 100644
59919--- a/include/linux/mm.h
59920+++ b/include/linux/mm.h
59921@@ -115,7 +115,14 @@ extern unsigned int kobjsize(const void *objp);
59922
59923 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59924 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59925+
59926+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59927+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59928+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59929+#else
59930 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59931+#endif
59932+
59933 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59934 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59935
59936@@ -1012,34 +1019,6 @@ int set_page_dirty(struct page *page);
59937 int set_page_dirty_lock(struct page *page);
59938 int clear_page_dirty_for_io(struct page *page);
59939
59940-/* Is the vma a continuation of the stack vma above it? */
59941-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
59942-{
59943- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59944-}
59945-
59946-static inline int stack_guard_page_start(struct vm_area_struct *vma,
59947- unsigned long addr)
59948-{
59949- return (vma->vm_flags & VM_GROWSDOWN) &&
59950- (vma->vm_start == addr) &&
59951- !vma_growsdown(vma->vm_prev, addr);
59952-}
59953-
59954-/* Is the vma a continuation of the stack vma below it? */
59955-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
59956-{
59957- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
59958-}
59959-
59960-static inline int stack_guard_page_end(struct vm_area_struct *vma,
59961- unsigned long addr)
59962-{
59963- return (vma->vm_flags & VM_GROWSUP) &&
59964- (vma->vm_end == addr) &&
59965- !vma_growsup(vma->vm_next, addr);
59966-}
59967-
59968 extern unsigned long move_page_tables(struct vm_area_struct *vma,
59969 unsigned long old_addr, struct vm_area_struct *new_vma,
59970 unsigned long new_addr, unsigned long len);
59971@@ -1134,6 +1113,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
59972 }
59973 #endif
59974
59975+#ifdef CONFIG_MMU
59976+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
59977+#else
59978+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
59979+{
59980+ return __pgprot(0);
59981+}
59982+#endif
59983+
59984 int vma_wants_writenotify(struct vm_area_struct *vma);
59985
59986 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
59987@@ -1419,6 +1407,7 @@ out:
59988 }
59989
59990 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
59991+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
59992
59993 extern unsigned long do_brk(unsigned long, unsigned long);
59994
59995@@ -1476,6 +1465,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
59996 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
59997 struct vm_area_struct **pprev);
59998
59999+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
60000+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
60001+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
60002+
60003 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
60004 NULL if none. Assume start_addr < end_addr. */
60005 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
60006@@ -1492,15 +1485,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
60007 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
60008 }
60009
60010-#ifdef CONFIG_MMU
60011-pgprot_t vm_get_page_prot(unsigned long vm_flags);
60012-#else
60013-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
60014-{
60015- return __pgprot(0);
60016-}
60017-#endif
60018-
60019 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
60020 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
60021 unsigned long pfn, unsigned long size, pgprot_t);
60022@@ -1614,7 +1598,7 @@ extern int unpoison_memory(unsigned long pfn);
60023 extern int sysctl_memory_failure_early_kill;
60024 extern int sysctl_memory_failure_recovery;
60025 extern void shake_page(struct page *p, int access);
60026-extern atomic_long_t mce_bad_pages;
60027+extern atomic_long_unchecked_t mce_bad_pages;
60028 extern int soft_offline_page(struct page *page, int flags);
60029
60030 extern void dump_page(struct page *page);
60031@@ -1628,5 +1612,11 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
60032 unsigned int pages_per_huge_page);
60033 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
60034
60035+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
60036+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
60037+#else
60038+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
60039+#endif
60040+
60041 #endif /* __KERNEL__ */
60042 #endif /* _LINUX_MM_H */
60043diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
60044index 5b42f1b..759e4b4 100644
60045--- a/include/linux/mm_types.h
60046+++ b/include/linux/mm_types.h
60047@@ -253,6 +253,8 @@ struct vm_area_struct {
60048 #ifdef CONFIG_NUMA
60049 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
60050 #endif
60051+
60052+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
60053 };
60054
60055 struct core_thread {
60056@@ -389,6 +391,24 @@ struct mm_struct {
60057 #ifdef CONFIG_CPUMASK_OFFSTACK
60058 struct cpumask cpumask_allocation;
60059 #endif
60060+
60061+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60062+ unsigned long pax_flags;
60063+#endif
60064+
60065+#ifdef CONFIG_PAX_DLRESOLVE
60066+ unsigned long call_dl_resolve;
60067+#endif
60068+
60069+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
60070+ unsigned long call_syscall;
60071+#endif
60072+
60073+#ifdef CONFIG_PAX_ASLR
60074+ unsigned long delta_mmap; /* randomized offset */
60075+ unsigned long delta_stack; /* randomized offset */
60076+#endif
60077+
60078 };
60079
60080 static inline void mm_init_cpumask(struct mm_struct *mm)
60081diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
60082index 1d1b1e1..2a13c78 100644
60083--- a/include/linux/mmu_notifier.h
60084+++ b/include/linux/mmu_notifier.h
60085@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
60086 */
60087 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
60088 ({ \
60089- pte_t __pte; \
60090+ pte_t ___pte; \
60091 struct vm_area_struct *___vma = __vma; \
60092 unsigned long ___address = __address; \
60093- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
60094+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
60095 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
60096- __pte; \
60097+ ___pte; \
60098 })
60099
60100 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
60101diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
60102index 188cb2f..d78409b 100644
60103--- a/include/linux/mmzone.h
60104+++ b/include/linux/mmzone.h
60105@@ -369,7 +369,7 @@ struct zone {
60106 unsigned long flags; /* zone flags, see below */
60107
60108 /* Zone statistics */
60109- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60110+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60111
60112 /*
60113 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
60114diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
60115index 468819c..17b9db3 100644
60116--- a/include/linux/mod_devicetable.h
60117+++ b/include/linux/mod_devicetable.h
60118@@ -12,7 +12,7 @@
60119 typedef unsigned long kernel_ulong_t;
60120 #endif
60121
60122-#define PCI_ANY_ID (~0)
60123+#define PCI_ANY_ID ((__u16)~0)
60124
60125 struct pci_device_id {
60126 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
60127@@ -131,7 +131,7 @@ struct usb_device_id {
60128 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
60129 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
60130
60131-#define HID_ANY_ID (~0)
60132+#define HID_ANY_ID (~0U)
60133
60134 struct hid_device_id {
60135 __u16 bus;
60136diff --git a/include/linux/module.h b/include/linux/module.h
60137index 3cb7839..511cb87 100644
60138--- a/include/linux/module.h
60139+++ b/include/linux/module.h
60140@@ -17,6 +17,7 @@
60141 #include <linux/moduleparam.h>
60142 #include <linux/tracepoint.h>
60143 #include <linux/export.h>
60144+#include <linux/fs.h>
60145
60146 #include <linux/percpu.h>
60147 #include <asm/module.h>
60148@@ -261,19 +262,16 @@ struct module
60149 int (*init)(void);
60150
60151 /* If this is non-NULL, vfree after init() returns */
60152- void *module_init;
60153+ void *module_init_rx, *module_init_rw;
60154
60155 /* Here is the actual code + data, vfree'd on unload. */
60156- void *module_core;
60157+ void *module_core_rx, *module_core_rw;
60158
60159 /* Here are the sizes of the init and core sections */
60160- unsigned int init_size, core_size;
60161+ unsigned int init_size_rw, core_size_rw;
60162
60163 /* The size of the executable code in each section. */
60164- unsigned int init_text_size, core_text_size;
60165-
60166- /* Size of RO sections of the module (text+rodata) */
60167- unsigned int init_ro_size, core_ro_size;
60168+ unsigned int init_size_rx, core_size_rx;
60169
60170 /* Arch-specific module values */
60171 struct mod_arch_specific arch;
60172@@ -329,6 +327,10 @@ struct module
60173 #ifdef CONFIG_EVENT_TRACING
60174 struct ftrace_event_call **trace_events;
60175 unsigned int num_trace_events;
60176+ struct file_operations trace_id;
60177+ struct file_operations trace_enable;
60178+ struct file_operations trace_format;
60179+ struct file_operations trace_filter;
60180 #endif
60181 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
60182 unsigned int num_ftrace_callsites;
60183@@ -379,16 +381,46 @@ bool is_module_address(unsigned long addr);
60184 bool is_module_percpu_address(unsigned long addr);
60185 bool is_module_text_address(unsigned long addr);
60186
60187+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
60188+{
60189+
60190+#ifdef CONFIG_PAX_KERNEXEC
60191+ if (ktla_ktva(addr) >= (unsigned long)start &&
60192+ ktla_ktva(addr) < (unsigned long)start + size)
60193+ return 1;
60194+#endif
60195+
60196+ return ((void *)addr >= start && (void *)addr < start + size);
60197+}
60198+
60199+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
60200+{
60201+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
60202+}
60203+
60204+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
60205+{
60206+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
60207+}
60208+
60209+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
60210+{
60211+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
60212+}
60213+
60214+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
60215+{
60216+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
60217+}
60218+
60219 static inline int within_module_core(unsigned long addr, struct module *mod)
60220 {
60221- return (unsigned long)mod->module_core <= addr &&
60222- addr < (unsigned long)mod->module_core + mod->core_size;
60223+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
60224 }
60225
60226 static inline int within_module_init(unsigned long addr, struct module *mod)
60227 {
60228- return (unsigned long)mod->module_init <= addr &&
60229- addr < (unsigned long)mod->module_init + mod->init_size;
60230+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
60231 }
60232
60233 /* Search for module by name: must hold module_mutex. */
60234diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
60235index b2be02e..6a9fdb1 100644
60236--- a/include/linux/moduleloader.h
60237+++ b/include/linux/moduleloader.h
60238@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
60239 sections. Returns NULL on failure. */
60240 void *module_alloc(unsigned long size);
60241
60242+#ifdef CONFIG_PAX_KERNEXEC
60243+void *module_alloc_exec(unsigned long size);
60244+#else
60245+#define module_alloc_exec(x) module_alloc(x)
60246+#endif
60247+
60248 /* Free memory returned from module_alloc. */
60249 void module_free(struct module *mod, void *module_region);
60250
60251+#ifdef CONFIG_PAX_KERNEXEC
60252+void module_free_exec(struct module *mod, void *module_region);
60253+#else
60254+#define module_free_exec(x, y) module_free((x), (y))
60255+#endif
60256+
60257 /* Apply the given relocation to the (simplified) ELF. Return -error
60258 or 0. */
60259 int apply_relocate(Elf_Shdr *sechdrs,
60260diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
60261index 7939f63..ec6df57 100644
60262--- a/include/linux/moduleparam.h
60263+++ b/include/linux/moduleparam.h
60264@@ -260,7 +260,7 @@ static inline void __kernel_param_unlock(void)
60265 * @len is usually just sizeof(string).
60266 */
60267 #define module_param_string(name, string, len, perm) \
60268- static const struct kparam_string __param_string_##name \
60269+ static const struct kparam_string __param_string_##name __used \
60270 = { len, string }; \
60271 __module_param_call(MODULE_PARAM_PREFIX, name, \
60272 &param_ops_string, \
60273@@ -395,7 +395,7 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
60274 * module_param_named() for why this might be necessary.
60275 */
60276 #define module_param_array_named(name, array, type, nump, perm) \
60277- static const struct kparam_array __param_arr_##name \
60278+ static const struct kparam_array __param_arr_##name __used \
60279 = { .max = ARRAY_SIZE(array), .num = nump, \
60280 .ops = &param_ops_##type, \
60281 .elemsize = sizeof(array[0]), .elem = array }; \
60282diff --git a/include/linux/namei.h b/include/linux/namei.h
60283index ffc0213..2c1f2cb 100644
60284--- a/include/linux/namei.h
60285+++ b/include/linux/namei.h
60286@@ -24,7 +24,7 @@ struct nameidata {
60287 unsigned seq;
60288 int last_type;
60289 unsigned depth;
60290- char *saved_names[MAX_NESTED_LINKS + 1];
60291+ const char *saved_names[MAX_NESTED_LINKS + 1];
60292
60293 /* Intent data */
60294 union {
60295@@ -94,12 +94,12 @@ extern int follow_up(struct path *);
60296 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
60297 extern void unlock_rename(struct dentry *, struct dentry *);
60298
60299-static inline void nd_set_link(struct nameidata *nd, char *path)
60300+static inline void nd_set_link(struct nameidata *nd, const char *path)
60301 {
60302 nd->saved_names[nd->depth] = path;
60303 }
60304
60305-static inline char *nd_get_link(struct nameidata *nd)
60306+static inline const char *nd_get_link(const struct nameidata *nd)
60307 {
60308 return nd->saved_names[nd->depth];
60309 }
60310diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
60311index a82ad4d..90d15b7 100644
60312--- a/include/linux/netdevice.h
60313+++ b/include/linux/netdevice.h
60314@@ -949,6 +949,7 @@ struct net_device_ops {
60315 int (*ndo_set_features)(struct net_device *dev,
60316 u32 features);
60317 };
60318+typedef struct net_device_ops __no_const net_device_ops_no_const;
60319
60320 /*
60321 * The DEVICE structure.
60322@@ -1088,7 +1089,7 @@ struct net_device {
60323 int iflink;
60324
60325 struct net_device_stats stats;
60326- atomic_long_t rx_dropped; /* dropped packets by core network
60327+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
60328 * Do not use this in drivers.
60329 */
60330
60331diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
60332new file mode 100644
60333index 0000000..33f4af8
60334--- /dev/null
60335+++ b/include/linux/netfilter/xt_gradm.h
60336@@ -0,0 +1,9 @@
60337+#ifndef _LINUX_NETFILTER_XT_GRADM_H
60338+#define _LINUX_NETFILTER_XT_GRADM_H 1
60339+
60340+struct xt_gradm_mtinfo {
60341+ __u16 flags;
60342+ __u16 invflags;
60343+};
60344+
60345+#endif
60346diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
60347index c65a18a..0c05f3a 100644
60348--- a/include/linux/of_pdt.h
60349+++ b/include/linux/of_pdt.h
60350@@ -32,7 +32,7 @@ struct of_pdt_ops {
60351
60352 /* return 0 on success; fill in 'len' with number of bytes in path */
60353 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
60354-};
60355+} __no_const;
60356
60357 extern void *prom_early_alloc(unsigned long size);
60358
60359diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
60360index a4c5624..79d6d88 100644
60361--- a/include/linux/oprofile.h
60362+++ b/include/linux/oprofile.h
60363@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
60364 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
60365 char const * name, ulong * val);
60366
60367-/** Create a file for read-only access to an atomic_t. */
60368+/** Create a file for read-only access to an atomic_unchecked_t. */
60369 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
60370- char const * name, atomic_t * val);
60371+ char const * name, atomic_unchecked_t * val);
60372
60373 /** create a directory */
60374 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
60375diff --git a/include/linux/padata.h b/include/linux/padata.h
60376index 4633b2f..988bc08 100644
60377--- a/include/linux/padata.h
60378+++ b/include/linux/padata.h
60379@@ -129,7 +129,7 @@ struct parallel_data {
60380 struct padata_instance *pinst;
60381 struct padata_parallel_queue __percpu *pqueue;
60382 struct padata_serial_queue __percpu *squeue;
60383- atomic_t seq_nr;
60384+ atomic_unchecked_t seq_nr;
60385 atomic_t reorder_objects;
60386 atomic_t refcnt;
60387 unsigned int max_seq_nr;
60388diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
60389index b1f8912..c955bff 100644
60390--- a/include/linux/perf_event.h
60391+++ b/include/linux/perf_event.h
60392@@ -748,8 +748,8 @@ struct perf_event {
60393
60394 enum perf_event_active_state state;
60395 unsigned int attach_state;
60396- local64_t count;
60397- atomic64_t child_count;
60398+ local64_t count; /* PaX: fix it one day */
60399+ atomic64_unchecked_t child_count;
60400
60401 /*
60402 * These are the total time in nanoseconds that the event
60403@@ -800,8 +800,8 @@ struct perf_event {
60404 * These accumulate total time (in nanoseconds) that children
60405 * events have been enabled and running, respectively.
60406 */
60407- atomic64_t child_total_time_enabled;
60408- atomic64_t child_total_time_running;
60409+ atomic64_unchecked_t child_total_time_enabled;
60410+ atomic64_unchecked_t child_total_time_running;
60411
60412 /*
60413 * Protect attach/detach and child_list:
60414diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
60415index 77257c9..51d473a 100644
60416--- a/include/linux/pipe_fs_i.h
60417+++ b/include/linux/pipe_fs_i.h
60418@@ -46,9 +46,9 @@ struct pipe_buffer {
60419 struct pipe_inode_info {
60420 wait_queue_head_t wait;
60421 unsigned int nrbufs, curbuf, buffers;
60422- unsigned int readers;
60423- unsigned int writers;
60424- unsigned int waiting_writers;
60425+ atomic_t readers;
60426+ atomic_t writers;
60427+ atomic_t waiting_writers;
60428 unsigned int r_counter;
60429 unsigned int w_counter;
60430 struct page *tmp_page;
60431diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
60432index d3085e7..fd01052 100644
60433--- a/include/linux/pm_runtime.h
60434+++ b/include/linux/pm_runtime.h
60435@@ -95,7 +95,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
60436
60437 static inline void pm_runtime_mark_last_busy(struct device *dev)
60438 {
60439- ACCESS_ONCE(dev->power.last_busy) = jiffies;
60440+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
60441 }
60442
60443 #else /* !CONFIG_PM_RUNTIME */
60444diff --git a/include/linux/poison.h b/include/linux/poison.h
60445index 79159de..f1233a9 100644
60446--- a/include/linux/poison.h
60447+++ b/include/linux/poison.h
60448@@ -19,8 +19,8 @@
60449 * under normal circumstances, used to verify that nobody uses
60450 * non-initialized list entries.
60451 */
60452-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
60453-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
60454+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
60455+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
60456
60457 /********** include/linux/timer.h **********/
60458 /*
60459diff --git a/include/linux/preempt.h b/include/linux/preempt.h
60460index 58969b2..ead129b 100644
60461--- a/include/linux/preempt.h
60462+++ b/include/linux/preempt.h
60463@@ -123,7 +123,7 @@ struct preempt_ops {
60464 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
60465 void (*sched_out)(struct preempt_notifier *notifier,
60466 struct task_struct *next);
60467-};
60468+} __no_const;
60469
60470 /**
60471 * preempt_notifier - key for installing preemption notifiers
60472diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
60473index 643b96c..ef55a9c 100644
60474--- a/include/linux/proc_fs.h
60475+++ b/include/linux/proc_fs.h
60476@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
60477 return proc_create_data(name, mode, parent, proc_fops, NULL);
60478 }
60479
60480+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
60481+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
60482+{
60483+#ifdef CONFIG_GRKERNSEC_PROC_USER
60484+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
60485+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60486+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
60487+#else
60488+ return proc_create_data(name, mode, parent, proc_fops, NULL);
60489+#endif
60490+}
60491+
60492+
60493 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
60494 mode_t mode, struct proc_dir_entry *base,
60495 read_proc_t *read_proc, void * data)
60496@@ -258,7 +271,7 @@ union proc_op {
60497 int (*proc_show)(struct seq_file *m,
60498 struct pid_namespace *ns, struct pid *pid,
60499 struct task_struct *task);
60500-};
60501+} __no_const;
60502
60503 struct ctl_table_header;
60504 struct ctl_table;
60505diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
60506index 800f113..e9ee2e3 100644
60507--- a/include/linux/ptrace.h
60508+++ b/include/linux/ptrace.h
60509@@ -129,10 +129,12 @@ extern void __ptrace_unlink(struct task_struct *child);
60510 extern void exit_ptrace(struct task_struct *tracer);
60511 #define PTRACE_MODE_READ 1
60512 #define PTRACE_MODE_ATTACH 2
60513-/* Returns 0 on success, -errno on denial. */
60514-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
60515 /* Returns true on success, false on denial. */
60516 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
60517+/* Returns true on success, false on denial. */
60518+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
60519+/* Returns true on success, false on denial. */
60520+extern bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode);
60521
60522 static inline int ptrace_reparented(struct task_struct *child)
60523 {
60524diff --git a/include/linux/random.h b/include/linux/random.h
60525index 8f74538..02a1012 100644
60526--- a/include/linux/random.h
60527+++ b/include/linux/random.h
60528@@ -69,12 +69,17 @@ void srandom32(u32 seed);
60529
60530 u32 prandom32(struct rnd_state *);
60531
60532+static inline unsigned long pax_get_random_long(void)
60533+{
60534+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
60535+}
60536+
60537 /*
60538 * Handle minimum values for seeds
60539 */
60540 static inline u32 __seed(u32 x, u32 m)
60541 {
60542- return (x < m) ? x + m : x;
60543+ return (x <= m) ? x + m + 1 : x;
60544 }
60545
60546 /**
60547diff --git a/include/linux/reboot.h b/include/linux/reboot.h
60548index e0879a7..a12f962 100644
60549--- a/include/linux/reboot.h
60550+++ b/include/linux/reboot.h
60551@@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
60552 * Architecture-specific implementations of sys_reboot commands.
60553 */
60554
60555-extern void machine_restart(char *cmd);
60556-extern void machine_halt(void);
60557-extern void machine_power_off(void);
60558+extern void machine_restart(char *cmd) __noreturn;
60559+extern void machine_halt(void) __noreturn;
60560+extern void machine_power_off(void) __noreturn;
60561
60562 extern void machine_shutdown(void);
60563 struct pt_regs;
60564@@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
60565 */
60566
60567 extern void kernel_restart_prepare(char *cmd);
60568-extern void kernel_restart(char *cmd);
60569-extern void kernel_halt(void);
60570-extern void kernel_power_off(void);
60571+extern void kernel_restart(char *cmd) __noreturn;
60572+extern void kernel_halt(void) __noreturn;
60573+extern void kernel_power_off(void) __noreturn;
60574
60575 extern int C_A_D; /* for sysctl */
60576 void ctrl_alt_del(void);
60577@@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
60578 * Emergency restart, callable from an interrupt handler.
60579 */
60580
60581-extern void emergency_restart(void);
60582+extern void emergency_restart(void) __noreturn;
60583 #include <asm/emergency-restart.h>
60584
60585 #endif
60586diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
60587index 96d465f..b084e05 100644
60588--- a/include/linux/reiserfs_fs.h
60589+++ b/include/linux/reiserfs_fs.h
60590@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
60591 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
60592
60593 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
60594-#define get_generation(s) atomic_read (&fs_generation(s))
60595+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
60596 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
60597 #define __fs_changed(gen,s) (gen != get_generation (s))
60598 #define fs_changed(gen,s) \
60599diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
60600index 52c83b6..18ed7eb 100644
60601--- a/include/linux/reiserfs_fs_sb.h
60602+++ b/include/linux/reiserfs_fs_sb.h
60603@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
60604 /* Comment? -Hans */
60605 wait_queue_head_t s_wait;
60606 /* To be obsoleted soon by per buffer seals.. -Hans */
60607- atomic_t s_generation_counter; // increased by one every time the
60608+ atomic_unchecked_t s_generation_counter; // increased by one every time the
60609 // tree gets re-balanced
60610 unsigned long s_properties; /* File system properties. Currently holds
60611 on-disk FS format */
60612diff --git a/include/linux/relay.h b/include/linux/relay.h
60613index 14a86bc..17d0700 100644
60614--- a/include/linux/relay.h
60615+++ b/include/linux/relay.h
60616@@ -159,7 +159,7 @@ struct rchan_callbacks
60617 * The callback should return 0 if successful, negative if not.
60618 */
60619 int (*remove_buf_file)(struct dentry *dentry);
60620-};
60621+} __no_const;
60622
60623 /*
60624 * CONFIG_RELAY kernel API, kernel/relay.c
60625diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
60626index c6c6084..5bf1212 100644
60627--- a/include/linux/rfkill.h
60628+++ b/include/linux/rfkill.h
60629@@ -147,6 +147,7 @@ struct rfkill_ops {
60630 void (*query)(struct rfkill *rfkill, void *data);
60631 int (*set_block)(void *data, bool blocked);
60632 };
60633+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
60634
60635 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
60636 /**
60637diff --git a/include/linux/rio.h b/include/linux/rio.h
60638index 4d50611..c6858a2 100644
60639--- a/include/linux/rio.h
60640+++ b/include/linux/rio.h
60641@@ -315,7 +315,7 @@ struct rio_ops {
60642 int mbox, void *buffer, size_t len);
60643 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
60644 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
60645-};
60646+} __no_const;
60647
60648 #define RIO_RESOURCE_MEM 0x00000100
60649 #define RIO_RESOURCE_DOORBELL 0x00000200
60650diff --git a/include/linux/rmap.h b/include/linux/rmap.h
60651index 2148b12..519b820 100644
60652--- a/include/linux/rmap.h
60653+++ b/include/linux/rmap.h
60654@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
60655 void anon_vma_init(void); /* create anon_vma_cachep */
60656 int anon_vma_prepare(struct vm_area_struct *);
60657 void unlink_anon_vmas(struct vm_area_struct *);
60658-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
60659-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
60660+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
60661+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
60662 void __anon_vma_link(struct vm_area_struct *);
60663
60664 static inline void anon_vma_merge(struct vm_area_struct *vma,
60665diff --git a/include/linux/sched.h b/include/linux/sched.h
60666index 1c4f3e9..b4e4851 100644
60667--- a/include/linux/sched.h
60668+++ b/include/linux/sched.h
60669@@ -101,6 +101,7 @@ struct bio_list;
60670 struct fs_struct;
60671 struct perf_event_context;
60672 struct blk_plug;
60673+struct linux_binprm;
60674
60675 /*
60676 * List of flags we want to share for kernel threads,
60677@@ -380,10 +381,13 @@ struct user_namespace;
60678 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
60679
60680 extern int sysctl_max_map_count;
60681+extern unsigned long sysctl_heap_stack_gap;
60682
60683 #include <linux/aio.h>
60684
60685 #ifdef CONFIG_MMU
60686+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
60687+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
60688 extern void arch_pick_mmap_layout(struct mm_struct *mm);
60689 extern unsigned long
60690 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
60691@@ -629,6 +633,17 @@ struct signal_struct {
60692 #ifdef CONFIG_TASKSTATS
60693 struct taskstats *stats;
60694 #endif
60695+
60696+#ifdef CONFIG_GRKERNSEC
60697+ u32 curr_ip;
60698+ u32 saved_ip;
60699+ u32 gr_saddr;
60700+ u32 gr_daddr;
60701+ u16 gr_sport;
60702+ u16 gr_dport;
60703+ u8 used_accept:1;
60704+#endif
60705+
60706 #ifdef CONFIG_AUDIT
60707 unsigned audit_tty;
60708 struct tty_audit_buf *tty_audit_buf;
60709@@ -710,6 +725,11 @@ struct user_struct {
60710 struct key *session_keyring; /* UID's default session keyring */
60711 #endif
60712
60713+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60714+ unsigned int banned;
60715+ unsigned long ban_expires;
60716+#endif
60717+
60718 /* Hash table maintenance information */
60719 struct hlist_node uidhash_node;
60720 uid_t uid;
60721@@ -1337,8 +1357,8 @@ struct task_struct {
60722 struct list_head thread_group;
60723
60724 struct completion *vfork_done; /* for vfork() */
60725- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
60726- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60727+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
60728+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60729
60730 cputime_t utime, stime, utimescaled, stimescaled;
60731 cputime_t gtime;
60732@@ -1354,13 +1374,6 @@ struct task_struct {
60733 struct task_cputime cputime_expires;
60734 struct list_head cpu_timers[3];
60735
60736-/* process credentials */
60737- const struct cred __rcu *real_cred; /* objective and real subjective task
60738- * credentials (COW) */
60739- const struct cred __rcu *cred; /* effective (overridable) subjective task
60740- * credentials (COW) */
60741- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60742-
60743 char comm[TASK_COMM_LEN]; /* executable name excluding path
60744 - access with [gs]et_task_comm (which lock
60745 it with task_lock())
60746@@ -1377,8 +1390,16 @@ struct task_struct {
60747 #endif
60748 /* CPU-specific state of this task */
60749 struct thread_struct thread;
60750+/* thread_info moved to task_struct */
60751+#ifdef CONFIG_X86
60752+ struct thread_info tinfo;
60753+#endif
60754 /* filesystem information */
60755 struct fs_struct *fs;
60756+
60757+ const struct cred __rcu *cred; /* effective (overridable) subjective task
60758+ * credentials (COW) */
60759+
60760 /* open file information */
60761 struct files_struct *files;
60762 /* namespaces */
60763@@ -1425,6 +1446,11 @@ struct task_struct {
60764 struct rt_mutex_waiter *pi_blocked_on;
60765 #endif
60766
60767+/* process credentials */
60768+ const struct cred __rcu *real_cred; /* objective and real subjective task
60769+ * credentials (COW) */
60770+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60771+
60772 #ifdef CONFIG_DEBUG_MUTEXES
60773 /* mutex deadlock detection */
60774 struct mutex_waiter *blocked_on;
60775@@ -1540,6 +1566,27 @@ struct task_struct {
60776 unsigned long default_timer_slack_ns;
60777
60778 struct list_head *scm_work_list;
60779+
60780+#ifdef CONFIG_GRKERNSEC
60781+ /* grsecurity */
60782+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60783+ u64 exec_id;
60784+#endif
60785+#ifdef CONFIG_GRKERNSEC_SETXID
60786+ const struct cred *delayed_cred;
60787+#endif
60788+ struct dentry *gr_chroot_dentry;
60789+ struct acl_subject_label *acl;
60790+ struct acl_role_label *role;
60791+ struct file *exec_file;
60792+ u16 acl_role_id;
60793+ /* is this the task that authenticated to the special role */
60794+ u8 acl_sp_role;
60795+ u8 is_writable;
60796+ u8 brute;
60797+ u8 gr_is_chrooted;
60798+#endif
60799+
60800 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60801 /* Index of current stored address in ret_stack */
60802 int curr_ret_stack;
60803@@ -1574,6 +1621,51 @@ struct task_struct {
60804 #endif
60805 };
60806
60807+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60808+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60809+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60810+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60811+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60812+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60813+
60814+#ifdef CONFIG_PAX_SOFTMODE
60815+extern int pax_softmode;
60816+#endif
60817+
60818+extern int pax_check_flags(unsigned long *);
60819+
60820+/* if tsk != current then task_lock must be held on it */
60821+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60822+static inline unsigned long pax_get_flags(struct task_struct *tsk)
60823+{
60824+ if (likely(tsk->mm))
60825+ return tsk->mm->pax_flags;
60826+ else
60827+ return 0UL;
60828+}
60829+
60830+/* if tsk != current then task_lock must be held on it */
60831+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60832+{
60833+ if (likely(tsk->mm)) {
60834+ tsk->mm->pax_flags = flags;
60835+ return 0;
60836+ }
60837+ return -EINVAL;
60838+}
60839+#endif
60840+
60841+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60842+extern void pax_set_initial_flags(struct linux_binprm *bprm);
60843+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60844+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60845+#endif
60846+
60847+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60848+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
60849+extern void pax_report_refcount_overflow(struct pt_regs *regs);
60850+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60851+
60852 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60853 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
60854
60855@@ -2081,7 +2173,9 @@ void yield(void);
60856 extern struct exec_domain default_exec_domain;
60857
60858 union thread_union {
60859+#ifndef CONFIG_X86
60860 struct thread_info thread_info;
60861+#endif
60862 unsigned long stack[THREAD_SIZE/sizeof(long)];
60863 };
60864
60865@@ -2114,6 +2208,7 @@ extern struct pid_namespace init_pid_ns;
60866 */
60867
60868 extern struct task_struct *find_task_by_vpid(pid_t nr);
60869+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60870 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60871 struct pid_namespace *ns);
60872
60873@@ -2235,6 +2330,12 @@ static inline void mmdrop(struct mm_struct * mm)
60874 extern void mmput(struct mm_struct *);
60875 /* Grab a reference to a task's mm, if it is not already going away */
60876 extern struct mm_struct *get_task_mm(struct task_struct *task);
60877+/*
60878+ * Grab a reference to a task's mm, if it is not already going away
60879+ * and ptrace_may_access with the mode parameter passed to it
60880+ * succeeds.
60881+ */
60882+extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
60883 /* Remove the current tasks stale references to the old mm_struct */
60884 extern void mm_release(struct task_struct *, struct mm_struct *);
60885 /* Allocate a new mm structure and copy contents from tsk->mm */
60886@@ -2251,7 +2352,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
60887 extern void exit_itimers(struct signal_struct *);
60888 extern void flush_itimer_signals(void);
60889
60890-extern NORET_TYPE void do_group_exit(int);
60891+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60892
60893 extern void daemonize(const char *, ...);
60894 extern int allow_signal(int);
60895@@ -2416,13 +2517,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
60896
60897 #endif
60898
60899-static inline int object_is_on_stack(void *obj)
60900+static inline int object_starts_on_stack(void *obj)
60901 {
60902- void *stack = task_stack_page(current);
60903+ const void *stack = task_stack_page(current);
60904
60905 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60906 }
60907
60908+#ifdef CONFIG_PAX_USERCOPY
60909+extern int object_is_on_stack(const void *obj, unsigned long len);
60910+#endif
60911+
60912 extern void thread_info_cache_init(void);
60913
60914 #ifdef CONFIG_DEBUG_STACK_USAGE
60915diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
60916index 899fbb4..1cb4138 100644
60917--- a/include/linux/screen_info.h
60918+++ b/include/linux/screen_info.h
60919@@ -43,7 +43,8 @@ struct screen_info {
60920 __u16 pages; /* 0x32 */
60921 __u16 vesa_attributes; /* 0x34 */
60922 __u32 capabilities; /* 0x36 */
60923- __u8 _reserved[6]; /* 0x3a */
60924+ __u16 vesapm_size; /* 0x3a */
60925+ __u8 _reserved[4]; /* 0x3c */
60926 } __attribute__((packed));
60927
60928 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60929diff --git a/include/linux/security.h b/include/linux/security.h
60930index e8c619d..e0cbd1c 100644
60931--- a/include/linux/security.h
60932+++ b/include/linux/security.h
60933@@ -37,6 +37,7 @@
60934 #include <linux/xfrm.h>
60935 #include <linux/slab.h>
60936 #include <linux/xattr.h>
60937+#include <linux/grsecurity.h>
60938 #include <net/flow.h>
60939
60940 /* Maximum number of letters for an LSM name string */
60941diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
60942index 0b69a46..b2ffa4c 100644
60943--- a/include/linux/seq_file.h
60944+++ b/include/linux/seq_file.h
60945@@ -24,6 +24,9 @@ struct seq_file {
60946 struct mutex lock;
60947 const struct seq_operations *op;
60948 int poll_event;
60949+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60950+ u64 exec_id;
60951+#endif
60952 void *private;
60953 };
60954
60955@@ -33,6 +36,7 @@ struct seq_operations {
60956 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
60957 int (*show) (struct seq_file *m, void *v);
60958 };
60959+typedef struct seq_operations __no_const seq_operations_no_const;
60960
60961 #define SEQ_SKIP 1
60962
60963diff --git a/include/linux/shm.h b/include/linux/shm.h
60964index 92808b8..c28cac4 100644
60965--- a/include/linux/shm.h
60966+++ b/include/linux/shm.h
60967@@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
60968
60969 /* The task created the shm object. NULL if the task is dead. */
60970 struct task_struct *shm_creator;
60971+#ifdef CONFIG_GRKERNSEC
60972+ time_t shm_createtime;
60973+ pid_t shm_lapid;
60974+#endif
60975 };
60976
60977 /* shm_mode upper byte flags */
60978diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
60979index fe86488..1563c1c 100644
60980--- a/include/linux/skbuff.h
60981+++ b/include/linux/skbuff.h
60982@@ -642,7 +642,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
60983 */
60984 static inline int skb_queue_empty(const struct sk_buff_head *list)
60985 {
60986- return list->next == (struct sk_buff *)list;
60987+ return list->next == (const struct sk_buff *)list;
60988 }
60989
60990 /**
60991@@ -655,7 +655,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
60992 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60993 const struct sk_buff *skb)
60994 {
60995- return skb->next == (struct sk_buff *)list;
60996+ return skb->next == (const struct sk_buff *)list;
60997 }
60998
60999 /**
61000@@ -668,7 +668,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
61001 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
61002 const struct sk_buff *skb)
61003 {
61004- return skb->prev == (struct sk_buff *)list;
61005+ return skb->prev == (const struct sk_buff *)list;
61006 }
61007
61008 /**
61009@@ -1523,7 +1523,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
61010 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
61011 */
61012 #ifndef NET_SKB_PAD
61013-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
61014+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
61015 #endif
61016
61017 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
61018diff --git a/include/linux/slab.h b/include/linux/slab.h
61019index 573c809..e84c132 100644
61020--- a/include/linux/slab.h
61021+++ b/include/linux/slab.h
61022@@ -11,12 +11,20 @@
61023
61024 #include <linux/gfp.h>
61025 #include <linux/types.h>
61026+#include <linux/err.h>
61027
61028 /*
61029 * Flags to pass to kmem_cache_create().
61030 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
61031 */
61032 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
61033+
61034+#ifdef CONFIG_PAX_USERCOPY
61035+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
61036+#else
61037+#define SLAB_USERCOPY 0x00000000UL
61038+#endif
61039+
61040 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
61041 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
61042 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
61043@@ -87,10 +95,13 @@
61044 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
61045 * Both make kfree a no-op.
61046 */
61047-#define ZERO_SIZE_PTR ((void *)16)
61048+#define ZERO_SIZE_PTR \
61049+({ \
61050+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
61051+ (void *)(-MAX_ERRNO-1L); \
61052+})
61053
61054-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
61055- (unsigned long)ZERO_SIZE_PTR)
61056+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
61057
61058 /*
61059 * struct kmem_cache related prototypes
61060@@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
61061 void kfree(const void *);
61062 void kzfree(const void *);
61063 size_t ksize(const void *);
61064+void check_object_size(const void *ptr, unsigned long n, bool to);
61065
61066 /*
61067 * Allocator specific definitions. These are mainly used to establish optimized
61068@@ -353,4 +365,59 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
61069
61070 void __init kmem_cache_init_late(void);
61071
61072+#define kmalloc(x, y) \
61073+({ \
61074+ void *___retval; \
61075+ intoverflow_t ___x = (intoverflow_t)x; \
61076+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
61077+ ___retval = NULL; \
61078+ else \
61079+ ___retval = kmalloc((size_t)___x, (y)); \
61080+ ___retval; \
61081+})
61082+
61083+#define kmalloc_node(x, y, z) \
61084+({ \
61085+ void *___retval; \
61086+ intoverflow_t ___x = (intoverflow_t)x; \
61087+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
61088+ ___retval = NULL; \
61089+ else \
61090+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
61091+ ___retval; \
61092+})
61093+
61094+#define kzalloc(x, y) \
61095+({ \
61096+ void *___retval; \
61097+ intoverflow_t ___x = (intoverflow_t)x; \
61098+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
61099+ ___retval = NULL; \
61100+ else \
61101+ ___retval = kzalloc((size_t)___x, (y)); \
61102+ ___retval; \
61103+})
61104+
61105+#define __krealloc(x, y, z) \
61106+({ \
61107+ void *___retval; \
61108+ intoverflow_t ___y = (intoverflow_t)y; \
61109+ if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
61110+ ___retval = NULL; \
61111+ else \
61112+ ___retval = __krealloc((x), (size_t)___y, (z)); \
61113+ ___retval; \
61114+})
61115+
61116+#define krealloc(x, y, z) \
61117+({ \
61118+ void *___retval; \
61119+ intoverflow_t ___y = (intoverflow_t)y; \
61120+ if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
61121+ ___retval = NULL; \
61122+ else \
61123+ ___retval = krealloc((x), (size_t)___y, (z)); \
61124+ ___retval; \
61125+})
61126+
61127 #endif /* _LINUX_SLAB_H */
61128diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
61129index d00e0ba..1b3bf7b 100644
61130--- a/include/linux/slab_def.h
61131+++ b/include/linux/slab_def.h
61132@@ -68,10 +68,10 @@ struct kmem_cache {
61133 unsigned long node_allocs;
61134 unsigned long node_frees;
61135 unsigned long node_overflow;
61136- atomic_t allochit;
61137- atomic_t allocmiss;
61138- atomic_t freehit;
61139- atomic_t freemiss;
61140+ atomic_unchecked_t allochit;
61141+ atomic_unchecked_t allocmiss;
61142+ atomic_unchecked_t freehit;
61143+ atomic_unchecked_t freemiss;
61144
61145 /*
61146 * If debugging is enabled, then the allocator can add additional
61147diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
61148index a32bcfd..53b71f4 100644
61149--- a/include/linux/slub_def.h
61150+++ b/include/linux/slub_def.h
61151@@ -89,7 +89,7 @@ struct kmem_cache {
61152 struct kmem_cache_order_objects max;
61153 struct kmem_cache_order_objects min;
61154 gfp_t allocflags; /* gfp flags to use on each alloc */
61155- int refcount; /* Refcount for slab cache destroy */
61156+ atomic_t refcount; /* Refcount for slab cache destroy */
61157 void (*ctor)(void *);
61158 int inuse; /* Offset to metadata */
61159 int align; /* Alignment */
61160@@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
61161 }
61162
61163 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
61164-void *__kmalloc(size_t size, gfp_t flags);
61165+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
61166
61167 static __always_inline void *
61168 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
61169diff --git a/include/linux/sonet.h b/include/linux/sonet.h
61170index de8832d..0147b46 100644
61171--- a/include/linux/sonet.h
61172+++ b/include/linux/sonet.h
61173@@ -61,7 +61,7 @@ struct sonet_stats {
61174 #include <linux/atomic.h>
61175
61176 struct k_sonet_stats {
61177-#define __HANDLE_ITEM(i) atomic_t i
61178+#define __HANDLE_ITEM(i) atomic_unchecked_t i
61179 __SONET_ITEMS
61180 #undef __HANDLE_ITEM
61181 };
61182diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
61183index 3d8f9c4..69f1c0a 100644
61184--- a/include/linux/sunrpc/clnt.h
61185+++ b/include/linux/sunrpc/clnt.h
61186@@ -172,9 +172,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
61187 {
61188 switch (sap->sa_family) {
61189 case AF_INET:
61190- return ntohs(((struct sockaddr_in *)sap)->sin_port);
61191+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
61192 case AF_INET6:
61193- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
61194+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
61195 }
61196 return 0;
61197 }
61198@@ -207,7 +207,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
61199 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
61200 const struct sockaddr *src)
61201 {
61202- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
61203+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
61204 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
61205
61206 dsin->sin_family = ssin->sin_family;
61207@@ -310,7 +310,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
61208 if (sa->sa_family != AF_INET6)
61209 return 0;
61210
61211- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
61212+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
61213 }
61214
61215 #endif /* __KERNEL__ */
61216diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
61217index e775689..9e206d9 100644
61218--- a/include/linux/sunrpc/sched.h
61219+++ b/include/linux/sunrpc/sched.h
61220@@ -105,6 +105,7 @@ struct rpc_call_ops {
61221 void (*rpc_call_done)(struct rpc_task *, void *);
61222 void (*rpc_release)(void *);
61223 };
61224+typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
61225
61226 struct rpc_task_setup {
61227 struct rpc_task *task;
61228diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
61229index c14fe86..393245e 100644
61230--- a/include/linux/sunrpc/svc_rdma.h
61231+++ b/include/linux/sunrpc/svc_rdma.h
61232@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
61233 extern unsigned int svcrdma_max_requests;
61234 extern unsigned int svcrdma_max_req_size;
61235
61236-extern atomic_t rdma_stat_recv;
61237-extern atomic_t rdma_stat_read;
61238-extern atomic_t rdma_stat_write;
61239-extern atomic_t rdma_stat_sq_starve;
61240-extern atomic_t rdma_stat_rq_starve;
61241-extern atomic_t rdma_stat_rq_poll;
61242-extern atomic_t rdma_stat_rq_prod;
61243-extern atomic_t rdma_stat_sq_poll;
61244-extern atomic_t rdma_stat_sq_prod;
61245+extern atomic_unchecked_t rdma_stat_recv;
61246+extern atomic_unchecked_t rdma_stat_read;
61247+extern atomic_unchecked_t rdma_stat_write;
61248+extern atomic_unchecked_t rdma_stat_sq_starve;
61249+extern atomic_unchecked_t rdma_stat_rq_starve;
61250+extern atomic_unchecked_t rdma_stat_rq_poll;
61251+extern atomic_unchecked_t rdma_stat_rq_prod;
61252+extern atomic_unchecked_t rdma_stat_sq_poll;
61253+extern atomic_unchecked_t rdma_stat_sq_prod;
61254
61255 #define RPCRDMA_VERSION 1
61256
61257diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
61258index 703cfa33..0b8ca72ac 100644
61259--- a/include/linux/sysctl.h
61260+++ b/include/linux/sysctl.h
61261@@ -155,7 +155,11 @@ enum
61262 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
61263 };
61264
61265-
61266+#ifdef CONFIG_PAX_SOFTMODE
61267+enum {
61268+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
61269+};
61270+#endif
61271
61272 /* CTL_VM names: */
61273 enum
61274@@ -968,6 +972,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
61275
61276 extern int proc_dostring(struct ctl_table *, int,
61277 void __user *, size_t *, loff_t *);
61278+extern int proc_dostring_modpriv(struct ctl_table *, int,
61279+ void __user *, size_t *, loff_t *);
61280 extern int proc_dointvec(struct ctl_table *, int,
61281 void __user *, size_t *, loff_t *);
61282 extern int proc_dointvec_minmax(struct ctl_table *, int,
61283diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
61284index a71a292..51bd91d 100644
61285--- a/include/linux/tracehook.h
61286+++ b/include/linux/tracehook.h
61287@@ -54,12 +54,12 @@ struct linux_binprm;
61288 /*
61289 * ptrace report for syscall entry and exit looks identical.
61290 */
61291-static inline void ptrace_report_syscall(struct pt_regs *regs)
61292+static inline int ptrace_report_syscall(struct pt_regs *regs)
61293 {
61294 int ptrace = current->ptrace;
61295
61296 if (!(ptrace & PT_PTRACED))
61297- return;
61298+ return 0;
61299
61300 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
61301
61302@@ -72,6 +72,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
61303 send_sig(current->exit_code, current, 1);
61304 current->exit_code = 0;
61305 }
61306+
61307+ return fatal_signal_pending(current);
61308 }
61309
61310 /**
61311@@ -96,8 +98,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
61312 static inline __must_check int tracehook_report_syscall_entry(
61313 struct pt_regs *regs)
61314 {
61315- ptrace_report_syscall(regs);
61316- return 0;
61317+ return ptrace_report_syscall(regs);
61318 }
61319
61320 /**
61321diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
61322index ff7dc08..893e1bd 100644
61323--- a/include/linux/tty_ldisc.h
61324+++ b/include/linux/tty_ldisc.h
61325@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
61326
61327 struct module *owner;
61328
61329- int refcount;
61330+ atomic_t refcount;
61331 };
61332
61333 struct tty_ldisc {
61334diff --git a/include/linux/types.h b/include/linux/types.h
61335index 57a9723..dbe234a 100644
61336--- a/include/linux/types.h
61337+++ b/include/linux/types.h
61338@@ -213,10 +213,26 @@ typedef struct {
61339 int counter;
61340 } atomic_t;
61341
61342+#ifdef CONFIG_PAX_REFCOUNT
61343+typedef struct {
61344+ int counter;
61345+} atomic_unchecked_t;
61346+#else
61347+typedef atomic_t atomic_unchecked_t;
61348+#endif
61349+
61350 #ifdef CONFIG_64BIT
61351 typedef struct {
61352 long counter;
61353 } atomic64_t;
61354+
61355+#ifdef CONFIG_PAX_REFCOUNT
61356+typedef struct {
61357+ long counter;
61358+} atomic64_unchecked_t;
61359+#else
61360+typedef atomic64_t atomic64_unchecked_t;
61361+#endif
61362 #endif
61363
61364 struct list_head {
61365diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
61366index 5ca0951..ab496a5 100644
61367--- a/include/linux/uaccess.h
61368+++ b/include/linux/uaccess.h
61369@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
61370 long ret; \
61371 mm_segment_t old_fs = get_fs(); \
61372 \
61373- set_fs(KERNEL_DS); \
61374 pagefault_disable(); \
61375- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
61376- pagefault_enable(); \
61377+ set_fs(KERNEL_DS); \
61378+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
61379 set_fs(old_fs); \
61380+ pagefault_enable(); \
61381 ret; \
61382 })
61383
61384diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
61385index 99c1b4d..bb94261 100644
61386--- a/include/linux/unaligned/access_ok.h
61387+++ b/include/linux/unaligned/access_ok.h
61388@@ -6,32 +6,32 @@
61389
61390 static inline u16 get_unaligned_le16(const void *p)
61391 {
61392- return le16_to_cpup((__le16 *)p);
61393+ return le16_to_cpup((const __le16 *)p);
61394 }
61395
61396 static inline u32 get_unaligned_le32(const void *p)
61397 {
61398- return le32_to_cpup((__le32 *)p);
61399+ return le32_to_cpup((const __le32 *)p);
61400 }
61401
61402 static inline u64 get_unaligned_le64(const void *p)
61403 {
61404- return le64_to_cpup((__le64 *)p);
61405+ return le64_to_cpup((const __le64 *)p);
61406 }
61407
61408 static inline u16 get_unaligned_be16(const void *p)
61409 {
61410- return be16_to_cpup((__be16 *)p);
61411+ return be16_to_cpup((const __be16 *)p);
61412 }
61413
61414 static inline u32 get_unaligned_be32(const void *p)
61415 {
61416- return be32_to_cpup((__be32 *)p);
61417+ return be32_to_cpup((const __be32 *)p);
61418 }
61419
61420 static inline u64 get_unaligned_be64(const void *p)
61421 {
61422- return be64_to_cpup((__be64 *)p);
61423+ return be64_to_cpup((const __be64 *)p);
61424 }
61425
61426 static inline void put_unaligned_le16(u16 val, void *p)
61427diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
61428index e5a40c3..20ab0f6 100644
61429--- a/include/linux/usb/renesas_usbhs.h
61430+++ b/include/linux/usb/renesas_usbhs.h
61431@@ -39,7 +39,7 @@ enum {
61432 */
61433 struct renesas_usbhs_driver_callback {
61434 int (*notify_hotplug)(struct platform_device *pdev);
61435-};
61436+} __no_const;
61437
61438 /*
61439 * callback functions for platform
61440@@ -89,7 +89,7 @@ struct renesas_usbhs_platform_callback {
61441 * VBUS control is needed for Host
61442 */
61443 int (*set_vbus)(struct platform_device *pdev, int enable);
61444-};
61445+} __no_const;
61446
61447 /*
61448 * parameters for renesas usbhs
61449diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
61450index 6f8fbcf..8259001 100644
61451--- a/include/linux/vermagic.h
61452+++ b/include/linux/vermagic.h
61453@@ -25,9 +25,35 @@
61454 #define MODULE_ARCH_VERMAGIC ""
61455 #endif
61456
61457+#ifdef CONFIG_PAX_REFCOUNT
61458+#define MODULE_PAX_REFCOUNT "REFCOUNT "
61459+#else
61460+#define MODULE_PAX_REFCOUNT ""
61461+#endif
61462+
61463+#ifdef CONSTIFY_PLUGIN
61464+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
61465+#else
61466+#define MODULE_CONSTIFY_PLUGIN ""
61467+#endif
61468+
61469+#ifdef STACKLEAK_PLUGIN
61470+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
61471+#else
61472+#define MODULE_STACKLEAK_PLUGIN ""
61473+#endif
61474+
61475+#ifdef CONFIG_GRKERNSEC
61476+#define MODULE_GRSEC "GRSEC "
61477+#else
61478+#define MODULE_GRSEC ""
61479+#endif
61480+
61481 #define VERMAGIC_STRING \
61482 UTS_RELEASE " " \
61483 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
61484 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
61485- MODULE_ARCH_VERMAGIC
61486+ MODULE_ARCH_VERMAGIC \
61487+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
61488+ MODULE_GRSEC
61489
61490diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
61491index 4bde182..aec92c1 100644
61492--- a/include/linux/vmalloc.h
61493+++ b/include/linux/vmalloc.h
61494@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
61495 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
61496 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
61497 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
61498+
61499+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
61500+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
61501+#endif
61502+
61503 /* bits [20..32] reserved for arch specific ioremap internals */
61504
61505 /*
61506@@ -156,4 +161,103 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
61507 # endif
61508 #endif
61509
61510+#define vmalloc(x) \
61511+({ \
61512+ void *___retval; \
61513+ intoverflow_t ___x = (intoverflow_t)x; \
61514+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
61515+ ___retval = NULL; \
61516+ else \
61517+ ___retval = vmalloc((unsigned long)___x); \
61518+ ___retval; \
61519+})
61520+
61521+#define vzalloc(x) \
61522+({ \
61523+ void *___retval; \
61524+ intoverflow_t ___x = (intoverflow_t)x; \
61525+ if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
61526+ ___retval = NULL; \
61527+ else \
61528+ ___retval = vzalloc((unsigned long)___x); \
61529+ ___retval; \
61530+})
61531+
61532+#define __vmalloc(x, y, z) \
61533+({ \
61534+ void *___retval; \
61535+ intoverflow_t ___x = (intoverflow_t)x; \
61536+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
61537+ ___retval = NULL; \
61538+ else \
61539+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
61540+ ___retval; \
61541+})
61542+
61543+#define vmalloc_user(x) \
61544+({ \
61545+ void *___retval; \
61546+ intoverflow_t ___x = (intoverflow_t)x; \
61547+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
61548+ ___retval = NULL; \
61549+ else \
61550+ ___retval = vmalloc_user((unsigned long)___x); \
61551+ ___retval; \
61552+})
61553+
61554+#define vmalloc_exec(x) \
61555+({ \
61556+ void *___retval; \
61557+ intoverflow_t ___x = (intoverflow_t)x; \
61558+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
61559+ ___retval = NULL; \
61560+ else \
61561+ ___retval = vmalloc_exec((unsigned long)___x); \
61562+ ___retval; \
61563+})
61564+
61565+#define vmalloc_node(x, y) \
61566+({ \
61567+ void *___retval; \
61568+ intoverflow_t ___x = (intoverflow_t)x; \
61569+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
61570+ ___retval = NULL; \
61571+ else \
61572+ ___retval = vmalloc_node((unsigned long)___x, (y));\
61573+ ___retval; \
61574+})
61575+
61576+#define vzalloc_node(x, y) \
61577+({ \
61578+ void *___retval; \
61579+ intoverflow_t ___x = (intoverflow_t)x; \
61580+ if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
61581+ ___retval = NULL; \
61582+ else \
61583+ ___retval = vzalloc_node((unsigned long)___x, (y));\
61584+ ___retval; \
61585+})
61586+
61587+#define vmalloc_32(x) \
61588+({ \
61589+ void *___retval; \
61590+ intoverflow_t ___x = (intoverflow_t)x; \
61591+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
61592+ ___retval = NULL; \
61593+ else \
61594+ ___retval = vmalloc_32((unsigned long)___x); \
61595+ ___retval; \
61596+})
61597+
61598+#define vmalloc_32_user(x) \
61599+({ \
61600+void *___retval; \
61601+ intoverflow_t ___x = (intoverflow_t)x; \
61602+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
61603+ ___retval = NULL; \
61604+ else \
61605+ ___retval = vmalloc_32_user((unsigned long)___x);\
61606+ ___retval; \
61607+})
61608+
61609 #endif /* _LINUX_VMALLOC_H */
61610diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
61611index 65efb92..137adbb 100644
61612--- a/include/linux/vmstat.h
61613+++ b/include/linux/vmstat.h
61614@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
61615 /*
61616 * Zone based page accounting with per cpu differentials.
61617 */
61618-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61619+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61620
61621 static inline void zone_page_state_add(long x, struct zone *zone,
61622 enum zone_stat_item item)
61623 {
61624- atomic_long_add(x, &zone->vm_stat[item]);
61625- atomic_long_add(x, &vm_stat[item]);
61626+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
61627+ atomic_long_add_unchecked(x, &vm_stat[item]);
61628 }
61629
61630 static inline unsigned long global_page_state(enum zone_stat_item item)
61631 {
61632- long x = atomic_long_read(&vm_stat[item]);
61633+ long x = atomic_long_read_unchecked(&vm_stat[item]);
61634 #ifdef CONFIG_SMP
61635 if (x < 0)
61636 x = 0;
61637@@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
61638 static inline unsigned long zone_page_state(struct zone *zone,
61639 enum zone_stat_item item)
61640 {
61641- long x = atomic_long_read(&zone->vm_stat[item]);
61642+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61643 #ifdef CONFIG_SMP
61644 if (x < 0)
61645 x = 0;
61646@@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
61647 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
61648 enum zone_stat_item item)
61649 {
61650- long x = atomic_long_read(&zone->vm_stat[item]);
61651+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61652
61653 #ifdef CONFIG_SMP
61654 int cpu;
61655@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
61656
61657 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
61658 {
61659- atomic_long_inc(&zone->vm_stat[item]);
61660- atomic_long_inc(&vm_stat[item]);
61661+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
61662+ atomic_long_inc_unchecked(&vm_stat[item]);
61663 }
61664
61665 static inline void __inc_zone_page_state(struct page *page,
61666@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
61667
61668 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
61669 {
61670- atomic_long_dec(&zone->vm_stat[item]);
61671- atomic_long_dec(&vm_stat[item]);
61672+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
61673+ atomic_long_dec_unchecked(&vm_stat[item]);
61674 }
61675
61676 static inline void __dec_zone_page_state(struct page *page,
61677diff --git a/include/linux/xattr.h b/include/linux/xattr.h
61678index e5d1220..ef6e406 100644
61679--- a/include/linux/xattr.h
61680+++ b/include/linux/xattr.h
61681@@ -57,6 +57,11 @@
61682 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
61683 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
61684
61685+/* User namespace */
61686+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
61687+#define XATTR_PAX_FLAGS_SUFFIX "flags"
61688+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
61689+
61690 #ifdef __KERNEL__
61691
61692 #include <linux/types.h>
61693diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
61694index 4aeff96..b378cdc 100644
61695--- a/include/media/saa7146_vv.h
61696+++ b/include/media/saa7146_vv.h
61697@@ -163,7 +163,7 @@ struct saa7146_ext_vv
61698 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
61699
61700 /* the extension can override this */
61701- struct v4l2_ioctl_ops ops;
61702+ v4l2_ioctl_ops_no_const ops;
61703 /* pointer to the saa7146 core ops */
61704 const struct v4l2_ioctl_ops *core_ops;
61705
61706diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
61707index c7c40f1..4f01585 100644
61708--- a/include/media/v4l2-dev.h
61709+++ b/include/media/v4l2-dev.h
61710@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
61711
61712
61713 struct v4l2_file_operations {
61714- struct module *owner;
61715+ struct module * const owner;
61716 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
61717 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
61718 unsigned int (*poll) (struct file *, struct poll_table_struct *);
61719@@ -68,6 +68,7 @@ struct v4l2_file_operations {
61720 int (*open) (struct file *);
61721 int (*release) (struct file *);
61722 };
61723+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
61724
61725 /*
61726 * Newer version of video_device, handled by videodev2.c
61727diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
61728index 4d1c74a..65e1221 100644
61729--- a/include/media/v4l2-ioctl.h
61730+++ b/include/media/v4l2-ioctl.h
61731@@ -274,7 +274,7 @@ struct v4l2_ioctl_ops {
61732 long (*vidioc_default) (struct file *file, void *fh,
61733 bool valid_prio, int cmd, void *arg);
61734 };
61735-
61736+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
61737
61738 /* v4l debugging and diagnostics */
61739
61740diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
61741index 8d55251..dfe5b0a 100644
61742--- a/include/net/caif/caif_hsi.h
61743+++ b/include/net/caif/caif_hsi.h
61744@@ -98,7 +98,7 @@ struct cfhsi_drv {
61745 void (*rx_done_cb) (struct cfhsi_drv *drv);
61746 void (*wake_up_cb) (struct cfhsi_drv *drv);
61747 void (*wake_down_cb) (struct cfhsi_drv *drv);
61748-};
61749+} __no_const;
61750
61751 /* Structure implemented by HSI device. */
61752 struct cfhsi_dev {
61753diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
61754index 9e5425b..8136ffc 100644
61755--- a/include/net/caif/cfctrl.h
61756+++ b/include/net/caif/cfctrl.h
61757@@ -52,7 +52,7 @@ struct cfctrl_rsp {
61758 void (*radioset_rsp)(void);
61759 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
61760 struct cflayer *client_layer);
61761-};
61762+} __no_const;
61763
61764 /* Link Setup Parameters for CAIF-Links. */
61765 struct cfctrl_link_param {
61766@@ -101,8 +101,8 @@ struct cfctrl_request_info {
61767 struct cfctrl {
61768 struct cfsrvl serv;
61769 struct cfctrl_rsp res;
61770- atomic_t req_seq_no;
61771- atomic_t rsp_seq_no;
61772+ atomic_unchecked_t req_seq_no;
61773+ atomic_unchecked_t rsp_seq_no;
61774 struct list_head list;
61775 /* Protects from simultaneous access to first_req list */
61776 spinlock_t info_list_lock;
61777diff --git a/include/net/flow.h b/include/net/flow.h
61778index 2a7eefd..3250f3b 100644
61779--- a/include/net/flow.h
61780+++ b/include/net/flow.h
61781@@ -218,6 +218,6 @@ extern struct flow_cache_object *flow_cache_lookup(
61782
61783 extern void flow_cache_flush(void);
61784 extern void flow_cache_flush_deferred(void);
61785-extern atomic_t flow_cache_genid;
61786+extern atomic_unchecked_t flow_cache_genid;
61787
61788 #endif
61789diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
61790index e9ff3fc..9d3e5c7 100644
61791--- a/include/net/inetpeer.h
61792+++ b/include/net/inetpeer.h
61793@@ -48,8 +48,8 @@ struct inet_peer {
61794 */
61795 union {
61796 struct {
61797- atomic_t rid; /* Frag reception counter */
61798- atomic_t ip_id_count; /* IP ID for the next packet */
61799+ atomic_unchecked_t rid; /* Frag reception counter */
61800+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
61801 __u32 tcp_ts;
61802 __u32 tcp_ts_stamp;
61803 };
61804@@ -113,11 +113,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
61805 more++;
61806 inet_peer_refcheck(p);
61807 do {
61808- old = atomic_read(&p->ip_id_count);
61809+ old = atomic_read_unchecked(&p->ip_id_count);
61810 new = old + more;
61811 if (!new)
61812 new = 1;
61813- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
61814+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
61815 return new;
61816 }
61817
61818diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
61819index 10422ef..662570f 100644
61820--- a/include/net/ip_fib.h
61821+++ b/include/net/ip_fib.h
61822@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
61823
61824 #define FIB_RES_SADDR(net, res) \
61825 ((FIB_RES_NH(res).nh_saddr_genid == \
61826- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
61827+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
61828 FIB_RES_NH(res).nh_saddr : \
61829 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
61830 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
61831diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
61832index e5a7b9a..f4fc44b 100644
61833--- a/include/net/ip_vs.h
61834+++ b/include/net/ip_vs.h
61835@@ -509,7 +509,7 @@ struct ip_vs_conn {
61836 struct ip_vs_conn *control; /* Master control connection */
61837 atomic_t n_control; /* Number of controlled ones */
61838 struct ip_vs_dest *dest; /* real server */
61839- atomic_t in_pkts; /* incoming packet counter */
61840+ atomic_unchecked_t in_pkts; /* incoming packet counter */
61841
61842 /* packet transmitter for different forwarding methods. If it
61843 mangles the packet, it must return NF_DROP or better NF_STOLEN,
61844@@ -647,7 +647,7 @@ struct ip_vs_dest {
61845 __be16 port; /* port number of the server */
61846 union nf_inet_addr addr; /* IP address of the server */
61847 volatile unsigned flags; /* dest status flags */
61848- atomic_t conn_flags; /* flags to copy to conn */
61849+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
61850 atomic_t weight; /* server weight */
61851
61852 atomic_t refcnt; /* reference counter */
61853diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
61854index 69b610a..fe3962c 100644
61855--- a/include/net/irda/ircomm_core.h
61856+++ b/include/net/irda/ircomm_core.h
61857@@ -51,7 +51,7 @@ typedef struct {
61858 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
61859 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
61860 struct ircomm_info *);
61861-} call_t;
61862+} __no_const call_t;
61863
61864 struct ircomm_cb {
61865 irda_queue_t queue;
61866diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
61867index 59ba38bc..d515662 100644
61868--- a/include/net/irda/ircomm_tty.h
61869+++ b/include/net/irda/ircomm_tty.h
61870@@ -35,6 +35,7 @@
61871 #include <linux/termios.h>
61872 #include <linux/timer.h>
61873 #include <linux/tty.h> /* struct tty_struct */
61874+#include <asm/local.h>
61875
61876 #include <net/irda/irias_object.h>
61877 #include <net/irda/ircomm_core.h>
61878@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61879 unsigned short close_delay;
61880 unsigned short closing_wait; /* time to wait before closing */
61881
61882- int open_count;
61883- int blocked_open; /* # of blocked opens */
61884+ local_t open_count;
61885+ local_t blocked_open; /* # of blocked opens */
61886
61887 /* Protect concurent access to :
61888 * o self->open_count
61889diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
61890index f2419cf..473679f 100644
61891--- a/include/net/iucv/af_iucv.h
61892+++ b/include/net/iucv/af_iucv.h
61893@@ -139,7 +139,7 @@ struct iucv_sock {
61894 struct iucv_sock_list {
61895 struct hlist_head head;
61896 rwlock_t lock;
61897- atomic_t autobind_name;
61898+ atomic_unchecked_t autobind_name;
61899 };
61900
61901 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61902diff --git a/include/net/neighbour.h b/include/net/neighbour.h
61903index 2720884..3aa5c25 100644
61904--- a/include/net/neighbour.h
61905+++ b/include/net/neighbour.h
61906@@ -122,7 +122,7 @@ struct neigh_ops {
61907 void (*error_report)(struct neighbour *, struct sk_buff *);
61908 int (*output)(struct neighbour *, struct sk_buff *);
61909 int (*connected_output)(struct neighbour *, struct sk_buff *);
61910-};
61911+} __do_const;
61912
61913 struct pneigh_entry {
61914 struct pneigh_entry *next;
61915diff --git a/include/net/netlink.h b/include/net/netlink.h
61916index cb1f350..3279d2c 100644
61917--- a/include/net/netlink.h
61918+++ b/include/net/netlink.h
61919@@ -569,7 +569,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
61920 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61921 {
61922 if (mark)
61923- skb_trim(skb, (unsigned char *) mark - skb->data);
61924+ skb_trim(skb, (const unsigned char *) mark - skb->data);
61925 }
61926
61927 /**
61928diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
61929index d786b4f..4c3dd41 100644
61930--- a/include/net/netns/ipv4.h
61931+++ b/include/net/netns/ipv4.h
61932@@ -56,8 +56,8 @@ struct netns_ipv4 {
61933
61934 unsigned int sysctl_ping_group_range[2];
61935
61936- atomic_t rt_genid;
61937- atomic_t dev_addr_genid;
61938+ atomic_unchecked_t rt_genid;
61939+ atomic_unchecked_t dev_addr_genid;
61940
61941 #ifdef CONFIG_IP_MROUTE
61942 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
61943diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
61944index 6a72a58..e6a127d 100644
61945--- a/include/net/sctp/sctp.h
61946+++ b/include/net/sctp/sctp.h
61947@@ -318,9 +318,9 @@ do { \
61948
61949 #else /* SCTP_DEBUG */
61950
61951-#define SCTP_DEBUG_PRINTK(whatever...)
61952-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
61953-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61954+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61955+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
61956+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61957 #define SCTP_ENABLE_DEBUG
61958 #define SCTP_DISABLE_DEBUG
61959 #define SCTP_ASSERT(expr, str, func)
61960diff --git a/include/net/sock.h b/include/net/sock.h
61961index 32e3937..87a1dbc 100644
61962--- a/include/net/sock.h
61963+++ b/include/net/sock.h
61964@@ -277,7 +277,7 @@ struct sock {
61965 #ifdef CONFIG_RPS
61966 __u32 sk_rxhash;
61967 #endif
61968- atomic_t sk_drops;
61969+ atomic_unchecked_t sk_drops;
61970 int sk_rcvbuf;
61971
61972 struct sk_filter __rcu *sk_filter;
61973@@ -1402,7 +1402,7 @@ static inline void sk_nocaps_add(struct sock *sk, int flags)
61974 }
61975
61976 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
61977- char __user *from, char *to,
61978+ char __user *from, unsigned char *to,
61979 int copy, int offset)
61980 {
61981 if (skb->ip_summed == CHECKSUM_NONE) {
61982diff --git a/include/net/tcp.h b/include/net/tcp.h
61983index bb18c4d..bb87972 100644
61984--- a/include/net/tcp.h
61985+++ b/include/net/tcp.h
61986@@ -1409,7 +1409,7 @@ struct tcp_seq_afinfo {
61987 char *name;
61988 sa_family_t family;
61989 const struct file_operations *seq_fops;
61990- struct seq_operations seq_ops;
61991+ seq_operations_no_const seq_ops;
61992 };
61993
61994 struct tcp_iter_state {
61995diff --git a/include/net/udp.h b/include/net/udp.h
61996index 3b285f4..0219639 100644
61997--- a/include/net/udp.h
61998+++ b/include/net/udp.h
61999@@ -237,7 +237,7 @@ struct udp_seq_afinfo {
62000 sa_family_t family;
62001 struct udp_table *udp_table;
62002 const struct file_operations *seq_fops;
62003- struct seq_operations seq_ops;
62004+ seq_operations_no_const seq_ops;
62005 };
62006
62007 struct udp_iter_state {
62008diff --git a/include/net/xfrm.h b/include/net/xfrm.h
62009index b203e14..1df3991 100644
62010--- a/include/net/xfrm.h
62011+++ b/include/net/xfrm.h
62012@@ -505,7 +505,7 @@ struct xfrm_policy {
62013 struct timer_list timer;
62014
62015 struct flow_cache_object flo;
62016- atomic_t genid;
62017+ atomic_unchecked_t genid;
62018 u32 priority;
62019 u32 index;
62020 struct xfrm_mark mark;
62021diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
62022index 1a046b1..ee0bef0 100644
62023--- a/include/rdma/iw_cm.h
62024+++ b/include/rdma/iw_cm.h
62025@@ -122,7 +122,7 @@ struct iw_cm_verbs {
62026 int backlog);
62027
62028 int (*destroy_listen)(struct iw_cm_id *cm_id);
62029-};
62030+} __no_const;
62031
62032 /**
62033 * iw_create_cm_id - Create an IW CM identifier.
62034diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
62035index 5d1a758..1dbf795 100644
62036--- a/include/scsi/libfc.h
62037+++ b/include/scsi/libfc.h
62038@@ -748,6 +748,7 @@ struct libfc_function_template {
62039 */
62040 void (*disc_stop_final) (struct fc_lport *);
62041 };
62042+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
62043
62044 /**
62045 * struct fc_disc - Discovery context
62046@@ -851,7 +852,7 @@ struct fc_lport {
62047 struct fc_vport *vport;
62048
62049 /* Operational Information */
62050- struct libfc_function_template tt;
62051+ libfc_function_template_no_const tt;
62052 u8 link_up;
62053 u8 qfull;
62054 enum fc_lport_state state;
62055diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
62056index 5591ed5..13eb457 100644
62057--- a/include/scsi/scsi_device.h
62058+++ b/include/scsi/scsi_device.h
62059@@ -161,9 +161,9 @@ struct scsi_device {
62060 unsigned int max_device_blocked; /* what device_blocked counts down from */
62061 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
62062
62063- atomic_t iorequest_cnt;
62064- atomic_t iodone_cnt;
62065- atomic_t ioerr_cnt;
62066+ atomic_unchecked_t iorequest_cnt;
62067+ atomic_unchecked_t iodone_cnt;
62068+ atomic_unchecked_t ioerr_cnt;
62069
62070 struct device sdev_gendev,
62071 sdev_dev;
62072diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
62073index 2a65167..91e01f8 100644
62074--- a/include/scsi/scsi_transport_fc.h
62075+++ b/include/scsi/scsi_transport_fc.h
62076@@ -711,7 +711,7 @@ struct fc_function_template {
62077 unsigned long show_host_system_hostname:1;
62078
62079 unsigned long disable_target_scan:1;
62080-};
62081+} __do_const;
62082
62083
62084 /**
62085diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
62086index 030b87c..98a6954 100644
62087--- a/include/sound/ak4xxx-adda.h
62088+++ b/include/sound/ak4xxx-adda.h
62089@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
62090 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
62091 unsigned char val);
62092 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
62093-};
62094+} __no_const;
62095
62096 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
62097
62098diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
62099index 8c05e47..2b5df97 100644
62100--- a/include/sound/hwdep.h
62101+++ b/include/sound/hwdep.h
62102@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
62103 struct snd_hwdep_dsp_status *status);
62104 int (*dsp_load)(struct snd_hwdep *hw,
62105 struct snd_hwdep_dsp_image *image);
62106-};
62107+} __no_const;
62108
62109 struct snd_hwdep {
62110 struct snd_card *card;
62111diff --git a/include/sound/info.h b/include/sound/info.h
62112index 5492cc4..1a65278 100644
62113--- a/include/sound/info.h
62114+++ b/include/sound/info.h
62115@@ -44,7 +44,7 @@ struct snd_info_entry_text {
62116 struct snd_info_buffer *buffer);
62117 void (*write)(struct snd_info_entry *entry,
62118 struct snd_info_buffer *buffer);
62119-};
62120+} __no_const;
62121
62122 struct snd_info_entry_ops {
62123 int (*open)(struct snd_info_entry *entry,
62124diff --git a/include/sound/pcm.h b/include/sound/pcm.h
62125index 0cf91b2..b70cae4 100644
62126--- a/include/sound/pcm.h
62127+++ b/include/sound/pcm.h
62128@@ -81,6 +81,7 @@ struct snd_pcm_ops {
62129 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
62130 int (*ack)(struct snd_pcm_substream *substream);
62131 };
62132+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
62133
62134 /*
62135 *
62136diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
62137index af1b49e..a5d55a5 100644
62138--- a/include/sound/sb16_csp.h
62139+++ b/include/sound/sb16_csp.h
62140@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
62141 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
62142 int (*csp_stop) (struct snd_sb_csp * p);
62143 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
62144-};
62145+} __no_const;
62146
62147 /*
62148 * CSP private data
62149diff --git a/include/sound/soc.h b/include/sound/soc.h
62150index 11cfb59..e3f93f4 100644
62151--- a/include/sound/soc.h
62152+++ b/include/sound/soc.h
62153@@ -683,7 +683,7 @@ struct snd_soc_platform_driver {
62154 /* platform IO - used for platform DAPM */
62155 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
62156 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
62157-};
62158+} __do_const;
62159
62160 struct snd_soc_platform {
62161 const char *name;
62162diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
62163index 444cd6b..3327cc5 100644
62164--- a/include/sound/ymfpci.h
62165+++ b/include/sound/ymfpci.h
62166@@ -358,7 +358,7 @@ struct snd_ymfpci {
62167 spinlock_t reg_lock;
62168 spinlock_t voice_lock;
62169 wait_queue_head_t interrupt_sleep;
62170- atomic_t interrupt_sleep_count;
62171+ atomic_unchecked_t interrupt_sleep_count;
62172 struct snd_info_entry *proc_entry;
62173 const struct firmware *dsp_microcode;
62174 const struct firmware *controller_microcode;
62175diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
62176index 94bbec3..3a8c6b0 100644
62177--- a/include/target/target_core_base.h
62178+++ b/include/target/target_core_base.h
62179@@ -346,7 +346,7 @@ struct t10_reservation_ops {
62180 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
62181 int (*t10_pr_register)(struct se_cmd *);
62182 int (*t10_pr_clear)(struct se_cmd *);
62183-};
62184+} __no_const;
62185
62186 struct t10_reservation {
62187 /* Reservation effects all target ports */
62188@@ -465,8 +465,8 @@ struct se_cmd {
62189 atomic_t t_se_count;
62190 atomic_t t_task_cdbs_left;
62191 atomic_t t_task_cdbs_ex_left;
62192- atomic_t t_task_cdbs_sent;
62193- atomic_t t_transport_aborted;
62194+ atomic_unchecked_t t_task_cdbs_sent;
62195+ atomic_unchecked_t t_transport_aborted;
62196 atomic_t t_transport_active;
62197 atomic_t t_transport_complete;
62198 atomic_t t_transport_queue_active;
62199@@ -705,7 +705,7 @@ struct se_device {
62200 /* Active commands on this virtual SE device */
62201 atomic_t simple_cmds;
62202 atomic_t depth_left;
62203- atomic_t dev_ordered_id;
62204+ atomic_unchecked_t dev_ordered_id;
62205 atomic_t execute_tasks;
62206 atomic_t dev_ordered_sync;
62207 atomic_t dev_qf_count;
62208diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
62209index 1c09820..7f5ec79 100644
62210--- a/include/trace/events/irq.h
62211+++ b/include/trace/events/irq.h
62212@@ -36,7 +36,7 @@ struct softirq_action;
62213 */
62214 TRACE_EVENT(irq_handler_entry,
62215
62216- TP_PROTO(int irq, struct irqaction *action),
62217+ TP_PROTO(int irq, const struct irqaction *action),
62218
62219 TP_ARGS(irq, action),
62220
62221@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
62222 */
62223 TRACE_EVENT(irq_handler_exit,
62224
62225- TP_PROTO(int irq, struct irqaction *action, int ret),
62226+ TP_PROTO(int irq, const struct irqaction *action, int ret),
62227
62228 TP_ARGS(irq, action, ret),
62229
62230diff --git a/include/video/udlfb.h b/include/video/udlfb.h
62231index c41f308..6918de3 100644
62232--- a/include/video/udlfb.h
62233+++ b/include/video/udlfb.h
62234@@ -52,10 +52,10 @@ struct dlfb_data {
62235 u32 pseudo_palette[256];
62236 int blank_mode; /*one of FB_BLANK_ */
62237 /* blit-only rendering path metrics, exposed through sysfs */
62238- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
62239- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
62240- atomic_t bytes_sent; /* to usb, after compression including overhead */
62241- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
62242+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
62243+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
62244+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
62245+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
62246 };
62247
62248 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
62249diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
62250index 0993a22..32ba2fe 100644
62251--- a/include/video/uvesafb.h
62252+++ b/include/video/uvesafb.h
62253@@ -177,6 +177,7 @@ struct uvesafb_par {
62254 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
62255 u8 pmi_setpal; /* PMI for palette changes */
62256 u16 *pmi_base; /* protected mode interface location */
62257+ u8 *pmi_code; /* protected mode code location */
62258 void *pmi_start;
62259 void *pmi_pal;
62260 u8 *vbe_state_orig; /*
62261diff --git a/init/Kconfig b/init/Kconfig
62262index 43298f9..2f56c12 100644
62263--- a/init/Kconfig
62264+++ b/init/Kconfig
62265@@ -1214,7 +1214,7 @@ config SLUB_DEBUG
62266
62267 config COMPAT_BRK
62268 bool "Disable heap randomization"
62269- default y
62270+ default n
62271 help
62272 Randomizing heap placement makes heap exploits harder, but it
62273 also breaks ancient binaries (including anything libc5 based).
62274diff --git a/init/do_mounts.c b/init/do_mounts.c
62275index db6e5ee..7677ff7 100644
62276--- a/init/do_mounts.c
62277+++ b/init/do_mounts.c
62278@@ -325,11 +325,11 @@ static void __init get_fs_names(char *page)
62279
62280 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
62281 {
62282- int err = sys_mount(name, "/root", fs, flags, data);
62283+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
62284 if (err)
62285 return err;
62286
62287- sys_chdir((const char __user __force *)"/root");
62288+ sys_chdir((const char __force_user*)"/root");
62289 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
62290 printk(KERN_INFO
62291 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
62292@@ -448,18 +448,18 @@ void __init change_floppy(char *fmt, ...)
62293 va_start(args, fmt);
62294 vsprintf(buf, fmt, args);
62295 va_end(args);
62296- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
62297+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
62298 if (fd >= 0) {
62299 sys_ioctl(fd, FDEJECT, 0);
62300 sys_close(fd);
62301 }
62302 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
62303- fd = sys_open("/dev/console", O_RDWR, 0);
62304+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
62305 if (fd >= 0) {
62306 sys_ioctl(fd, TCGETS, (long)&termios);
62307 termios.c_lflag &= ~ICANON;
62308 sys_ioctl(fd, TCSETSF, (long)&termios);
62309- sys_read(fd, &c, 1);
62310+ sys_read(fd, (char __user *)&c, 1);
62311 termios.c_lflag |= ICANON;
62312 sys_ioctl(fd, TCSETSF, (long)&termios);
62313 sys_close(fd);
62314@@ -553,6 +553,6 @@ void __init prepare_namespace(void)
62315 mount_root();
62316 out:
62317 devtmpfs_mount("dev");
62318- sys_mount(".", "/", NULL, MS_MOVE, NULL);
62319- sys_chroot((const char __user __force *)".");
62320+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
62321+ sys_chroot((const char __force_user *)".");
62322 }
62323diff --git a/init/do_mounts.h b/init/do_mounts.h
62324index f5b978a..69dbfe8 100644
62325--- a/init/do_mounts.h
62326+++ b/init/do_mounts.h
62327@@ -15,15 +15,15 @@ extern int root_mountflags;
62328
62329 static inline int create_dev(char *name, dev_t dev)
62330 {
62331- sys_unlink(name);
62332- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
62333+ sys_unlink((char __force_user *)name);
62334+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
62335 }
62336
62337 #if BITS_PER_LONG == 32
62338 static inline u32 bstat(char *name)
62339 {
62340 struct stat64 stat;
62341- if (sys_stat64(name, &stat) != 0)
62342+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
62343 return 0;
62344 if (!S_ISBLK(stat.st_mode))
62345 return 0;
62346@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
62347 static inline u32 bstat(char *name)
62348 {
62349 struct stat stat;
62350- if (sys_newstat(name, &stat) != 0)
62351+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
62352 return 0;
62353 if (!S_ISBLK(stat.st_mode))
62354 return 0;
62355diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
62356index 3098a38..253064e 100644
62357--- a/init/do_mounts_initrd.c
62358+++ b/init/do_mounts_initrd.c
62359@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
62360 create_dev("/dev/root.old", Root_RAM0);
62361 /* mount initrd on rootfs' /root */
62362 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
62363- sys_mkdir("/old", 0700);
62364- root_fd = sys_open("/", 0, 0);
62365- old_fd = sys_open("/old", 0, 0);
62366+ sys_mkdir((const char __force_user *)"/old", 0700);
62367+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
62368+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
62369 /* move initrd over / and chdir/chroot in initrd root */
62370- sys_chdir("/root");
62371- sys_mount(".", "/", NULL, MS_MOVE, NULL);
62372- sys_chroot(".");
62373+ sys_chdir((const char __force_user *)"/root");
62374+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
62375+ sys_chroot((const char __force_user *)".");
62376
62377 /*
62378 * In case that a resume from disk is carried out by linuxrc or one of
62379@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
62380
62381 /* move initrd to rootfs' /old */
62382 sys_fchdir(old_fd);
62383- sys_mount("/", ".", NULL, MS_MOVE, NULL);
62384+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
62385 /* switch root and cwd back to / of rootfs */
62386 sys_fchdir(root_fd);
62387- sys_chroot(".");
62388+ sys_chroot((const char __force_user *)".");
62389 sys_close(old_fd);
62390 sys_close(root_fd);
62391
62392 if (new_decode_dev(real_root_dev) == Root_RAM0) {
62393- sys_chdir("/old");
62394+ sys_chdir((const char __force_user *)"/old");
62395 return;
62396 }
62397
62398@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
62399 mount_root();
62400
62401 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
62402- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
62403+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
62404 if (!error)
62405 printk("okay\n");
62406 else {
62407- int fd = sys_open("/dev/root.old", O_RDWR, 0);
62408+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
62409 if (error == -ENOENT)
62410 printk("/initrd does not exist. Ignored.\n");
62411 else
62412 printk("failed\n");
62413 printk(KERN_NOTICE "Unmounting old root\n");
62414- sys_umount("/old", MNT_DETACH);
62415+ sys_umount((char __force_user *)"/old", MNT_DETACH);
62416 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
62417 if (fd < 0) {
62418 error = fd;
62419@@ -116,11 +116,11 @@ int __init initrd_load(void)
62420 * mounted in the normal path.
62421 */
62422 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
62423- sys_unlink("/initrd.image");
62424+ sys_unlink((const char __force_user *)"/initrd.image");
62425 handle_initrd();
62426 return 1;
62427 }
62428 }
62429- sys_unlink("/initrd.image");
62430+ sys_unlink((const char __force_user *)"/initrd.image");
62431 return 0;
62432 }
62433diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
62434index 32c4799..c27ee74 100644
62435--- a/init/do_mounts_md.c
62436+++ b/init/do_mounts_md.c
62437@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
62438 partitioned ? "_d" : "", minor,
62439 md_setup_args[ent].device_names);
62440
62441- fd = sys_open(name, 0, 0);
62442+ fd = sys_open((char __force_user *)name, 0, 0);
62443 if (fd < 0) {
62444 printk(KERN_ERR "md: open failed - cannot start "
62445 "array %s\n", name);
62446@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
62447 * array without it
62448 */
62449 sys_close(fd);
62450- fd = sys_open(name, 0, 0);
62451+ fd = sys_open((char __force_user *)name, 0, 0);
62452 sys_ioctl(fd, BLKRRPART, 0);
62453 }
62454 sys_close(fd);
62455@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
62456
62457 wait_for_device_probe();
62458
62459- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
62460+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
62461 if (fd >= 0) {
62462 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
62463 sys_close(fd);
62464diff --git a/init/initramfs.c b/init/initramfs.c
62465index 2531811..040d4d4 100644
62466--- a/init/initramfs.c
62467+++ b/init/initramfs.c
62468@@ -74,7 +74,7 @@ static void __init free_hash(void)
62469 }
62470 }
62471
62472-static long __init do_utime(char __user *filename, time_t mtime)
62473+static long __init do_utime(__force char __user *filename, time_t mtime)
62474 {
62475 struct timespec t[2];
62476
62477@@ -109,7 +109,7 @@ static void __init dir_utime(void)
62478 struct dir_entry *de, *tmp;
62479 list_for_each_entry_safe(de, tmp, &dir_list, list) {
62480 list_del(&de->list);
62481- do_utime(de->name, de->mtime);
62482+ do_utime((char __force_user *)de->name, de->mtime);
62483 kfree(de->name);
62484 kfree(de);
62485 }
62486@@ -271,7 +271,7 @@ static int __init maybe_link(void)
62487 if (nlink >= 2) {
62488 char *old = find_link(major, minor, ino, mode, collected);
62489 if (old)
62490- return (sys_link(old, collected) < 0) ? -1 : 1;
62491+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
62492 }
62493 return 0;
62494 }
62495@@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
62496 {
62497 struct stat st;
62498
62499- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
62500+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
62501 if (S_ISDIR(st.st_mode))
62502- sys_rmdir(path);
62503+ sys_rmdir((char __force_user *)path);
62504 else
62505- sys_unlink(path);
62506+ sys_unlink((char __force_user *)path);
62507 }
62508 }
62509
62510@@ -305,7 +305,7 @@ static int __init do_name(void)
62511 int openflags = O_WRONLY|O_CREAT;
62512 if (ml != 1)
62513 openflags |= O_TRUNC;
62514- wfd = sys_open(collected, openflags, mode);
62515+ wfd = sys_open((char __force_user *)collected, openflags, mode);
62516
62517 if (wfd >= 0) {
62518 sys_fchown(wfd, uid, gid);
62519@@ -317,17 +317,17 @@ static int __init do_name(void)
62520 }
62521 }
62522 } else if (S_ISDIR(mode)) {
62523- sys_mkdir(collected, mode);
62524- sys_chown(collected, uid, gid);
62525- sys_chmod(collected, mode);
62526+ sys_mkdir((char __force_user *)collected, mode);
62527+ sys_chown((char __force_user *)collected, uid, gid);
62528+ sys_chmod((char __force_user *)collected, mode);
62529 dir_add(collected, mtime);
62530 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
62531 S_ISFIFO(mode) || S_ISSOCK(mode)) {
62532 if (maybe_link() == 0) {
62533- sys_mknod(collected, mode, rdev);
62534- sys_chown(collected, uid, gid);
62535- sys_chmod(collected, mode);
62536- do_utime(collected, mtime);
62537+ sys_mknod((char __force_user *)collected, mode, rdev);
62538+ sys_chown((char __force_user *)collected, uid, gid);
62539+ sys_chmod((char __force_user *)collected, mode);
62540+ do_utime((char __force_user *)collected, mtime);
62541 }
62542 }
62543 return 0;
62544@@ -336,15 +336,15 @@ static int __init do_name(void)
62545 static int __init do_copy(void)
62546 {
62547 if (count >= body_len) {
62548- sys_write(wfd, victim, body_len);
62549+ sys_write(wfd, (char __force_user *)victim, body_len);
62550 sys_close(wfd);
62551- do_utime(vcollected, mtime);
62552+ do_utime((char __force_user *)vcollected, mtime);
62553 kfree(vcollected);
62554 eat(body_len);
62555 state = SkipIt;
62556 return 0;
62557 } else {
62558- sys_write(wfd, victim, count);
62559+ sys_write(wfd, (char __force_user *)victim, count);
62560 body_len -= count;
62561 eat(count);
62562 return 1;
62563@@ -355,9 +355,9 @@ static int __init do_symlink(void)
62564 {
62565 collected[N_ALIGN(name_len) + body_len] = '\0';
62566 clean_path(collected, 0);
62567- sys_symlink(collected + N_ALIGN(name_len), collected);
62568- sys_lchown(collected, uid, gid);
62569- do_utime(collected, mtime);
62570+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
62571+ sys_lchown((char __force_user *)collected, uid, gid);
62572+ do_utime((char __force_user *)collected, mtime);
62573 state = SkipIt;
62574 next_state = Reset;
62575 return 0;
62576diff --git a/init/main.c b/init/main.c
62577index 217ed23..ec5406f 100644
62578--- a/init/main.c
62579+++ b/init/main.c
62580@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
62581 extern void tc_init(void);
62582 #endif
62583
62584+extern void grsecurity_init(void);
62585+
62586 /*
62587 * Debug helper: via this flag we know that we are in 'early bootup code'
62588 * where only the boot processor is running with IRQ disabled. This means
62589@@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str)
62590
62591 __setup("reset_devices", set_reset_devices);
62592
62593+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
62594+extern char pax_enter_kernel_user[];
62595+extern char pax_exit_kernel_user[];
62596+extern pgdval_t clone_pgd_mask;
62597+#endif
62598+
62599+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
62600+static int __init setup_pax_nouderef(char *str)
62601+{
62602+#ifdef CONFIG_X86_32
62603+ unsigned int cpu;
62604+ struct desc_struct *gdt;
62605+
62606+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
62607+ gdt = get_cpu_gdt_table(cpu);
62608+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
62609+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
62610+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
62611+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
62612+ }
62613+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
62614+#else
62615+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
62616+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
62617+ clone_pgd_mask = ~(pgdval_t)0UL;
62618+#endif
62619+
62620+ return 0;
62621+}
62622+early_param("pax_nouderef", setup_pax_nouderef);
62623+#endif
62624+
62625+#ifdef CONFIG_PAX_SOFTMODE
62626+int pax_softmode;
62627+
62628+static int __init setup_pax_softmode(char *str)
62629+{
62630+ get_option(&str, &pax_softmode);
62631+ return 1;
62632+}
62633+__setup("pax_softmode=", setup_pax_softmode);
62634+#endif
62635+
62636 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
62637 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
62638 static const char *panic_later, *panic_param;
62639@@ -681,6 +726,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
62640 {
62641 int count = preempt_count();
62642 int ret;
62643+ const char *msg1 = "", *msg2 = "";
62644
62645 if (initcall_debug)
62646 ret = do_one_initcall_debug(fn);
62647@@ -693,15 +739,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
62648 sprintf(msgbuf, "error code %d ", ret);
62649
62650 if (preempt_count() != count) {
62651- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
62652+ msg1 = " preemption imbalance";
62653 preempt_count() = count;
62654 }
62655 if (irqs_disabled()) {
62656- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
62657+ msg2 = " disabled interrupts";
62658 local_irq_enable();
62659 }
62660- if (msgbuf[0]) {
62661- printk("initcall %pF returned with %s\n", fn, msgbuf);
62662+ if (msgbuf[0] || *msg1 || *msg2) {
62663+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
62664 }
62665
62666 return ret;
62667@@ -820,7 +866,7 @@ static int __init kernel_init(void * unused)
62668 do_basic_setup();
62669
62670 /* Open the /dev/console on the rootfs, this should never fail */
62671- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
62672+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
62673 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
62674
62675 (void) sys_dup(0);
62676@@ -833,11 +879,13 @@ static int __init kernel_init(void * unused)
62677 if (!ramdisk_execute_command)
62678 ramdisk_execute_command = "/init";
62679
62680- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
62681+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
62682 ramdisk_execute_command = NULL;
62683 prepare_namespace();
62684 }
62685
62686+ grsecurity_init();
62687+
62688 /*
62689 * Ok, we have completed the initial bootup, and
62690 * we're essentially up and running. Get rid of the
62691diff --git a/ipc/mqueue.c b/ipc/mqueue.c
62692index 5b4293d..f179875 100644
62693--- a/ipc/mqueue.c
62694+++ b/ipc/mqueue.c
62695@@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
62696 mq_bytes = (mq_msg_tblsz +
62697 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
62698
62699+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
62700 spin_lock(&mq_lock);
62701 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
62702 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
62703diff --git a/ipc/msg.c b/ipc/msg.c
62704index 7385de2..a8180e08 100644
62705--- a/ipc/msg.c
62706+++ b/ipc/msg.c
62707@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
62708 return security_msg_queue_associate(msq, msgflg);
62709 }
62710
62711+static struct ipc_ops msg_ops = {
62712+ .getnew = newque,
62713+ .associate = msg_security,
62714+ .more_checks = NULL
62715+};
62716+
62717 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
62718 {
62719 struct ipc_namespace *ns;
62720- struct ipc_ops msg_ops;
62721 struct ipc_params msg_params;
62722
62723 ns = current->nsproxy->ipc_ns;
62724
62725- msg_ops.getnew = newque;
62726- msg_ops.associate = msg_security;
62727- msg_ops.more_checks = NULL;
62728-
62729 msg_params.key = key;
62730 msg_params.flg = msgflg;
62731
62732diff --git a/ipc/sem.c b/ipc/sem.c
62733index 5215a81..cfc0cac 100644
62734--- a/ipc/sem.c
62735+++ b/ipc/sem.c
62736@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
62737 return 0;
62738 }
62739
62740+static struct ipc_ops sem_ops = {
62741+ .getnew = newary,
62742+ .associate = sem_security,
62743+ .more_checks = sem_more_checks
62744+};
62745+
62746 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62747 {
62748 struct ipc_namespace *ns;
62749- struct ipc_ops sem_ops;
62750 struct ipc_params sem_params;
62751
62752 ns = current->nsproxy->ipc_ns;
62753@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62754 if (nsems < 0 || nsems > ns->sc_semmsl)
62755 return -EINVAL;
62756
62757- sem_ops.getnew = newary;
62758- sem_ops.associate = sem_security;
62759- sem_ops.more_checks = sem_more_checks;
62760-
62761 sem_params.key = key;
62762 sem_params.flg = semflg;
62763 sem_params.u.nsems = nsems;
62764diff --git a/ipc/shm.c b/ipc/shm.c
62765index b76be5b..859e750 100644
62766--- a/ipc/shm.c
62767+++ b/ipc/shm.c
62768@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
62769 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62770 #endif
62771
62772+#ifdef CONFIG_GRKERNSEC
62773+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62774+ const time_t shm_createtime, const uid_t cuid,
62775+ const int shmid);
62776+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62777+ const time_t shm_createtime);
62778+#endif
62779+
62780 void shm_init_ns(struct ipc_namespace *ns)
62781 {
62782 ns->shm_ctlmax = SHMMAX;
62783@@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
62784 shp->shm_lprid = 0;
62785 shp->shm_atim = shp->shm_dtim = 0;
62786 shp->shm_ctim = get_seconds();
62787+#ifdef CONFIG_GRKERNSEC
62788+ {
62789+ struct timespec timeval;
62790+ do_posix_clock_monotonic_gettime(&timeval);
62791+
62792+ shp->shm_createtime = timeval.tv_sec;
62793+ }
62794+#endif
62795 shp->shm_segsz = size;
62796 shp->shm_nattch = 0;
62797 shp->shm_file = file;
62798@@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
62799 return 0;
62800 }
62801
62802+static struct ipc_ops shm_ops = {
62803+ .getnew = newseg,
62804+ .associate = shm_security,
62805+ .more_checks = shm_more_checks
62806+};
62807+
62808 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62809 {
62810 struct ipc_namespace *ns;
62811- struct ipc_ops shm_ops;
62812 struct ipc_params shm_params;
62813
62814 ns = current->nsproxy->ipc_ns;
62815
62816- shm_ops.getnew = newseg;
62817- shm_ops.associate = shm_security;
62818- shm_ops.more_checks = shm_more_checks;
62819-
62820 shm_params.key = key;
62821 shm_params.flg = shmflg;
62822 shm_params.u.size = size;
62823@@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
62824 f_mode = FMODE_READ | FMODE_WRITE;
62825 }
62826 if (shmflg & SHM_EXEC) {
62827+
62828+#ifdef CONFIG_PAX_MPROTECT
62829+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
62830+ goto out;
62831+#endif
62832+
62833 prot |= PROT_EXEC;
62834 acc_mode |= S_IXUGO;
62835 }
62836@@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
62837 if (err)
62838 goto out_unlock;
62839
62840+#ifdef CONFIG_GRKERNSEC
62841+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62842+ shp->shm_perm.cuid, shmid) ||
62843+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62844+ err = -EACCES;
62845+ goto out_unlock;
62846+ }
62847+#endif
62848+
62849 path = shp->shm_file->f_path;
62850 path_get(&path);
62851 shp->shm_nattch++;
62852+#ifdef CONFIG_GRKERNSEC
62853+ shp->shm_lapid = current->pid;
62854+#endif
62855 size = i_size_read(path.dentry->d_inode);
62856 shm_unlock(shp);
62857
62858diff --git a/kernel/acct.c b/kernel/acct.c
62859index fa7eb3d..7faf116 100644
62860--- a/kernel/acct.c
62861+++ b/kernel/acct.c
62862@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
62863 */
62864 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62865 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62866- file->f_op->write(file, (char *)&ac,
62867+ file->f_op->write(file, (char __force_user *)&ac,
62868 sizeof(acct_t), &file->f_pos);
62869 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62870 set_fs(fs);
62871diff --git a/kernel/audit.c b/kernel/audit.c
62872index 09fae26..ed71d5b 100644
62873--- a/kernel/audit.c
62874+++ b/kernel/audit.c
62875@@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
62876 3) suppressed due to audit_rate_limit
62877 4) suppressed due to audit_backlog_limit
62878 */
62879-static atomic_t audit_lost = ATOMIC_INIT(0);
62880+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62881
62882 /* The netlink socket. */
62883 static struct sock *audit_sock;
62884@@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
62885 unsigned long now;
62886 int print;
62887
62888- atomic_inc(&audit_lost);
62889+ atomic_inc_unchecked(&audit_lost);
62890
62891 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62892
62893@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
62894 printk(KERN_WARNING
62895 "audit: audit_lost=%d audit_rate_limit=%d "
62896 "audit_backlog_limit=%d\n",
62897- atomic_read(&audit_lost),
62898+ atomic_read_unchecked(&audit_lost),
62899 audit_rate_limit,
62900 audit_backlog_limit);
62901 audit_panic(message);
62902@@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
62903 status_set.pid = audit_pid;
62904 status_set.rate_limit = audit_rate_limit;
62905 status_set.backlog_limit = audit_backlog_limit;
62906- status_set.lost = atomic_read(&audit_lost);
62907+ status_set.lost = atomic_read_unchecked(&audit_lost);
62908 status_set.backlog = skb_queue_len(&audit_skb_queue);
62909 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62910 &status_set, sizeof(status_set));
62911@@ -1260,12 +1260,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
62912 avail = audit_expand(ab,
62913 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
62914 if (!avail)
62915- goto out;
62916+ goto out_va_end;
62917 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
62918 }
62919- va_end(args2);
62920 if (len > 0)
62921 skb_put(skb, len);
62922+out_va_end:
62923+ va_end(args2);
62924 out:
62925 return;
62926 }
62927diff --git a/kernel/auditsc.c b/kernel/auditsc.c
62928index 47b7fc1..c003c33 100644
62929--- a/kernel/auditsc.c
62930+++ b/kernel/auditsc.c
62931@@ -1166,8 +1166,8 @@ static void audit_log_execve_info(struct audit_context *context,
62932 struct audit_buffer **ab,
62933 struct audit_aux_data_execve *axi)
62934 {
62935- int i;
62936- size_t len, len_sent = 0;
62937+ int i, len;
62938+ size_t len_sent = 0;
62939 const char __user *p;
62940 char *buf;
62941
62942@@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
62943 }
62944
62945 /* global counter which is incremented every time something logs in */
62946-static atomic_t session_id = ATOMIC_INIT(0);
62947+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62948
62949 /**
62950 * audit_set_loginuid - set a task's audit_context loginuid
62951@@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
62952 */
62953 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
62954 {
62955- unsigned int sessionid = atomic_inc_return(&session_id);
62956+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
62957 struct audit_context *context = task->audit_context;
62958
62959 if (context && context->in_syscall) {
62960diff --git a/kernel/capability.c b/kernel/capability.c
62961index b463871..fa3ea1f 100644
62962--- a/kernel/capability.c
62963+++ b/kernel/capability.c
62964@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
62965 * before modification is attempted and the application
62966 * fails.
62967 */
62968+ if (tocopy > ARRAY_SIZE(kdata))
62969+ return -EFAULT;
62970+
62971 if (copy_to_user(dataptr, kdata, tocopy
62972 * sizeof(struct __user_cap_data_struct))) {
62973 return -EFAULT;
62974@@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
62975 BUG();
62976 }
62977
62978- if (security_capable(ns, current_cred(), cap) == 0) {
62979+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
62980 current->flags |= PF_SUPERPRIV;
62981 return true;
62982 }
62983@@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *ns, int cap)
62984 }
62985 EXPORT_SYMBOL(ns_capable);
62986
62987+bool ns_capable_nolog(struct user_namespace *ns, int cap)
62988+{
62989+ if (unlikely(!cap_valid(cap))) {
62990+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
62991+ BUG();
62992+ }
62993+
62994+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
62995+ current->flags |= PF_SUPERPRIV;
62996+ return true;
62997+ }
62998+ return false;
62999+}
63000+EXPORT_SYMBOL(ns_capable_nolog);
63001+
63002+bool capable_nolog(int cap)
63003+{
63004+ return ns_capable_nolog(&init_user_ns, cap);
63005+}
63006+EXPORT_SYMBOL(capable_nolog);
63007+
63008 /**
63009 * task_ns_capable - Determine whether current task has a superior
63010 * capability targeted at a specific task's user namespace.
63011@@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct *t, int cap)
63012 }
63013 EXPORT_SYMBOL(task_ns_capable);
63014
63015+bool task_ns_capable_nolog(struct task_struct *t, int cap)
63016+{
63017+ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
63018+}
63019+EXPORT_SYMBOL(task_ns_capable_nolog);
63020+
63021 /**
63022 * nsown_capable - Check superior capability to one's own user_ns
63023 * @cap: The capability in question
63024diff --git a/kernel/compat.c b/kernel/compat.c
63025index f346ced..aa2b1f4 100644
63026--- a/kernel/compat.c
63027+++ b/kernel/compat.c
63028@@ -13,6 +13,7 @@
63029
63030 #include <linux/linkage.h>
63031 #include <linux/compat.h>
63032+#include <linux/module.h>
63033 #include <linux/errno.h>
63034 #include <linux/time.h>
63035 #include <linux/signal.h>
63036@@ -168,7 +169,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
63037 mm_segment_t oldfs;
63038 long ret;
63039
63040- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
63041+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
63042 oldfs = get_fs();
63043 set_fs(KERNEL_DS);
63044 ret = hrtimer_nanosleep_restart(restart);
63045@@ -200,7 +201,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
63046 oldfs = get_fs();
63047 set_fs(KERNEL_DS);
63048 ret = hrtimer_nanosleep(&tu,
63049- rmtp ? (struct timespec __user *)&rmt : NULL,
63050+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
63051 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
63052 set_fs(oldfs);
63053
63054@@ -309,7 +310,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
63055 mm_segment_t old_fs = get_fs();
63056
63057 set_fs(KERNEL_DS);
63058- ret = sys_sigpending((old_sigset_t __user *) &s);
63059+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
63060 set_fs(old_fs);
63061 if (ret == 0)
63062 ret = put_user(s, set);
63063@@ -332,8 +333,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
63064 old_fs = get_fs();
63065 set_fs(KERNEL_DS);
63066 ret = sys_sigprocmask(how,
63067- set ? (old_sigset_t __user *) &s : NULL,
63068- oset ? (old_sigset_t __user *) &s : NULL);
63069+ set ? (old_sigset_t __force_user *) &s : NULL,
63070+ oset ? (old_sigset_t __force_user *) &s : NULL);
63071 set_fs(old_fs);
63072 if (ret == 0)
63073 if (oset)
63074@@ -370,7 +371,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
63075 mm_segment_t old_fs = get_fs();
63076
63077 set_fs(KERNEL_DS);
63078- ret = sys_old_getrlimit(resource, &r);
63079+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
63080 set_fs(old_fs);
63081
63082 if (!ret) {
63083@@ -442,7 +443,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
63084 mm_segment_t old_fs = get_fs();
63085
63086 set_fs(KERNEL_DS);
63087- ret = sys_getrusage(who, (struct rusage __user *) &r);
63088+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
63089 set_fs(old_fs);
63090
63091 if (ret)
63092@@ -469,8 +470,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
63093 set_fs (KERNEL_DS);
63094 ret = sys_wait4(pid,
63095 (stat_addr ?
63096- (unsigned int __user *) &status : NULL),
63097- options, (struct rusage __user *) &r);
63098+ (unsigned int __force_user *) &status : NULL),
63099+ options, (struct rusage __force_user *) &r);
63100 set_fs (old_fs);
63101
63102 if (ret > 0) {
63103@@ -495,8 +496,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
63104 memset(&info, 0, sizeof(info));
63105
63106 set_fs(KERNEL_DS);
63107- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
63108- uru ? (struct rusage __user *)&ru : NULL);
63109+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
63110+ uru ? (struct rusage __force_user *)&ru : NULL);
63111 set_fs(old_fs);
63112
63113 if ((ret < 0) || (info.si_signo == 0))
63114@@ -626,8 +627,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
63115 oldfs = get_fs();
63116 set_fs(KERNEL_DS);
63117 err = sys_timer_settime(timer_id, flags,
63118- (struct itimerspec __user *) &newts,
63119- (struct itimerspec __user *) &oldts);
63120+ (struct itimerspec __force_user *) &newts,
63121+ (struct itimerspec __force_user *) &oldts);
63122 set_fs(oldfs);
63123 if (!err && old && put_compat_itimerspec(old, &oldts))
63124 return -EFAULT;
63125@@ -644,7 +645,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
63126 oldfs = get_fs();
63127 set_fs(KERNEL_DS);
63128 err = sys_timer_gettime(timer_id,
63129- (struct itimerspec __user *) &ts);
63130+ (struct itimerspec __force_user *) &ts);
63131 set_fs(oldfs);
63132 if (!err && put_compat_itimerspec(setting, &ts))
63133 return -EFAULT;
63134@@ -663,7 +664,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
63135 oldfs = get_fs();
63136 set_fs(KERNEL_DS);
63137 err = sys_clock_settime(which_clock,
63138- (struct timespec __user *) &ts);
63139+ (struct timespec __force_user *) &ts);
63140 set_fs(oldfs);
63141 return err;
63142 }
63143@@ -678,7 +679,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
63144 oldfs = get_fs();
63145 set_fs(KERNEL_DS);
63146 err = sys_clock_gettime(which_clock,
63147- (struct timespec __user *) &ts);
63148+ (struct timespec __force_user *) &ts);
63149 set_fs(oldfs);
63150 if (!err && put_compat_timespec(&ts, tp))
63151 return -EFAULT;
63152@@ -698,7 +699,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
63153
63154 oldfs = get_fs();
63155 set_fs(KERNEL_DS);
63156- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
63157+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
63158 set_fs(oldfs);
63159
63160 err = compat_put_timex(utp, &txc);
63161@@ -718,7 +719,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
63162 oldfs = get_fs();
63163 set_fs(KERNEL_DS);
63164 err = sys_clock_getres(which_clock,
63165- (struct timespec __user *) &ts);
63166+ (struct timespec __force_user *) &ts);
63167 set_fs(oldfs);
63168 if (!err && tp && put_compat_timespec(&ts, tp))
63169 return -EFAULT;
63170@@ -730,9 +731,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
63171 long err;
63172 mm_segment_t oldfs;
63173 struct timespec tu;
63174- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
63175+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
63176
63177- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
63178+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
63179 oldfs = get_fs();
63180 set_fs(KERNEL_DS);
63181 err = clock_nanosleep_restart(restart);
63182@@ -764,8 +765,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
63183 oldfs = get_fs();
63184 set_fs(KERNEL_DS);
63185 err = sys_clock_nanosleep(which_clock, flags,
63186- (struct timespec __user *) &in,
63187- (struct timespec __user *) &out);
63188+ (struct timespec __force_user *) &in,
63189+ (struct timespec __force_user *) &out);
63190 set_fs(oldfs);
63191
63192 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
63193diff --git a/kernel/configs.c b/kernel/configs.c
63194index 42e8fa0..9e7406b 100644
63195--- a/kernel/configs.c
63196+++ b/kernel/configs.c
63197@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
63198 struct proc_dir_entry *entry;
63199
63200 /* create the current config file */
63201+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
63202+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
63203+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
63204+ &ikconfig_file_ops);
63205+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63206+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
63207+ &ikconfig_file_ops);
63208+#endif
63209+#else
63210 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
63211 &ikconfig_file_ops);
63212+#endif
63213+
63214 if (!entry)
63215 return -ENOMEM;
63216
63217diff --git a/kernel/cred.c b/kernel/cred.c
63218index 5791612..a3c04dc 100644
63219--- a/kernel/cred.c
63220+++ b/kernel/cred.c
63221@@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk)
63222 validate_creds(cred);
63223 put_cred(cred);
63224 }
63225+
63226+#ifdef CONFIG_GRKERNSEC_SETXID
63227+ cred = (struct cred *) tsk->delayed_cred;
63228+ if (cred) {
63229+ tsk->delayed_cred = NULL;
63230+ validate_creds(cred);
63231+ put_cred(cred);
63232+ }
63233+#endif
63234 }
63235
63236 /**
63237@@ -470,7 +479,7 @@ error_put:
63238 * Always returns 0 thus allowing this function to be tail-called at the end
63239 * of, say, sys_setgid().
63240 */
63241-int commit_creds(struct cred *new)
63242+static int __commit_creds(struct cred *new)
63243 {
63244 struct task_struct *task = current;
63245 const struct cred *old = task->real_cred;
63246@@ -489,6 +498,8 @@ int commit_creds(struct cred *new)
63247
63248 get_cred(new); /* we will require a ref for the subj creds too */
63249
63250+ gr_set_role_label(task, new->uid, new->gid);
63251+
63252 /* dumpability changes */
63253 if (old->euid != new->euid ||
63254 old->egid != new->egid ||
63255@@ -538,6 +549,92 @@ int commit_creds(struct cred *new)
63256 put_cred(old);
63257 return 0;
63258 }
63259+#ifdef CONFIG_GRKERNSEC_SETXID
63260+extern int set_user(struct cred *new);
63261+
63262+void gr_delayed_cred_worker(void)
63263+{
63264+ const struct cred *new = current->delayed_cred;
63265+ struct cred *ncred;
63266+
63267+ current->delayed_cred = NULL;
63268+
63269+ if (current_uid() && new != NULL) {
63270+ // from doing get_cred on it when queueing this
63271+ put_cred(new);
63272+ return;
63273+ } else if (new == NULL)
63274+ return;
63275+
63276+ ncred = prepare_creds();
63277+ if (!ncred)
63278+ goto die;
63279+ // uids
63280+ ncred->uid = new->uid;
63281+ ncred->euid = new->euid;
63282+ ncred->suid = new->suid;
63283+ ncred->fsuid = new->fsuid;
63284+ // gids
63285+ ncred->gid = new->gid;
63286+ ncred->egid = new->egid;
63287+ ncred->sgid = new->sgid;
63288+ ncred->fsgid = new->fsgid;
63289+ // groups
63290+ if (set_groups(ncred, new->group_info) < 0) {
63291+ abort_creds(ncred);
63292+ goto die;
63293+ }
63294+ // caps
63295+ ncred->securebits = new->securebits;
63296+ ncred->cap_inheritable = new->cap_inheritable;
63297+ ncred->cap_permitted = new->cap_permitted;
63298+ ncred->cap_effective = new->cap_effective;
63299+ ncred->cap_bset = new->cap_bset;
63300+
63301+ if (set_user(ncred)) {
63302+ abort_creds(ncred);
63303+ goto die;
63304+ }
63305+
63306+ // from doing get_cred on it when queueing this
63307+ put_cred(new);
63308+
63309+ __commit_creds(ncred);
63310+ return;
63311+die:
63312+ // from doing get_cred on it when queueing this
63313+ put_cred(new);
63314+ do_group_exit(SIGKILL);
63315+}
63316+#endif
63317+
63318+int commit_creds(struct cred *new)
63319+{
63320+#ifdef CONFIG_GRKERNSEC_SETXID
63321+ struct task_struct *t;
63322+
63323+ /* we won't get called with tasklist_lock held for writing
63324+ and interrupts disabled as the cred struct in that case is
63325+ init_cred
63326+ */
63327+ if (grsec_enable_setxid && !current_is_single_threaded() &&
63328+ !current_uid() && new->uid) {
63329+ rcu_read_lock();
63330+ read_lock(&tasklist_lock);
63331+ for (t = next_thread(current); t != current;
63332+ t = next_thread(t)) {
63333+ if (t->delayed_cred == NULL) {
63334+ t->delayed_cred = get_cred(new);
63335+ set_tsk_need_resched(t);
63336+ }
63337+ }
63338+ read_unlock(&tasklist_lock);
63339+ rcu_read_unlock();
63340+ }
63341+#endif
63342+ return __commit_creds(new);
63343+}
63344+
63345 EXPORT_SYMBOL(commit_creds);
63346
63347 /**
63348diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
63349index 0d7c087..01b8cef 100644
63350--- a/kernel/debug/debug_core.c
63351+++ b/kernel/debug/debug_core.c
63352@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
63353 */
63354 static atomic_t masters_in_kgdb;
63355 static atomic_t slaves_in_kgdb;
63356-static atomic_t kgdb_break_tasklet_var;
63357+static atomic_unchecked_t kgdb_break_tasklet_var;
63358 atomic_t kgdb_setting_breakpoint;
63359
63360 struct task_struct *kgdb_usethread;
63361@@ -129,7 +129,7 @@ int kgdb_single_step;
63362 static pid_t kgdb_sstep_pid;
63363
63364 /* to keep track of the CPU which is doing the single stepping*/
63365-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63366+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63367
63368 /*
63369 * If you are debugging a problem where roundup (the collection of
63370@@ -542,7 +542,7 @@ return_normal:
63371 * kernel will only try for the value of sstep_tries before
63372 * giving up and continuing on.
63373 */
63374- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
63375+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
63376 (kgdb_info[cpu].task &&
63377 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
63378 atomic_set(&kgdb_active, -1);
63379@@ -636,8 +636,8 @@ cpu_master_loop:
63380 }
63381
63382 kgdb_restore:
63383- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
63384- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
63385+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
63386+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
63387 if (kgdb_info[sstep_cpu].task)
63388 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
63389 else
63390@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(void)
63391 static void kgdb_tasklet_bpt(unsigned long ing)
63392 {
63393 kgdb_breakpoint();
63394- atomic_set(&kgdb_break_tasklet_var, 0);
63395+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
63396 }
63397
63398 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
63399
63400 void kgdb_schedule_breakpoint(void)
63401 {
63402- if (atomic_read(&kgdb_break_tasklet_var) ||
63403+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
63404 atomic_read(&kgdb_active) != -1 ||
63405 atomic_read(&kgdb_setting_breakpoint))
63406 return;
63407- atomic_inc(&kgdb_break_tasklet_var);
63408+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
63409 tasklet_schedule(&kgdb_tasklet_breakpoint);
63410 }
63411 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
63412diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
63413index 63786e7..0780cac 100644
63414--- a/kernel/debug/kdb/kdb_main.c
63415+++ b/kernel/debug/kdb/kdb_main.c
63416@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
63417 list_for_each_entry(mod, kdb_modules, list) {
63418
63419 kdb_printf("%-20s%8u 0x%p ", mod->name,
63420- mod->core_size, (void *)mod);
63421+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
63422 #ifdef CONFIG_MODULE_UNLOAD
63423 kdb_printf("%4d ", module_refcount(mod));
63424 #endif
63425@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
63426 kdb_printf(" (Loading)");
63427 else
63428 kdb_printf(" (Live)");
63429- kdb_printf(" 0x%p", mod->module_core);
63430+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
63431
63432 #ifdef CONFIG_MODULE_UNLOAD
63433 {
63434diff --git a/kernel/events/core.c b/kernel/events/core.c
63435index 58690af..d903d75 100644
63436--- a/kernel/events/core.c
63437+++ b/kernel/events/core.c
63438@@ -173,7 +173,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
63439 return 0;
63440 }
63441
63442-static atomic64_t perf_event_id;
63443+static atomic64_unchecked_t perf_event_id;
63444
63445 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
63446 enum event_type_t event_type);
63447@@ -2540,7 +2540,7 @@ static void __perf_event_read(void *info)
63448
63449 static inline u64 perf_event_count(struct perf_event *event)
63450 {
63451- return local64_read(&event->count) + atomic64_read(&event->child_count);
63452+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
63453 }
63454
63455 static u64 perf_event_read(struct perf_event *event)
63456@@ -3065,9 +3065,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
63457 mutex_lock(&event->child_mutex);
63458 total += perf_event_read(event);
63459 *enabled += event->total_time_enabled +
63460- atomic64_read(&event->child_total_time_enabled);
63461+ atomic64_read_unchecked(&event->child_total_time_enabled);
63462 *running += event->total_time_running +
63463- atomic64_read(&event->child_total_time_running);
63464+ atomic64_read_unchecked(&event->child_total_time_running);
63465
63466 list_for_each_entry(child, &event->child_list, child_list) {
63467 total += perf_event_read(child);
63468@@ -3474,10 +3474,10 @@ void perf_event_update_userpage(struct perf_event *event)
63469 userpg->offset -= local64_read(&event->hw.prev_count);
63470
63471 userpg->time_enabled = enabled +
63472- atomic64_read(&event->child_total_time_enabled);
63473+ atomic64_read_unchecked(&event->child_total_time_enabled);
63474
63475 userpg->time_running = running +
63476- atomic64_read(&event->child_total_time_running);
63477+ atomic64_read_unchecked(&event->child_total_time_running);
63478
63479 barrier();
63480 ++userpg->lock;
63481@@ -3906,11 +3906,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
63482 values[n++] = perf_event_count(event);
63483 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
63484 values[n++] = enabled +
63485- atomic64_read(&event->child_total_time_enabled);
63486+ atomic64_read_unchecked(&event->child_total_time_enabled);
63487 }
63488 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
63489 values[n++] = running +
63490- atomic64_read(&event->child_total_time_running);
63491+ atomic64_read_unchecked(&event->child_total_time_running);
63492 }
63493 if (read_format & PERF_FORMAT_ID)
63494 values[n++] = primary_event_id(event);
63495@@ -4561,12 +4561,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
63496 * need to add enough zero bytes after the string to handle
63497 * the 64bit alignment we do later.
63498 */
63499- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
63500+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
63501 if (!buf) {
63502 name = strncpy(tmp, "//enomem", sizeof(tmp));
63503 goto got_name;
63504 }
63505- name = d_path(&file->f_path, buf, PATH_MAX);
63506+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
63507 if (IS_ERR(name)) {
63508 name = strncpy(tmp, "//toolong", sizeof(tmp));
63509 goto got_name;
63510@@ -5921,7 +5921,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
63511 event->parent = parent_event;
63512
63513 event->ns = get_pid_ns(current->nsproxy->pid_ns);
63514- event->id = atomic64_inc_return(&perf_event_id);
63515+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
63516
63517 event->state = PERF_EVENT_STATE_INACTIVE;
63518
63519@@ -6443,10 +6443,10 @@ static void sync_child_event(struct perf_event *child_event,
63520 /*
63521 * Add back the child's count to the parent's count:
63522 */
63523- atomic64_add(child_val, &parent_event->child_count);
63524- atomic64_add(child_event->total_time_enabled,
63525+ atomic64_add_unchecked(child_val, &parent_event->child_count);
63526+ atomic64_add_unchecked(child_event->total_time_enabled,
63527 &parent_event->child_total_time_enabled);
63528- atomic64_add(child_event->total_time_running,
63529+ atomic64_add_unchecked(child_event->total_time_running,
63530 &parent_event->child_total_time_running);
63531
63532 /*
63533diff --git a/kernel/exit.c b/kernel/exit.c
63534index e6e01b9..619f837 100644
63535--- a/kernel/exit.c
63536+++ b/kernel/exit.c
63537@@ -57,6 +57,10 @@
63538 #include <asm/pgtable.h>
63539 #include <asm/mmu_context.h>
63540
63541+#ifdef CONFIG_GRKERNSEC
63542+extern rwlock_t grsec_exec_file_lock;
63543+#endif
63544+
63545 static void exit_mm(struct task_struct * tsk);
63546
63547 static void __unhash_process(struct task_struct *p, bool group_dead)
63548@@ -168,6 +172,10 @@ void release_task(struct task_struct * p)
63549 struct task_struct *leader;
63550 int zap_leader;
63551 repeat:
63552+#ifdef CONFIG_NET
63553+ gr_del_task_from_ip_table(p);
63554+#endif
63555+
63556 /* don't need to get the RCU readlock here - the process is dead and
63557 * can't be modifying its own credentials. But shut RCU-lockdep up */
63558 rcu_read_lock();
63559@@ -380,7 +388,7 @@ int allow_signal(int sig)
63560 * know it'll be handled, so that they don't get converted to
63561 * SIGKILL or just silently dropped.
63562 */
63563- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
63564+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
63565 recalc_sigpending();
63566 spin_unlock_irq(&current->sighand->siglock);
63567 return 0;
63568@@ -416,6 +424,17 @@ void daemonize(const char *name, ...)
63569 vsnprintf(current->comm, sizeof(current->comm), name, args);
63570 va_end(args);
63571
63572+#ifdef CONFIG_GRKERNSEC
63573+ write_lock(&grsec_exec_file_lock);
63574+ if (current->exec_file) {
63575+ fput(current->exec_file);
63576+ current->exec_file = NULL;
63577+ }
63578+ write_unlock(&grsec_exec_file_lock);
63579+#endif
63580+
63581+ gr_set_kernel_label(current);
63582+
63583 /*
63584 * If we were started as result of loading a module, close all of the
63585 * user space pages. We don't need them, and if we didn't close them
63586@@ -893,6 +912,8 @@ NORET_TYPE void do_exit(long code)
63587 struct task_struct *tsk = current;
63588 int group_dead;
63589
63590+ set_fs(USER_DS);
63591+
63592 profile_task_exit(tsk);
63593
63594 WARN_ON(blk_needs_flush_plug(tsk));
63595@@ -909,7 +930,6 @@ NORET_TYPE void do_exit(long code)
63596 * mm_release()->clear_child_tid() from writing to a user-controlled
63597 * kernel address.
63598 */
63599- set_fs(USER_DS);
63600
63601 ptrace_event(PTRACE_EVENT_EXIT, code);
63602
63603@@ -971,6 +991,9 @@ NORET_TYPE void do_exit(long code)
63604 tsk->exit_code = code;
63605 taskstats_exit(tsk, group_dead);
63606
63607+ gr_acl_handle_psacct(tsk, code);
63608+ gr_acl_handle_exit();
63609+
63610 exit_mm(tsk);
63611
63612 if (group_dead)
63613diff --git a/kernel/fork.c b/kernel/fork.c
63614index 0acf42c0..9e40e2e 100644
63615--- a/kernel/fork.c
63616+++ b/kernel/fork.c
63617@@ -281,7 +281,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
63618 *stackend = STACK_END_MAGIC; /* for overflow detection */
63619
63620 #ifdef CONFIG_CC_STACKPROTECTOR
63621- tsk->stack_canary = get_random_int();
63622+ tsk->stack_canary = pax_get_random_long();
63623 #endif
63624
63625 /*
63626@@ -305,13 +305,77 @@ out:
63627 }
63628
63629 #ifdef CONFIG_MMU
63630+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
63631+{
63632+ struct vm_area_struct *tmp;
63633+ unsigned long charge;
63634+ struct mempolicy *pol;
63635+ struct file *file;
63636+
63637+ charge = 0;
63638+ if (mpnt->vm_flags & VM_ACCOUNT) {
63639+ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63640+ if (security_vm_enough_memory(len))
63641+ goto fail_nomem;
63642+ charge = len;
63643+ }
63644+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63645+ if (!tmp)
63646+ goto fail_nomem;
63647+ *tmp = *mpnt;
63648+ tmp->vm_mm = mm;
63649+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
63650+ pol = mpol_dup(vma_policy(mpnt));
63651+ if (IS_ERR(pol))
63652+ goto fail_nomem_policy;
63653+ vma_set_policy(tmp, pol);
63654+ if (anon_vma_fork(tmp, mpnt))
63655+ goto fail_nomem_anon_vma_fork;
63656+ tmp->vm_flags &= ~VM_LOCKED;
63657+ tmp->vm_next = tmp->vm_prev = NULL;
63658+ tmp->vm_mirror = NULL;
63659+ file = tmp->vm_file;
63660+ if (file) {
63661+ struct inode *inode = file->f_path.dentry->d_inode;
63662+ struct address_space *mapping = file->f_mapping;
63663+
63664+ get_file(file);
63665+ if (tmp->vm_flags & VM_DENYWRITE)
63666+ atomic_dec(&inode->i_writecount);
63667+ mutex_lock(&mapping->i_mmap_mutex);
63668+ if (tmp->vm_flags & VM_SHARED)
63669+ mapping->i_mmap_writable++;
63670+ flush_dcache_mmap_lock(mapping);
63671+ /* insert tmp into the share list, just after mpnt */
63672+ vma_prio_tree_add(tmp, mpnt);
63673+ flush_dcache_mmap_unlock(mapping);
63674+ mutex_unlock(&mapping->i_mmap_mutex);
63675+ }
63676+
63677+ /*
63678+ * Clear hugetlb-related page reserves for children. This only
63679+ * affects MAP_PRIVATE mappings. Faults generated by the child
63680+ * are not guaranteed to succeed, even if read-only
63681+ */
63682+ if (is_vm_hugetlb_page(tmp))
63683+ reset_vma_resv_huge_pages(tmp);
63684+
63685+ return tmp;
63686+
63687+fail_nomem_anon_vma_fork:
63688+ mpol_put(pol);
63689+fail_nomem_policy:
63690+ kmem_cache_free(vm_area_cachep, tmp);
63691+fail_nomem:
63692+ vm_unacct_memory(charge);
63693+ return NULL;
63694+}
63695+
63696 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63697 {
63698 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
63699 struct rb_node **rb_link, *rb_parent;
63700 int retval;
63701- unsigned long charge;
63702- struct mempolicy *pol;
63703
63704 down_write(&oldmm->mmap_sem);
63705 flush_cache_dup_mm(oldmm);
63706@@ -323,8 +387,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63707 mm->locked_vm = 0;
63708 mm->mmap = NULL;
63709 mm->mmap_cache = NULL;
63710- mm->free_area_cache = oldmm->mmap_base;
63711- mm->cached_hole_size = ~0UL;
63712+ mm->free_area_cache = oldmm->free_area_cache;
63713+ mm->cached_hole_size = oldmm->cached_hole_size;
63714 mm->map_count = 0;
63715 cpumask_clear(mm_cpumask(mm));
63716 mm->mm_rb = RB_ROOT;
63717@@ -340,8 +404,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63718
63719 prev = NULL;
63720 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
63721- struct file *file;
63722-
63723 if (mpnt->vm_flags & VM_DONTCOPY) {
63724 long pages = vma_pages(mpnt);
63725 mm->total_vm -= pages;
63726@@ -349,53 +411,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63727 -pages);
63728 continue;
63729 }
63730- charge = 0;
63731- if (mpnt->vm_flags & VM_ACCOUNT) {
63732- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63733- if (security_vm_enough_memory(len))
63734- goto fail_nomem;
63735- charge = len;
63736+ tmp = dup_vma(mm, mpnt);
63737+ if (!tmp) {
63738+ retval = -ENOMEM;
63739+ goto out;
63740 }
63741- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63742- if (!tmp)
63743- goto fail_nomem;
63744- *tmp = *mpnt;
63745- INIT_LIST_HEAD(&tmp->anon_vma_chain);
63746- pol = mpol_dup(vma_policy(mpnt));
63747- retval = PTR_ERR(pol);
63748- if (IS_ERR(pol))
63749- goto fail_nomem_policy;
63750- vma_set_policy(tmp, pol);
63751- tmp->vm_mm = mm;
63752- if (anon_vma_fork(tmp, mpnt))
63753- goto fail_nomem_anon_vma_fork;
63754- tmp->vm_flags &= ~VM_LOCKED;
63755- tmp->vm_next = tmp->vm_prev = NULL;
63756- file = tmp->vm_file;
63757- if (file) {
63758- struct inode *inode = file->f_path.dentry->d_inode;
63759- struct address_space *mapping = file->f_mapping;
63760-
63761- get_file(file);
63762- if (tmp->vm_flags & VM_DENYWRITE)
63763- atomic_dec(&inode->i_writecount);
63764- mutex_lock(&mapping->i_mmap_mutex);
63765- if (tmp->vm_flags & VM_SHARED)
63766- mapping->i_mmap_writable++;
63767- flush_dcache_mmap_lock(mapping);
63768- /* insert tmp into the share list, just after mpnt */
63769- vma_prio_tree_add(tmp, mpnt);
63770- flush_dcache_mmap_unlock(mapping);
63771- mutex_unlock(&mapping->i_mmap_mutex);
63772- }
63773-
63774- /*
63775- * Clear hugetlb-related page reserves for children. This only
63776- * affects MAP_PRIVATE mappings. Faults generated by the child
63777- * are not guaranteed to succeed, even if read-only
63778- */
63779- if (is_vm_hugetlb_page(tmp))
63780- reset_vma_resv_huge_pages(tmp);
63781
63782 /*
63783 * Link in the new vma and copy the page table entries.
63784@@ -418,6 +438,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63785 if (retval)
63786 goto out;
63787 }
63788+
63789+#ifdef CONFIG_PAX_SEGMEXEC
63790+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
63791+ struct vm_area_struct *mpnt_m;
63792+
63793+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
63794+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
63795+
63796+ if (!mpnt->vm_mirror)
63797+ continue;
63798+
63799+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
63800+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
63801+ mpnt->vm_mirror = mpnt_m;
63802+ } else {
63803+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
63804+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
63805+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
63806+ mpnt->vm_mirror->vm_mirror = mpnt;
63807+ }
63808+ }
63809+ BUG_ON(mpnt_m);
63810+ }
63811+#endif
63812+
63813 /* a new mm has just been created */
63814 arch_dup_mmap(oldmm, mm);
63815 retval = 0;
63816@@ -426,14 +471,6 @@ out:
63817 flush_tlb_mm(oldmm);
63818 up_write(&oldmm->mmap_sem);
63819 return retval;
63820-fail_nomem_anon_vma_fork:
63821- mpol_put(pol);
63822-fail_nomem_policy:
63823- kmem_cache_free(vm_area_cachep, tmp);
63824-fail_nomem:
63825- retval = -ENOMEM;
63826- vm_unacct_memory(charge);
63827- goto out;
63828 }
63829
63830 static inline int mm_alloc_pgd(struct mm_struct *mm)
63831@@ -645,6 +682,26 @@ struct mm_struct *get_task_mm(struct task_struct *task)
63832 }
63833 EXPORT_SYMBOL_GPL(get_task_mm);
63834
63835+struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
63836+{
63837+ struct mm_struct *mm;
63838+ int err;
63839+
63840+ err = mutex_lock_killable(&task->signal->cred_guard_mutex);
63841+ if (err)
63842+ return ERR_PTR(err);
63843+
63844+ mm = get_task_mm(task);
63845+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
63846+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
63847+ mmput(mm);
63848+ mm = ERR_PTR(-EACCES);
63849+ }
63850+ mutex_unlock(&task->signal->cred_guard_mutex);
63851+
63852+ return mm;
63853+}
63854+
63855 /* Please note the differences between mmput and mm_release.
63856 * mmput is called whenever we stop holding onto a mm_struct,
63857 * error success whatever.
63858@@ -830,13 +887,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
63859 spin_unlock(&fs->lock);
63860 return -EAGAIN;
63861 }
63862- fs->users++;
63863+ atomic_inc(&fs->users);
63864 spin_unlock(&fs->lock);
63865 return 0;
63866 }
63867 tsk->fs = copy_fs_struct(fs);
63868 if (!tsk->fs)
63869 return -ENOMEM;
63870+ gr_set_chroot_entries(tsk, &tsk->fs->root);
63871 return 0;
63872 }
63873
63874@@ -1100,6 +1158,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
63875 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
63876 #endif
63877 retval = -EAGAIN;
63878+
63879+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
63880+
63881 if (atomic_read(&p->real_cred->user->processes) >=
63882 task_rlimit(p, RLIMIT_NPROC)) {
63883 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
63884@@ -1259,6 +1320,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
63885 if (clone_flags & CLONE_THREAD)
63886 p->tgid = current->tgid;
63887
63888+ gr_copy_label(p);
63889+
63890 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
63891 /*
63892 * Clear TID on mm_release()?
63893@@ -1421,6 +1484,8 @@ bad_fork_cleanup_count:
63894 bad_fork_free:
63895 free_task(p);
63896 fork_out:
63897+ gr_log_forkfail(retval);
63898+
63899 return ERR_PTR(retval);
63900 }
63901
63902@@ -1521,6 +1586,8 @@ long do_fork(unsigned long clone_flags,
63903 if (clone_flags & CLONE_PARENT_SETTID)
63904 put_user(nr, parent_tidptr);
63905
63906+ gr_handle_brute_check();
63907+
63908 if (clone_flags & CLONE_VFORK) {
63909 p->vfork_done = &vfork;
63910 init_completion(&vfork);
63911@@ -1630,7 +1697,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
63912 return 0;
63913
63914 /* don't need lock here; in the worst case we'll do useless copy */
63915- if (fs->users == 1)
63916+ if (atomic_read(&fs->users) == 1)
63917 return 0;
63918
63919 *new_fsp = copy_fs_struct(fs);
63920@@ -1719,7 +1786,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
63921 fs = current->fs;
63922 spin_lock(&fs->lock);
63923 current->fs = new_fs;
63924- if (--fs->users)
63925+ gr_set_chroot_entries(current, &current->fs->root);
63926+ if (atomic_dec_return(&fs->users))
63927 new_fs = NULL;
63928 else
63929 new_fs = fs;
63930diff --git a/kernel/futex.c b/kernel/futex.c
63931index 1614be2..37abc7e 100644
63932--- a/kernel/futex.c
63933+++ b/kernel/futex.c
63934@@ -54,6 +54,7 @@
63935 #include <linux/mount.h>
63936 #include <linux/pagemap.h>
63937 #include <linux/syscalls.h>
63938+#include <linux/ptrace.h>
63939 #include <linux/signal.h>
63940 #include <linux/export.h>
63941 #include <linux/magic.h>
63942@@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
63943 struct page *page, *page_head;
63944 int err, ro = 0;
63945
63946+#ifdef CONFIG_PAX_SEGMEXEC
63947+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
63948+ return -EFAULT;
63949+#endif
63950+
63951 /*
63952 * The futex address must be "naturally" aligned.
63953 */
63954@@ -2459,6 +2465,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
63955 if (!p)
63956 goto err_unlock;
63957 ret = -EPERM;
63958+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63959+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
63960+ goto err_unlock;
63961+#endif
63962 pcred = __task_cred(p);
63963 /* If victim is in different user_ns, then uids are not
63964 comparable, so we must have CAP_SYS_PTRACE */
63965@@ -2724,6 +2734,7 @@ static int __init futex_init(void)
63966 {
63967 u32 curval;
63968 int i;
63969+ mm_segment_t oldfs;
63970
63971 /*
63972 * This will fail and we want it. Some arch implementations do
63973@@ -2735,8 +2746,11 @@ static int __init futex_init(void)
63974 * implementation, the non-functional ones will return
63975 * -ENOSYS.
63976 */
63977+ oldfs = get_fs();
63978+ set_fs(USER_DS);
63979 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
63980 futex_cmpxchg_enabled = 1;
63981+ set_fs(oldfs);
63982
63983 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
63984 plist_head_init(&futex_queues[i].chain);
63985diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
63986index 5f9e689..582d46d 100644
63987--- a/kernel/futex_compat.c
63988+++ b/kernel/futex_compat.c
63989@@ -10,6 +10,7 @@
63990 #include <linux/compat.h>
63991 #include <linux/nsproxy.h>
63992 #include <linux/futex.h>
63993+#include <linux/ptrace.h>
63994
63995 #include <asm/uaccess.h>
63996
63997@@ -136,7 +137,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
63998 {
63999 struct compat_robust_list_head __user *head;
64000 unsigned long ret;
64001- const struct cred *cred = current_cred(), *pcred;
64002+ const struct cred *cred = current_cred();
64003+ const struct cred *pcred;
64004
64005 if (!futex_cmpxchg_enabled)
64006 return -ENOSYS;
64007@@ -152,6 +154,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
64008 if (!p)
64009 goto err_unlock;
64010 ret = -EPERM;
64011+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64012+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
64013+ goto err_unlock;
64014+#endif
64015 pcred = __task_cred(p);
64016 /* If victim is in different user_ns, then uids are not
64017 comparable, so we must have CAP_SYS_PTRACE */
64018diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
64019index 9b22d03..6295b62 100644
64020--- a/kernel/gcov/base.c
64021+++ b/kernel/gcov/base.c
64022@@ -102,11 +102,6 @@ void gcov_enable_events(void)
64023 }
64024
64025 #ifdef CONFIG_MODULES
64026-static inline int within(void *addr, void *start, unsigned long size)
64027-{
64028- return ((addr >= start) && (addr < start + size));
64029-}
64030-
64031 /* Update list and generate events when modules are unloaded. */
64032 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
64033 void *data)
64034@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
64035 prev = NULL;
64036 /* Remove entries located in module from linked list. */
64037 for (info = gcov_info_head; info; info = info->next) {
64038- if (within(info, mod->module_core, mod->core_size)) {
64039+ if (within_module_core_rw((unsigned long)info, mod)) {
64040 if (prev)
64041 prev->next = info->next;
64042 else
64043diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
64044index ae34bf5..4e2f3d0 100644
64045--- a/kernel/hrtimer.c
64046+++ b/kernel/hrtimer.c
64047@@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
64048 local_irq_restore(flags);
64049 }
64050
64051-static void run_hrtimer_softirq(struct softirq_action *h)
64052+static void run_hrtimer_softirq(void)
64053 {
64054 hrtimer_peek_ahead_timers();
64055 }
64056diff --git a/kernel/jump_label.c b/kernel/jump_label.c
64057index 66ff710..05a5128 100644
64058--- a/kernel/jump_label.c
64059+++ b/kernel/jump_label.c
64060@@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
64061
64062 size = (((unsigned long)stop - (unsigned long)start)
64063 / sizeof(struct jump_entry));
64064+ pax_open_kernel();
64065 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
64066+ pax_close_kernel();
64067 }
64068
64069 static void jump_label_update(struct jump_label_key *key, int enable);
64070@@ -303,10 +305,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
64071 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
64072 struct jump_entry *iter;
64073
64074+ pax_open_kernel();
64075 for (iter = iter_start; iter < iter_stop; iter++) {
64076 if (within_module_init(iter->code, mod))
64077 iter->code = 0;
64078 }
64079+ pax_close_kernel();
64080 }
64081
64082 static int
64083diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
64084index 079f1d3..a407562 100644
64085--- a/kernel/kallsyms.c
64086+++ b/kernel/kallsyms.c
64087@@ -11,6 +11,9 @@
64088 * Changed the compression method from stem compression to "table lookup"
64089 * compression (see scripts/kallsyms.c for a more complete description)
64090 */
64091+#ifdef CONFIG_GRKERNSEC_HIDESYM
64092+#define __INCLUDED_BY_HIDESYM 1
64093+#endif
64094 #include <linux/kallsyms.h>
64095 #include <linux/module.h>
64096 #include <linux/init.h>
64097@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
64098
64099 static inline int is_kernel_inittext(unsigned long addr)
64100 {
64101+ if (system_state != SYSTEM_BOOTING)
64102+ return 0;
64103+
64104 if (addr >= (unsigned long)_sinittext
64105 && addr <= (unsigned long)_einittext)
64106 return 1;
64107 return 0;
64108 }
64109
64110+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64111+#ifdef CONFIG_MODULES
64112+static inline int is_module_text(unsigned long addr)
64113+{
64114+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
64115+ return 1;
64116+
64117+ addr = ktla_ktva(addr);
64118+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
64119+}
64120+#else
64121+static inline int is_module_text(unsigned long addr)
64122+{
64123+ return 0;
64124+}
64125+#endif
64126+#endif
64127+
64128 static inline int is_kernel_text(unsigned long addr)
64129 {
64130 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
64131@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
64132
64133 static inline int is_kernel(unsigned long addr)
64134 {
64135+
64136+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64137+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
64138+ return 1;
64139+
64140+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
64141+#else
64142 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
64143+#endif
64144+
64145 return 1;
64146 return in_gate_area_no_mm(addr);
64147 }
64148
64149 static int is_ksym_addr(unsigned long addr)
64150 {
64151+
64152+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64153+ if (is_module_text(addr))
64154+ return 0;
64155+#endif
64156+
64157 if (all_var)
64158 return is_kernel(addr);
64159
64160@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
64161
64162 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
64163 {
64164- iter->name[0] = '\0';
64165 iter->nameoff = get_symbol_offset(new_pos);
64166 iter->pos = new_pos;
64167 }
64168@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
64169 {
64170 struct kallsym_iter *iter = m->private;
64171
64172+#ifdef CONFIG_GRKERNSEC_HIDESYM
64173+ if (current_uid())
64174+ return 0;
64175+#endif
64176+
64177 /* Some debugging symbols have no name. Ignore them. */
64178 if (!iter->name[0])
64179 return 0;
64180@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
64181 struct kallsym_iter *iter;
64182 int ret;
64183
64184- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
64185+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
64186 if (!iter)
64187 return -ENOMEM;
64188 reset_iter(iter, 0);
64189diff --git a/kernel/kexec.c b/kernel/kexec.c
64190index dc7bc08..4601964 100644
64191--- a/kernel/kexec.c
64192+++ b/kernel/kexec.c
64193@@ -1048,7 +1048,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
64194 unsigned long flags)
64195 {
64196 struct compat_kexec_segment in;
64197- struct kexec_segment out, __user *ksegments;
64198+ struct kexec_segment out;
64199+ struct kexec_segment __user *ksegments;
64200 unsigned long i, result;
64201
64202 /* Don't allow clients that don't understand the native
64203diff --git a/kernel/kmod.c b/kernel/kmod.c
64204index a4bea97..7a1ae9a 100644
64205--- a/kernel/kmod.c
64206+++ b/kernel/kmod.c
64207@@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
64208 * If module auto-loading support is disabled then this function
64209 * becomes a no-operation.
64210 */
64211-int __request_module(bool wait, const char *fmt, ...)
64212+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
64213 {
64214- va_list args;
64215 char module_name[MODULE_NAME_LEN];
64216 unsigned int max_modprobes;
64217 int ret;
64218- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
64219+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
64220 static char *envp[] = { "HOME=/",
64221 "TERM=linux",
64222 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
64223@@ -88,9 +87,7 @@ int __request_module(bool wait, const char *fmt, ...)
64224 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
64225 static int kmod_loop_msg;
64226
64227- va_start(args, fmt);
64228- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
64229- va_end(args);
64230+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
64231 if (ret >= MODULE_NAME_LEN)
64232 return -ENAMETOOLONG;
64233
64234@@ -98,6 +95,20 @@ int __request_module(bool wait, const char *fmt, ...)
64235 if (ret)
64236 return ret;
64237
64238+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64239+ if (!current_uid()) {
64240+ /* hack to workaround consolekit/udisks stupidity */
64241+ read_lock(&tasklist_lock);
64242+ if (!strcmp(current->comm, "mount") &&
64243+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
64244+ read_unlock(&tasklist_lock);
64245+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
64246+ return -EPERM;
64247+ }
64248+ read_unlock(&tasklist_lock);
64249+ }
64250+#endif
64251+
64252 /* If modprobe needs a service that is in a module, we get a recursive
64253 * loop. Limit the number of running kmod threads to max_threads/2 or
64254 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
64255@@ -133,6 +144,47 @@ int __request_module(bool wait, const char *fmt, ...)
64256 atomic_dec(&kmod_concurrent);
64257 return ret;
64258 }
64259+
64260+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
64261+{
64262+ va_list args;
64263+ int ret;
64264+
64265+ va_start(args, fmt);
64266+ ret = ____request_module(wait, module_param, fmt, args);
64267+ va_end(args);
64268+
64269+ return ret;
64270+}
64271+
64272+int __request_module(bool wait, const char *fmt, ...)
64273+{
64274+ va_list args;
64275+ int ret;
64276+
64277+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64278+ if (current_uid()) {
64279+ char module_param[MODULE_NAME_LEN];
64280+
64281+ memset(module_param, 0, sizeof(module_param));
64282+
64283+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
64284+
64285+ va_start(args, fmt);
64286+ ret = ____request_module(wait, module_param, fmt, args);
64287+ va_end(args);
64288+
64289+ return ret;
64290+ }
64291+#endif
64292+
64293+ va_start(args, fmt);
64294+ ret = ____request_module(wait, NULL, fmt, args);
64295+ va_end(args);
64296+
64297+ return ret;
64298+}
64299+
64300 EXPORT_SYMBOL(__request_module);
64301 #endif /* CONFIG_MODULES */
64302
64303@@ -222,7 +274,7 @@ static int wait_for_helper(void *data)
64304 *
64305 * Thus the __user pointer cast is valid here.
64306 */
64307- sys_wait4(pid, (int __user *)&ret, 0, NULL);
64308+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
64309
64310 /*
64311 * If ret is 0, either ____call_usermodehelper failed and the
64312diff --git a/kernel/kprobes.c b/kernel/kprobes.c
64313index faa39d1..d7ad37e 100644
64314--- a/kernel/kprobes.c
64315+++ b/kernel/kprobes.c
64316@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
64317 * kernel image and loaded module images reside. This is required
64318 * so x86_64 can correctly handle the %rip-relative fixups.
64319 */
64320- kip->insns = module_alloc(PAGE_SIZE);
64321+ kip->insns = module_alloc_exec(PAGE_SIZE);
64322 if (!kip->insns) {
64323 kfree(kip);
64324 return NULL;
64325@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
64326 */
64327 if (!list_is_singular(&kip->list)) {
64328 list_del(&kip->list);
64329- module_free(NULL, kip->insns);
64330+ module_free_exec(NULL, kip->insns);
64331 kfree(kip);
64332 }
64333 return 1;
64334@@ -1953,7 +1953,7 @@ static int __init init_kprobes(void)
64335 {
64336 int i, err = 0;
64337 unsigned long offset = 0, size = 0;
64338- char *modname, namebuf[128];
64339+ char *modname, namebuf[KSYM_NAME_LEN];
64340 const char *symbol_name;
64341 void *addr;
64342 struct kprobe_blackpoint *kb;
64343@@ -2079,7 +2079,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
64344 const char *sym = NULL;
64345 unsigned int i = *(loff_t *) v;
64346 unsigned long offset = 0;
64347- char *modname, namebuf[128];
64348+ char *modname, namebuf[KSYM_NAME_LEN];
64349
64350 head = &kprobe_table[i];
64351 preempt_disable();
64352diff --git a/kernel/lockdep.c b/kernel/lockdep.c
64353index b2e08c9..01d8049 100644
64354--- a/kernel/lockdep.c
64355+++ b/kernel/lockdep.c
64356@@ -592,6 +592,10 @@ static int static_obj(void *obj)
64357 end = (unsigned long) &_end,
64358 addr = (unsigned long) obj;
64359
64360+#ifdef CONFIG_PAX_KERNEXEC
64361+ start = ktla_ktva(start);
64362+#endif
64363+
64364 /*
64365 * static variable?
64366 */
64367@@ -731,6 +735,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
64368 if (!static_obj(lock->key)) {
64369 debug_locks_off();
64370 printk("INFO: trying to register non-static key.\n");
64371+ printk("lock:%pS key:%pS.\n", lock, lock->key);
64372 printk("the code is fine but needs lockdep annotation.\n");
64373 printk("turning off the locking correctness validator.\n");
64374 dump_stack();
64375@@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
64376 if (!class)
64377 return 0;
64378 }
64379- atomic_inc((atomic_t *)&class->ops);
64380+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
64381 if (very_verbose(class)) {
64382 printk("\nacquire class [%p] %s", class->key, class->name);
64383 if (class->name_version > 1)
64384diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
64385index 91c32a0..b2c71c5 100644
64386--- a/kernel/lockdep_proc.c
64387+++ b/kernel/lockdep_proc.c
64388@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
64389
64390 static void print_name(struct seq_file *m, struct lock_class *class)
64391 {
64392- char str[128];
64393+ char str[KSYM_NAME_LEN];
64394 const char *name = class->name;
64395
64396 if (!name) {
64397diff --git a/kernel/module.c b/kernel/module.c
64398index 178333c..04e3408 100644
64399--- a/kernel/module.c
64400+++ b/kernel/module.c
64401@@ -58,6 +58,7 @@
64402 #include <linux/jump_label.h>
64403 #include <linux/pfn.h>
64404 #include <linux/bsearch.h>
64405+#include <linux/grsecurity.h>
64406
64407 #define CREATE_TRACE_POINTS
64408 #include <trace/events/module.h>
64409@@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
64410
64411 /* Bounds of module allocation, for speeding __module_address.
64412 * Protected by module_mutex. */
64413-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
64414+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
64415+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
64416
64417 int register_module_notifier(struct notifier_block * nb)
64418 {
64419@@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
64420 return true;
64421
64422 list_for_each_entry_rcu(mod, &modules, list) {
64423- struct symsearch arr[] = {
64424+ struct symsearch modarr[] = {
64425 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
64426 NOT_GPL_ONLY, false },
64427 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
64428@@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
64429 #endif
64430 };
64431
64432- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
64433+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
64434 return true;
64435 }
64436 return false;
64437@@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
64438 static int percpu_modalloc(struct module *mod,
64439 unsigned long size, unsigned long align)
64440 {
64441- if (align > PAGE_SIZE) {
64442+ if (align-1 >= PAGE_SIZE) {
64443 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
64444 mod->name, align, PAGE_SIZE);
64445 align = PAGE_SIZE;
64446@@ -1183,7 +1185,7 @@ resolve_symbol_wait(struct module *mod,
64447 */
64448 #ifdef CONFIG_SYSFS
64449
64450-#ifdef CONFIG_KALLSYMS
64451+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
64452 static inline bool sect_empty(const Elf_Shdr *sect)
64453 {
64454 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
64455@@ -1649,21 +1651,21 @@ static void set_section_ro_nx(void *base,
64456
64457 static void unset_module_core_ro_nx(struct module *mod)
64458 {
64459- set_page_attributes(mod->module_core + mod->core_text_size,
64460- mod->module_core + mod->core_size,
64461+ set_page_attributes(mod->module_core_rw,
64462+ mod->module_core_rw + mod->core_size_rw,
64463 set_memory_x);
64464- set_page_attributes(mod->module_core,
64465- mod->module_core + mod->core_ro_size,
64466+ set_page_attributes(mod->module_core_rx,
64467+ mod->module_core_rx + mod->core_size_rx,
64468 set_memory_rw);
64469 }
64470
64471 static void unset_module_init_ro_nx(struct module *mod)
64472 {
64473- set_page_attributes(mod->module_init + mod->init_text_size,
64474- mod->module_init + mod->init_size,
64475+ set_page_attributes(mod->module_init_rw,
64476+ mod->module_init_rw + mod->init_size_rw,
64477 set_memory_x);
64478- set_page_attributes(mod->module_init,
64479- mod->module_init + mod->init_ro_size,
64480+ set_page_attributes(mod->module_init_rx,
64481+ mod->module_init_rx + mod->init_size_rx,
64482 set_memory_rw);
64483 }
64484
64485@@ -1674,14 +1676,14 @@ void set_all_modules_text_rw(void)
64486
64487 mutex_lock(&module_mutex);
64488 list_for_each_entry_rcu(mod, &modules, list) {
64489- if ((mod->module_core) && (mod->core_text_size)) {
64490- set_page_attributes(mod->module_core,
64491- mod->module_core + mod->core_text_size,
64492+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
64493+ set_page_attributes(mod->module_core_rx,
64494+ mod->module_core_rx + mod->core_size_rx,
64495 set_memory_rw);
64496 }
64497- if ((mod->module_init) && (mod->init_text_size)) {
64498- set_page_attributes(mod->module_init,
64499- mod->module_init + mod->init_text_size,
64500+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
64501+ set_page_attributes(mod->module_init_rx,
64502+ mod->module_init_rx + mod->init_size_rx,
64503 set_memory_rw);
64504 }
64505 }
64506@@ -1695,14 +1697,14 @@ void set_all_modules_text_ro(void)
64507
64508 mutex_lock(&module_mutex);
64509 list_for_each_entry_rcu(mod, &modules, list) {
64510- if ((mod->module_core) && (mod->core_text_size)) {
64511- set_page_attributes(mod->module_core,
64512- mod->module_core + mod->core_text_size,
64513+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
64514+ set_page_attributes(mod->module_core_rx,
64515+ mod->module_core_rx + mod->core_size_rx,
64516 set_memory_ro);
64517 }
64518- if ((mod->module_init) && (mod->init_text_size)) {
64519- set_page_attributes(mod->module_init,
64520- mod->module_init + mod->init_text_size,
64521+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
64522+ set_page_attributes(mod->module_init_rx,
64523+ mod->module_init_rx + mod->init_size_rx,
64524 set_memory_ro);
64525 }
64526 }
64527@@ -1748,16 +1750,19 @@ static void free_module(struct module *mod)
64528
64529 /* This may be NULL, but that's OK */
64530 unset_module_init_ro_nx(mod);
64531- module_free(mod, mod->module_init);
64532+ module_free(mod, mod->module_init_rw);
64533+ module_free_exec(mod, mod->module_init_rx);
64534 kfree(mod->args);
64535 percpu_modfree(mod);
64536
64537 /* Free lock-classes: */
64538- lockdep_free_key_range(mod->module_core, mod->core_size);
64539+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
64540+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
64541
64542 /* Finally, free the core (containing the module structure) */
64543 unset_module_core_ro_nx(mod);
64544- module_free(mod, mod->module_core);
64545+ module_free_exec(mod, mod->module_core_rx);
64546+ module_free(mod, mod->module_core_rw);
64547
64548 #ifdef CONFIG_MPU
64549 update_protections(current->mm);
64550@@ -1826,10 +1831,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64551 unsigned int i;
64552 int ret = 0;
64553 const struct kernel_symbol *ksym;
64554+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64555+ int is_fs_load = 0;
64556+ int register_filesystem_found = 0;
64557+ char *p;
64558+
64559+ p = strstr(mod->args, "grsec_modharden_fs");
64560+ if (p) {
64561+ char *endptr = p + strlen("grsec_modharden_fs");
64562+ /* copy \0 as well */
64563+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
64564+ is_fs_load = 1;
64565+ }
64566+#endif
64567
64568 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
64569 const char *name = info->strtab + sym[i].st_name;
64570
64571+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64572+ /* it's a real shame this will never get ripped and copied
64573+ upstream! ;(
64574+ */
64575+ if (is_fs_load && !strcmp(name, "register_filesystem"))
64576+ register_filesystem_found = 1;
64577+#endif
64578+
64579 switch (sym[i].st_shndx) {
64580 case SHN_COMMON:
64581 /* We compiled with -fno-common. These are not
64582@@ -1850,7 +1876,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64583 ksym = resolve_symbol_wait(mod, info, name);
64584 /* Ok if resolved. */
64585 if (ksym && !IS_ERR(ksym)) {
64586+ pax_open_kernel();
64587 sym[i].st_value = ksym->value;
64588+ pax_close_kernel();
64589 break;
64590 }
64591
64592@@ -1869,11 +1897,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64593 secbase = (unsigned long)mod_percpu(mod);
64594 else
64595 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
64596+ pax_open_kernel();
64597 sym[i].st_value += secbase;
64598+ pax_close_kernel();
64599 break;
64600 }
64601 }
64602
64603+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64604+ if (is_fs_load && !register_filesystem_found) {
64605+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
64606+ ret = -EPERM;
64607+ }
64608+#endif
64609+
64610 return ret;
64611 }
64612
64613@@ -1977,22 +2014,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
64614 || s->sh_entsize != ~0UL
64615 || strstarts(sname, ".init"))
64616 continue;
64617- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
64618+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64619+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
64620+ else
64621+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
64622 DEBUGP("\t%s\n", name);
64623 }
64624- switch (m) {
64625- case 0: /* executable */
64626- mod->core_size = debug_align(mod->core_size);
64627- mod->core_text_size = mod->core_size;
64628- break;
64629- case 1: /* RO: text and ro-data */
64630- mod->core_size = debug_align(mod->core_size);
64631- mod->core_ro_size = mod->core_size;
64632- break;
64633- case 3: /* whole core */
64634- mod->core_size = debug_align(mod->core_size);
64635- break;
64636- }
64637 }
64638
64639 DEBUGP("Init section allocation order:\n");
64640@@ -2006,23 +2033,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
64641 || s->sh_entsize != ~0UL
64642 || !strstarts(sname, ".init"))
64643 continue;
64644- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
64645- | INIT_OFFSET_MASK);
64646+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64647+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
64648+ else
64649+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
64650+ s->sh_entsize |= INIT_OFFSET_MASK;
64651 DEBUGP("\t%s\n", sname);
64652 }
64653- switch (m) {
64654- case 0: /* executable */
64655- mod->init_size = debug_align(mod->init_size);
64656- mod->init_text_size = mod->init_size;
64657- break;
64658- case 1: /* RO: text and ro-data */
64659- mod->init_size = debug_align(mod->init_size);
64660- mod->init_ro_size = mod->init_size;
64661- break;
64662- case 3: /* whole init */
64663- mod->init_size = debug_align(mod->init_size);
64664- break;
64665- }
64666 }
64667 }
64668
64669@@ -2187,7 +2204,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
64670
64671 /* Put symbol section at end of init part of module. */
64672 symsect->sh_flags |= SHF_ALLOC;
64673- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
64674+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
64675 info->index.sym) | INIT_OFFSET_MASK;
64676 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
64677
64678@@ -2204,19 +2221,19 @@ static void layout_symtab(struct module *mod, struct load_info *info)
64679 }
64680
64681 /* Append room for core symbols at end of core part. */
64682- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
64683- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
64684+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
64685+ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
64686
64687 /* Put string table section at end of init part of module. */
64688 strsect->sh_flags |= SHF_ALLOC;
64689- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
64690+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
64691 info->index.str) | INIT_OFFSET_MASK;
64692 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
64693
64694 /* Append room for core symbols' strings at end of core part. */
64695- info->stroffs = mod->core_size;
64696+ info->stroffs = mod->core_size_rx;
64697 __set_bit(0, info->strmap);
64698- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
64699+ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
64700 }
64701
64702 static void add_kallsyms(struct module *mod, const struct load_info *info)
64703@@ -2232,11 +2249,13 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
64704 /* Make sure we get permanent strtab: don't use info->strtab. */
64705 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
64706
64707+ pax_open_kernel();
64708+
64709 /* Set types up while we still have access to sections. */
64710 for (i = 0; i < mod->num_symtab; i++)
64711 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
64712
64713- mod->core_symtab = dst = mod->module_core + info->symoffs;
64714+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
64715 src = mod->symtab;
64716 *dst = *src;
64717 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
64718@@ -2249,10 +2268,12 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
64719 }
64720 mod->core_num_syms = ndst;
64721
64722- mod->core_strtab = s = mod->module_core + info->stroffs;
64723+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
64724 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
64725 if (test_bit(i, info->strmap))
64726 *++s = mod->strtab[i];
64727+
64728+ pax_close_kernel();
64729 }
64730 #else
64731 static inline void layout_symtab(struct module *mod, struct load_info *info)
64732@@ -2286,17 +2307,33 @@ void * __weak module_alloc(unsigned long size)
64733 return size == 0 ? NULL : vmalloc_exec(size);
64734 }
64735
64736-static void *module_alloc_update_bounds(unsigned long size)
64737+static void *module_alloc_update_bounds_rw(unsigned long size)
64738 {
64739 void *ret = module_alloc(size);
64740
64741 if (ret) {
64742 mutex_lock(&module_mutex);
64743 /* Update module bounds. */
64744- if ((unsigned long)ret < module_addr_min)
64745- module_addr_min = (unsigned long)ret;
64746- if ((unsigned long)ret + size > module_addr_max)
64747- module_addr_max = (unsigned long)ret + size;
64748+ if ((unsigned long)ret < module_addr_min_rw)
64749+ module_addr_min_rw = (unsigned long)ret;
64750+ if ((unsigned long)ret + size > module_addr_max_rw)
64751+ module_addr_max_rw = (unsigned long)ret + size;
64752+ mutex_unlock(&module_mutex);
64753+ }
64754+ return ret;
64755+}
64756+
64757+static void *module_alloc_update_bounds_rx(unsigned long size)
64758+{
64759+ void *ret = module_alloc_exec(size);
64760+
64761+ if (ret) {
64762+ mutex_lock(&module_mutex);
64763+ /* Update module bounds. */
64764+ if ((unsigned long)ret < module_addr_min_rx)
64765+ module_addr_min_rx = (unsigned long)ret;
64766+ if ((unsigned long)ret + size > module_addr_max_rx)
64767+ module_addr_max_rx = (unsigned long)ret + size;
64768 mutex_unlock(&module_mutex);
64769 }
64770 return ret;
64771@@ -2474,8 +2511,14 @@ static struct module *setup_load_info(struct load_info *info)
64772 static int check_modinfo(struct module *mod, struct load_info *info)
64773 {
64774 const char *modmagic = get_modinfo(info, "vermagic");
64775+ const char *license = get_modinfo(info, "license");
64776 int err;
64777
64778+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
64779+ if (!license || !license_is_gpl_compatible(license))
64780+ return -ENOEXEC;
64781+#endif
64782+
64783 /* This is allowed: modprobe --force will invalidate it. */
64784 if (!modmagic) {
64785 err = try_to_force_load(mod, "bad vermagic");
64786@@ -2498,7 +2541,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
64787 }
64788
64789 /* Set up license info based on the info section */
64790- set_license(mod, get_modinfo(info, "license"));
64791+ set_license(mod, license);
64792
64793 return 0;
64794 }
64795@@ -2592,7 +2635,7 @@ static int move_module(struct module *mod, struct load_info *info)
64796 void *ptr;
64797
64798 /* Do the allocs. */
64799- ptr = module_alloc_update_bounds(mod->core_size);
64800+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
64801 /*
64802 * The pointer to this block is stored in the module structure
64803 * which is inside the block. Just mark it as not being a
64804@@ -2602,23 +2645,50 @@ static int move_module(struct module *mod, struct load_info *info)
64805 if (!ptr)
64806 return -ENOMEM;
64807
64808- memset(ptr, 0, mod->core_size);
64809- mod->module_core = ptr;
64810+ memset(ptr, 0, mod->core_size_rw);
64811+ mod->module_core_rw = ptr;
64812
64813- ptr = module_alloc_update_bounds(mod->init_size);
64814+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
64815 /*
64816 * The pointer to this block is stored in the module structure
64817 * which is inside the block. This block doesn't need to be
64818 * scanned as it contains data and code that will be freed
64819 * after the module is initialized.
64820 */
64821- kmemleak_ignore(ptr);
64822- if (!ptr && mod->init_size) {
64823- module_free(mod, mod->module_core);
64824+ kmemleak_not_leak(ptr);
64825+ if (!ptr && mod->init_size_rw) {
64826+ module_free(mod, mod->module_core_rw);
64827 return -ENOMEM;
64828 }
64829- memset(ptr, 0, mod->init_size);
64830- mod->module_init = ptr;
64831+ memset(ptr, 0, mod->init_size_rw);
64832+ mod->module_init_rw = ptr;
64833+
64834+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
64835+ kmemleak_not_leak(ptr);
64836+ if (!ptr) {
64837+ module_free(mod, mod->module_init_rw);
64838+ module_free(mod, mod->module_core_rw);
64839+ return -ENOMEM;
64840+ }
64841+
64842+ pax_open_kernel();
64843+ memset(ptr, 0, mod->core_size_rx);
64844+ pax_close_kernel();
64845+ mod->module_core_rx = ptr;
64846+
64847+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
64848+ kmemleak_not_leak(ptr);
64849+ if (!ptr && mod->init_size_rx) {
64850+ module_free_exec(mod, mod->module_core_rx);
64851+ module_free(mod, mod->module_init_rw);
64852+ module_free(mod, mod->module_core_rw);
64853+ return -ENOMEM;
64854+ }
64855+
64856+ pax_open_kernel();
64857+ memset(ptr, 0, mod->init_size_rx);
64858+ pax_close_kernel();
64859+ mod->module_init_rx = ptr;
64860
64861 /* Transfer each section which specifies SHF_ALLOC */
64862 DEBUGP("final section addresses:\n");
64863@@ -2629,16 +2699,45 @@ static int move_module(struct module *mod, struct load_info *info)
64864 if (!(shdr->sh_flags & SHF_ALLOC))
64865 continue;
64866
64867- if (shdr->sh_entsize & INIT_OFFSET_MASK)
64868- dest = mod->module_init
64869- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64870- else
64871- dest = mod->module_core + shdr->sh_entsize;
64872+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
64873+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64874+ dest = mod->module_init_rw
64875+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64876+ else
64877+ dest = mod->module_init_rx
64878+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64879+ } else {
64880+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64881+ dest = mod->module_core_rw + shdr->sh_entsize;
64882+ else
64883+ dest = mod->module_core_rx + shdr->sh_entsize;
64884+ }
64885+
64886+ if (shdr->sh_type != SHT_NOBITS) {
64887+
64888+#ifdef CONFIG_PAX_KERNEXEC
64889+#ifdef CONFIG_X86_64
64890+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
64891+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
64892+#endif
64893+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
64894+ pax_open_kernel();
64895+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64896+ pax_close_kernel();
64897+ } else
64898+#endif
64899
64900- if (shdr->sh_type != SHT_NOBITS)
64901 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64902+ }
64903 /* Update sh_addr to point to copy in image. */
64904- shdr->sh_addr = (unsigned long)dest;
64905+
64906+#ifdef CONFIG_PAX_KERNEXEC
64907+ if (shdr->sh_flags & SHF_EXECINSTR)
64908+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
64909+ else
64910+#endif
64911+
64912+ shdr->sh_addr = (unsigned long)dest;
64913 DEBUGP("\t0x%lx %s\n",
64914 shdr->sh_addr, info->secstrings + shdr->sh_name);
64915 }
64916@@ -2689,12 +2788,12 @@ static void flush_module_icache(const struct module *mod)
64917 * Do it before processing of module parameters, so the module
64918 * can provide parameter accessor functions of its own.
64919 */
64920- if (mod->module_init)
64921- flush_icache_range((unsigned long)mod->module_init,
64922- (unsigned long)mod->module_init
64923- + mod->init_size);
64924- flush_icache_range((unsigned long)mod->module_core,
64925- (unsigned long)mod->module_core + mod->core_size);
64926+ if (mod->module_init_rx)
64927+ flush_icache_range((unsigned long)mod->module_init_rx,
64928+ (unsigned long)mod->module_init_rx
64929+ + mod->init_size_rx);
64930+ flush_icache_range((unsigned long)mod->module_core_rx,
64931+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
64932
64933 set_fs(old_fs);
64934 }
64935@@ -2774,8 +2873,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
64936 {
64937 kfree(info->strmap);
64938 percpu_modfree(mod);
64939- module_free(mod, mod->module_init);
64940- module_free(mod, mod->module_core);
64941+ module_free_exec(mod, mod->module_init_rx);
64942+ module_free_exec(mod, mod->module_core_rx);
64943+ module_free(mod, mod->module_init_rw);
64944+ module_free(mod, mod->module_core_rw);
64945 }
64946
64947 int __weak module_finalize(const Elf_Ehdr *hdr,
64948@@ -2839,9 +2940,38 @@ static struct module *load_module(void __user *umod,
64949 if (err)
64950 goto free_unload;
64951
64952+ /* Now copy in args */
64953+ mod->args = strndup_user(uargs, ~0UL >> 1);
64954+ if (IS_ERR(mod->args)) {
64955+ err = PTR_ERR(mod->args);
64956+ goto free_unload;
64957+ }
64958+
64959 /* Set up MODINFO_ATTR fields */
64960 setup_modinfo(mod, &info);
64961
64962+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64963+ {
64964+ char *p, *p2;
64965+
64966+ if (strstr(mod->args, "grsec_modharden_netdev")) {
64967+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
64968+ err = -EPERM;
64969+ goto free_modinfo;
64970+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
64971+ p += strlen("grsec_modharden_normal");
64972+ p2 = strstr(p, "_");
64973+ if (p2) {
64974+ *p2 = '\0';
64975+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
64976+ *p2 = '_';
64977+ }
64978+ err = -EPERM;
64979+ goto free_modinfo;
64980+ }
64981+ }
64982+#endif
64983+
64984 /* Fix up syms, so that st_value is a pointer to location. */
64985 err = simplify_symbols(mod, &info);
64986 if (err < 0)
64987@@ -2857,13 +2987,6 @@ static struct module *load_module(void __user *umod,
64988
64989 flush_module_icache(mod);
64990
64991- /* Now copy in args */
64992- mod->args = strndup_user(uargs, ~0UL >> 1);
64993- if (IS_ERR(mod->args)) {
64994- err = PTR_ERR(mod->args);
64995- goto free_arch_cleanup;
64996- }
64997-
64998 /* Mark state as coming so strong_try_module_get() ignores us. */
64999 mod->state = MODULE_STATE_COMING;
65000
65001@@ -2921,11 +3044,10 @@ static struct module *load_module(void __user *umod,
65002 unlock:
65003 mutex_unlock(&module_mutex);
65004 synchronize_sched();
65005- kfree(mod->args);
65006- free_arch_cleanup:
65007 module_arch_cleanup(mod);
65008 free_modinfo:
65009 free_modinfo(mod);
65010+ kfree(mod->args);
65011 free_unload:
65012 module_unload_free(mod);
65013 free_module:
65014@@ -2966,16 +3088,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
65015 MODULE_STATE_COMING, mod);
65016
65017 /* Set RO and NX regions for core */
65018- set_section_ro_nx(mod->module_core,
65019- mod->core_text_size,
65020- mod->core_ro_size,
65021- mod->core_size);
65022+ set_section_ro_nx(mod->module_core_rx,
65023+ mod->core_size_rx,
65024+ mod->core_size_rx,
65025+ mod->core_size_rx);
65026
65027 /* Set RO and NX regions for init */
65028- set_section_ro_nx(mod->module_init,
65029- mod->init_text_size,
65030- mod->init_ro_size,
65031- mod->init_size);
65032+ set_section_ro_nx(mod->module_init_rx,
65033+ mod->init_size_rx,
65034+ mod->init_size_rx,
65035+ mod->init_size_rx);
65036
65037 do_mod_ctors(mod);
65038 /* Start the module */
65039@@ -3021,11 +3143,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
65040 mod->strtab = mod->core_strtab;
65041 #endif
65042 unset_module_init_ro_nx(mod);
65043- module_free(mod, mod->module_init);
65044- mod->module_init = NULL;
65045- mod->init_size = 0;
65046- mod->init_ro_size = 0;
65047- mod->init_text_size = 0;
65048+ module_free(mod, mod->module_init_rw);
65049+ module_free_exec(mod, mod->module_init_rx);
65050+ mod->module_init_rw = NULL;
65051+ mod->module_init_rx = NULL;
65052+ mod->init_size_rw = 0;
65053+ mod->init_size_rx = 0;
65054 mutex_unlock(&module_mutex);
65055
65056 return 0;
65057@@ -3056,10 +3179,16 @@ static const char *get_ksymbol(struct module *mod,
65058 unsigned long nextval;
65059
65060 /* At worse, next value is at end of module */
65061- if (within_module_init(addr, mod))
65062- nextval = (unsigned long)mod->module_init+mod->init_text_size;
65063+ if (within_module_init_rx(addr, mod))
65064+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
65065+ else if (within_module_init_rw(addr, mod))
65066+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
65067+ else if (within_module_core_rx(addr, mod))
65068+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
65069+ else if (within_module_core_rw(addr, mod))
65070+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
65071 else
65072- nextval = (unsigned long)mod->module_core+mod->core_text_size;
65073+ return NULL;
65074
65075 /* Scan for closest preceding symbol, and next symbol. (ELF
65076 starts real symbols at 1). */
65077@@ -3307,7 +3436,7 @@ static int m_show(struct seq_file *m, void *p)
65078 char buf[8];
65079
65080 seq_printf(m, "%s %u",
65081- mod->name, mod->init_size + mod->core_size);
65082+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
65083 print_unload_info(m, mod);
65084
65085 /* Informative for users. */
65086@@ -3316,7 +3445,7 @@ static int m_show(struct seq_file *m, void *p)
65087 mod->state == MODULE_STATE_COMING ? "Loading":
65088 "Live");
65089 /* Used by oprofile and other similar tools. */
65090- seq_printf(m, " 0x%pK", mod->module_core);
65091+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
65092
65093 /* Taints info */
65094 if (mod->taints)
65095@@ -3352,7 +3481,17 @@ static const struct file_operations proc_modules_operations = {
65096
65097 static int __init proc_modules_init(void)
65098 {
65099+#ifndef CONFIG_GRKERNSEC_HIDESYM
65100+#ifdef CONFIG_GRKERNSEC_PROC_USER
65101+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
65102+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65103+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
65104+#else
65105 proc_create("modules", 0, NULL, &proc_modules_operations);
65106+#endif
65107+#else
65108+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
65109+#endif
65110 return 0;
65111 }
65112 module_init(proc_modules_init);
65113@@ -3411,12 +3550,12 @@ struct module *__module_address(unsigned long addr)
65114 {
65115 struct module *mod;
65116
65117- if (addr < module_addr_min || addr > module_addr_max)
65118+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
65119+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
65120 return NULL;
65121
65122 list_for_each_entry_rcu(mod, &modules, list)
65123- if (within_module_core(addr, mod)
65124- || within_module_init(addr, mod))
65125+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
65126 return mod;
65127 return NULL;
65128 }
65129@@ -3450,11 +3589,20 @@ bool is_module_text_address(unsigned long addr)
65130 */
65131 struct module *__module_text_address(unsigned long addr)
65132 {
65133- struct module *mod = __module_address(addr);
65134+ struct module *mod;
65135+
65136+#ifdef CONFIG_X86_32
65137+ addr = ktla_ktva(addr);
65138+#endif
65139+
65140+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
65141+ return NULL;
65142+
65143+ mod = __module_address(addr);
65144+
65145 if (mod) {
65146 /* Make sure it's within the text section. */
65147- if (!within(addr, mod->module_init, mod->init_text_size)
65148- && !within(addr, mod->module_core, mod->core_text_size))
65149+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
65150 mod = NULL;
65151 }
65152 return mod;
65153diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
65154index 7e3443f..b2a1e6b 100644
65155--- a/kernel/mutex-debug.c
65156+++ b/kernel/mutex-debug.c
65157@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
65158 }
65159
65160 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
65161- struct thread_info *ti)
65162+ struct task_struct *task)
65163 {
65164 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
65165
65166 /* Mark the current thread as blocked on the lock: */
65167- ti->task->blocked_on = waiter;
65168+ task->blocked_on = waiter;
65169 }
65170
65171 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
65172- struct thread_info *ti)
65173+ struct task_struct *task)
65174 {
65175 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
65176- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
65177- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
65178- ti->task->blocked_on = NULL;
65179+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
65180+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
65181+ task->blocked_on = NULL;
65182
65183 list_del_init(&waiter->list);
65184 waiter->task = NULL;
65185diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
65186index 0799fd3..d06ae3b 100644
65187--- a/kernel/mutex-debug.h
65188+++ b/kernel/mutex-debug.h
65189@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
65190 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
65191 extern void debug_mutex_add_waiter(struct mutex *lock,
65192 struct mutex_waiter *waiter,
65193- struct thread_info *ti);
65194+ struct task_struct *task);
65195 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
65196- struct thread_info *ti);
65197+ struct task_struct *task);
65198 extern void debug_mutex_unlock(struct mutex *lock);
65199 extern void debug_mutex_init(struct mutex *lock, const char *name,
65200 struct lock_class_key *key);
65201diff --git a/kernel/mutex.c b/kernel/mutex.c
65202index 89096dd..f91ebc5 100644
65203--- a/kernel/mutex.c
65204+++ b/kernel/mutex.c
65205@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
65206 spin_lock_mutex(&lock->wait_lock, flags);
65207
65208 debug_mutex_lock_common(lock, &waiter);
65209- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
65210+ debug_mutex_add_waiter(lock, &waiter, task);
65211
65212 /* add waiting tasks to the end of the waitqueue (FIFO): */
65213 list_add_tail(&waiter.list, &lock->wait_list);
65214@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
65215 * TASK_UNINTERRUPTIBLE case.)
65216 */
65217 if (unlikely(signal_pending_state(state, task))) {
65218- mutex_remove_waiter(lock, &waiter,
65219- task_thread_info(task));
65220+ mutex_remove_waiter(lock, &waiter, task);
65221 mutex_release(&lock->dep_map, 1, ip);
65222 spin_unlock_mutex(&lock->wait_lock, flags);
65223
65224@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
65225 done:
65226 lock_acquired(&lock->dep_map, ip);
65227 /* got the lock - rejoice! */
65228- mutex_remove_waiter(lock, &waiter, current_thread_info());
65229+ mutex_remove_waiter(lock, &waiter, task);
65230 mutex_set_owner(lock);
65231
65232 /* set it to 0 if there are no waiters left: */
65233diff --git a/kernel/padata.c b/kernel/padata.c
65234index b452599..5d68f4e 100644
65235--- a/kernel/padata.c
65236+++ b/kernel/padata.c
65237@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
65238 padata->pd = pd;
65239 padata->cb_cpu = cb_cpu;
65240
65241- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
65242- atomic_set(&pd->seq_nr, -1);
65243+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
65244+ atomic_set_unchecked(&pd->seq_nr, -1);
65245
65246- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
65247+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
65248
65249 target_cpu = padata_cpu_hash(padata);
65250 queue = per_cpu_ptr(pd->pqueue, target_cpu);
65251@@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
65252 padata_init_pqueues(pd);
65253 padata_init_squeues(pd);
65254 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
65255- atomic_set(&pd->seq_nr, -1);
65256+ atomic_set_unchecked(&pd->seq_nr, -1);
65257 atomic_set(&pd->reorder_objects, 0);
65258 atomic_set(&pd->refcnt, 0);
65259 pd->pinst = pinst;
65260diff --git a/kernel/panic.c b/kernel/panic.c
65261index 3458469..342c500 100644
65262--- a/kernel/panic.c
65263+++ b/kernel/panic.c
65264@@ -78,7 +78,11 @@ NORET_TYPE void panic(const char * fmt, ...)
65265 va_end(args);
65266 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
65267 #ifdef CONFIG_DEBUG_BUGVERBOSE
65268- dump_stack();
65269+ /*
65270+ * Avoid nested stack-dumping if a panic occurs during oops processing
65271+ */
65272+ if (!oops_in_progress)
65273+ dump_stack();
65274 #endif
65275
65276 /*
65277@@ -382,7 +386,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
65278 const char *board;
65279
65280 printk(KERN_WARNING "------------[ cut here ]------------\n");
65281- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
65282+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
65283 board = dmi_get_system_info(DMI_PRODUCT_NAME);
65284 if (board)
65285 printk(KERN_WARNING "Hardware name: %s\n", board);
65286@@ -437,7 +441,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
65287 */
65288 void __stack_chk_fail(void)
65289 {
65290- panic("stack-protector: Kernel stack is corrupted in: %p\n",
65291+ dump_stack();
65292+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
65293 __builtin_return_address(0));
65294 }
65295 EXPORT_SYMBOL(__stack_chk_fail);
65296diff --git a/kernel/pid.c b/kernel/pid.c
65297index fa5f722..0c93e57 100644
65298--- a/kernel/pid.c
65299+++ b/kernel/pid.c
65300@@ -33,6 +33,7 @@
65301 #include <linux/rculist.h>
65302 #include <linux/bootmem.h>
65303 #include <linux/hash.h>
65304+#include <linux/security.h>
65305 #include <linux/pid_namespace.h>
65306 #include <linux/init_task.h>
65307 #include <linux/syscalls.h>
65308@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
65309
65310 int pid_max = PID_MAX_DEFAULT;
65311
65312-#define RESERVED_PIDS 300
65313+#define RESERVED_PIDS 500
65314
65315 int pid_max_min = RESERVED_PIDS + 1;
65316 int pid_max_max = PID_MAX_LIMIT;
65317@@ -418,10 +419,18 @@ EXPORT_SYMBOL(pid_task);
65318 */
65319 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
65320 {
65321+ struct task_struct *task;
65322+
65323 rcu_lockdep_assert(rcu_read_lock_held(),
65324 "find_task_by_pid_ns() needs rcu_read_lock()"
65325 " protection");
65326- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
65327+
65328+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
65329+
65330+ if (gr_pid_is_chrooted(task))
65331+ return NULL;
65332+
65333+ return task;
65334 }
65335
65336 struct task_struct *find_task_by_vpid(pid_t vnr)
65337@@ -429,6 +438,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
65338 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
65339 }
65340
65341+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
65342+{
65343+ rcu_lockdep_assert(rcu_read_lock_held(),
65344+ "find_task_by_pid_ns() needs rcu_read_lock()"
65345+ " protection");
65346+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
65347+}
65348+
65349 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
65350 {
65351 struct pid *pid;
65352diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
65353index e7cb76d..75eceb3 100644
65354--- a/kernel/posix-cpu-timers.c
65355+++ b/kernel/posix-cpu-timers.c
65356@@ -6,6 +6,7 @@
65357 #include <linux/posix-timers.h>
65358 #include <linux/errno.h>
65359 #include <linux/math64.h>
65360+#include <linux/security.h>
65361 #include <asm/uaccess.h>
65362 #include <linux/kernel_stat.h>
65363 #include <trace/events/timer.h>
65364@@ -1606,14 +1607,14 @@ struct k_clock clock_posix_cpu = {
65365
65366 static __init int init_posix_cpu_timers(void)
65367 {
65368- struct k_clock process = {
65369+ static struct k_clock process = {
65370 .clock_getres = process_cpu_clock_getres,
65371 .clock_get = process_cpu_clock_get,
65372 .timer_create = process_cpu_timer_create,
65373 .nsleep = process_cpu_nsleep,
65374 .nsleep_restart = process_cpu_nsleep_restart,
65375 };
65376- struct k_clock thread = {
65377+ static struct k_clock thread = {
65378 .clock_getres = thread_cpu_clock_getres,
65379 .clock_get = thread_cpu_clock_get,
65380 .timer_create = thread_cpu_timer_create,
65381diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
65382index 69185ae..cc2847a 100644
65383--- a/kernel/posix-timers.c
65384+++ b/kernel/posix-timers.c
65385@@ -43,6 +43,7 @@
65386 #include <linux/idr.h>
65387 #include <linux/posix-clock.h>
65388 #include <linux/posix-timers.h>
65389+#include <linux/grsecurity.h>
65390 #include <linux/syscalls.h>
65391 #include <linux/wait.h>
65392 #include <linux/workqueue.h>
65393@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
65394 * which we beg off on and pass to do_sys_settimeofday().
65395 */
65396
65397-static struct k_clock posix_clocks[MAX_CLOCKS];
65398+static struct k_clock *posix_clocks[MAX_CLOCKS];
65399
65400 /*
65401 * These ones are defined below.
65402@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
65403 */
65404 static __init int init_posix_timers(void)
65405 {
65406- struct k_clock clock_realtime = {
65407+ static struct k_clock clock_realtime = {
65408 .clock_getres = hrtimer_get_res,
65409 .clock_get = posix_clock_realtime_get,
65410 .clock_set = posix_clock_realtime_set,
65411@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
65412 .timer_get = common_timer_get,
65413 .timer_del = common_timer_del,
65414 };
65415- struct k_clock clock_monotonic = {
65416+ static struct k_clock clock_monotonic = {
65417 .clock_getres = hrtimer_get_res,
65418 .clock_get = posix_ktime_get_ts,
65419 .nsleep = common_nsleep,
65420@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
65421 .timer_get = common_timer_get,
65422 .timer_del = common_timer_del,
65423 };
65424- struct k_clock clock_monotonic_raw = {
65425+ static struct k_clock clock_monotonic_raw = {
65426 .clock_getres = hrtimer_get_res,
65427 .clock_get = posix_get_monotonic_raw,
65428 };
65429- struct k_clock clock_realtime_coarse = {
65430+ static struct k_clock clock_realtime_coarse = {
65431 .clock_getres = posix_get_coarse_res,
65432 .clock_get = posix_get_realtime_coarse,
65433 };
65434- struct k_clock clock_monotonic_coarse = {
65435+ static struct k_clock clock_monotonic_coarse = {
65436 .clock_getres = posix_get_coarse_res,
65437 .clock_get = posix_get_monotonic_coarse,
65438 };
65439- struct k_clock clock_boottime = {
65440+ static struct k_clock clock_boottime = {
65441 .clock_getres = hrtimer_get_res,
65442 .clock_get = posix_get_boottime,
65443 .nsleep = common_nsleep,
65444@@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
65445 return;
65446 }
65447
65448- posix_clocks[clock_id] = *new_clock;
65449+ posix_clocks[clock_id] = new_clock;
65450 }
65451 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
65452
65453@@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
65454 return (id & CLOCKFD_MASK) == CLOCKFD ?
65455 &clock_posix_dynamic : &clock_posix_cpu;
65456
65457- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
65458+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
65459 return NULL;
65460- return &posix_clocks[id];
65461+ return posix_clocks[id];
65462 }
65463
65464 static int common_timer_create(struct k_itimer *new_timer)
65465@@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
65466 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
65467 return -EFAULT;
65468
65469+ /* only the CLOCK_REALTIME clock can be set, all other clocks
65470+ have their clock_set fptr set to a nosettime dummy function
65471+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
65472+ call common_clock_set, which calls do_sys_settimeofday, which
65473+ we hook
65474+ */
65475+
65476 return kc->clock_set(which_clock, &new_tp);
65477 }
65478
65479diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
65480index d523593..68197a4 100644
65481--- a/kernel/power/poweroff.c
65482+++ b/kernel/power/poweroff.c
65483@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
65484 .enable_mask = SYSRQ_ENABLE_BOOT,
65485 };
65486
65487-static int pm_sysrq_init(void)
65488+static int __init pm_sysrq_init(void)
65489 {
65490 register_sysrq_key('o', &sysrq_poweroff_op);
65491 return 0;
65492diff --git a/kernel/power/process.c b/kernel/power/process.c
65493index 3d4b954..11af930 100644
65494--- a/kernel/power/process.c
65495+++ b/kernel/power/process.c
65496@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_only)
65497 u64 elapsed_csecs64;
65498 unsigned int elapsed_csecs;
65499 bool wakeup = false;
65500+ bool timedout = false;
65501
65502 do_gettimeofday(&start);
65503
65504@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_only)
65505
65506 while (true) {
65507 todo = 0;
65508+ if (time_after(jiffies, end_time))
65509+ timedout = true;
65510 read_lock(&tasklist_lock);
65511 do_each_thread(g, p) {
65512 if (frozen(p) || !freezable(p))
65513@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_only)
65514 * try_to_stop() after schedule() in ptrace/signal
65515 * stop sees TIF_FREEZE.
65516 */
65517- if (!task_is_stopped_or_traced(p) &&
65518- !freezer_should_skip(p))
65519+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
65520 todo++;
65521+ if (timedout) {
65522+ printk(KERN_ERR "Task refusing to freeze:\n");
65523+ sched_show_task(p);
65524+ }
65525+ }
65526 } while_each_thread(g, p);
65527 read_unlock(&tasklist_lock);
65528
65529@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_only)
65530 todo += wq_busy;
65531 }
65532
65533- if (!todo || time_after(jiffies, end_time))
65534+ if (!todo || timedout)
65535 break;
65536
65537 if (pm_wakeup_pending()) {
65538diff --git a/kernel/printk.c b/kernel/printk.c
65539index 7982a0a..2095fdc 100644
65540--- a/kernel/printk.c
65541+++ b/kernel/printk.c
65542@@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
65543 if (from_file && type != SYSLOG_ACTION_OPEN)
65544 return 0;
65545
65546+#ifdef CONFIG_GRKERNSEC_DMESG
65547+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
65548+ return -EPERM;
65549+#endif
65550+
65551 if (syslog_action_restricted(type)) {
65552 if (capable(CAP_SYSLOG))
65553 return 0;
65554diff --git a/kernel/profile.c b/kernel/profile.c
65555index 76b8e77..a2930e8 100644
65556--- a/kernel/profile.c
65557+++ b/kernel/profile.c
65558@@ -39,7 +39,7 @@ struct profile_hit {
65559 /* Oprofile timer tick hook */
65560 static int (*timer_hook)(struct pt_regs *) __read_mostly;
65561
65562-static atomic_t *prof_buffer;
65563+static atomic_unchecked_t *prof_buffer;
65564 static unsigned long prof_len, prof_shift;
65565
65566 int prof_on __read_mostly;
65567@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
65568 hits[i].pc = 0;
65569 continue;
65570 }
65571- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65572+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65573 hits[i].hits = hits[i].pc = 0;
65574 }
65575 }
65576@@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
65577 * Add the current hit(s) and flush the write-queue out
65578 * to the global buffer:
65579 */
65580- atomic_add(nr_hits, &prof_buffer[pc]);
65581+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
65582 for (i = 0; i < NR_PROFILE_HIT; ++i) {
65583- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65584+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65585 hits[i].pc = hits[i].hits = 0;
65586 }
65587 out:
65588@@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
65589 {
65590 unsigned long pc;
65591 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
65592- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65593+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65594 }
65595 #endif /* !CONFIG_SMP */
65596
65597@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
65598 return -EFAULT;
65599 buf++; p++; count--; read++;
65600 }
65601- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
65602+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
65603 if (copy_to_user(buf, (void *)pnt, count))
65604 return -EFAULT;
65605 read += count;
65606@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
65607 }
65608 #endif
65609 profile_discard_flip_buffers();
65610- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
65611+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
65612 return count;
65613 }
65614
65615diff --git a/kernel/ptrace.c b/kernel/ptrace.c
65616index 78ab24a..332c915 100644
65617--- a/kernel/ptrace.c
65618+++ b/kernel/ptrace.c
65619@@ -172,7 +172,8 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
65620 return ret;
65621 }
65622
65623-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
65624+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
65625+ unsigned int log)
65626 {
65627 const struct cred *cred = current_cred(), *tcred;
65628
65629@@ -198,7 +199,8 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
65630 cred->gid == tcred->sgid &&
65631 cred->gid == tcred->gid))
65632 goto ok;
65633- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
65634+ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
65635+ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
65636 goto ok;
65637 rcu_read_unlock();
65638 return -EPERM;
65639@@ -207,7 +209,9 @@ ok:
65640 smp_rmb();
65641 if (task->mm)
65642 dumpable = get_dumpable(task->mm);
65643- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
65644+ if (!dumpable &&
65645+ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
65646+ (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
65647 return -EPERM;
65648
65649 return security_ptrace_access_check(task, mode);
65650@@ -217,7 +221,21 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
65651 {
65652 int err;
65653 task_lock(task);
65654- err = __ptrace_may_access(task, mode);
65655+ err = __ptrace_may_access(task, mode, 0);
65656+ task_unlock(task);
65657+ return !err;
65658+}
65659+
65660+bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode)
65661+{
65662+ return __ptrace_may_access(task, mode, 0);
65663+}
65664+
65665+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
65666+{
65667+ int err;
65668+ task_lock(task);
65669+ err = __ptrace_may_access(task, mode, 1);
65670 task_unlock(task);
65671 return !err;
65672 }
65673@@ -262,7 +280,7 @@ static int ptrace_attach(struct task_struct *task, long request,
65674 goto out;
65675
65676 task_lock(task);
65677- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
65678+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
65679 task_unlock(task);
65680 if (retval)
65681 goto unlock_creds;
65682@@ -277,7 +295,7 @@ static int ptrace_attach(struct task_struct *task, long request,
65683 task->ptrace = PT_PTRACED;
65684 if (seize)
65685 task->ptrace |= PT_SEIZED;
65686- if (task_ns_capable(task, CAP_SYS_PTRACE))
65687+ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
65688 task->ptrace |= PT_PTRACE_CAP;
65689
65690 __ptrace_link(task, current);
65691@@ -483,7 +501,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
65692 break;
65693 return -EIO;
65694 }
65695- if (copy_to_user(dst, buf, retval))
65696+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
65697 return -EFAULT;
65698 copied += retval;
65699 src += retval;
65700@@ -680,7 +698,7 @@ int ptrace_request(struct task_struct *child, long request,
65701 bool seized = child->ptrace & PT_SEIZED;
65702 int ret = -EIO;
65703 siginfo_t siginfo, *si;
65704- void __user *datavp = (void __user *) data;
65705+ void __user *datavp = (__force void __user *) data;
65706 unsigned long __user *datalp = datavp;
65707 unsigned long flags;
65708
65709@@ -882,14 +900,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
65710 goto out;
65711 }
65712
65713+ if (gr_handle_ptrace(child, request)) {
65714+ ret = -EPERM;
65715+ goto out_put_task_struct;
65716+ }
65717+
65718 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65719 ret = ptrace_attach(child, request, data);
65720 /*
65721 * Some architectures need to do book-keeping after
65722 * a ptrace attach.
65723 */
65724- if (!ret)
65725+ if (!ret) {
65726 arch_ptrace_attach(child);
65727+ gr_audit_ptrace(child);
65728+ }
65729 goto out_put_task_struct;
65730 }
65731
65732@@ -915,7 +940,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
65733 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
65734 if (copied != sizeof(tmp))
65735 return -EIO;
65736- return put_user(tmp, (unsigned long __user *)data);
65737+ return put_user(tmp, (__force unsigned long __user *)data);
65738 }
65739
65740 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
65741@@ -1025,14 +1050,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
65742 goto out;
65743 }
65744
65745+ if (gr_handle_ptrace(child, request)) {
65746+ ret = -EPERM;
65747+ goto out_put_task_struct;
65748+ }
65749+
65750 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65751 ret = ptrace_attach(child, request, data);
65752 /*
65753 * Some architectures need to do book-keeping after
65754 * a ptrace attach.
65755 */
65756- if (!ret)
65757+ if (!ret) {
65758 arch_ptrace_attach(child);
65759+ gr_audit_ptrace(child);
65760+ }
65761 goto out_put_task_struct;
65762 }
65763
65764diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
65765index 764825c..3aa6ac4 100644
65766--- a/kernel/rcutorture.c
65767+++ b/kernel/rcutorture.c
65768@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
65769 { 0 };
65770 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
65771 { 0 };
65772-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65773-static atomic_t n_rcu_torture_alloc;
65774-static atomic_t n_rcu_torture_alloc_fail;
65775-static atomic_t n_rcu_torture_free;
65776-static atomic_t n_rcu_torture_mberror;
65777-static atomic_t n_rcu_torture_error;
65778+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65779+static atomic_unchecked_t n_rcu_torture_alloc;
65780+static atomic_unchecked_t n_rcu_torture_alloc_fail;
65781+static atomic_unchecked_t n_rcu_torture_free;
65782+static atomic_unchecked_t n_rcu_torture_mberror;
65783+static atomic_unchecked_t n_rcu_torture_error;
65784 static long n_rcu_torture_boost_ktrerror;
65785 static long n_rcu_torture_boost_rterror;
65786 static long n_rcu_torture_boost_failure;
65787@@ -223,11 +223,11 @@ rcu_torture_alloc(void)
65788
65789 spin_lock_bh(&rcu_torture_lock);
65790 if (list_empty(&rcu_torture_freelist)) {
65791- atomic_inc(&n_rcu_torture_alloc_fail);
65792+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
65793 spin_unlock_bh(&rcu_torture_lock);
65794 return NULL;
65795 }
65796- atomic_inc(&n_rcu_torture_alloc);
65797+ atomic_inc_unchecked(&n_rcu_torture_alloc);
65798 p = rcu_torture_freelist.next;
65799 list_del_init(p);
65800 spin_unlock_bh(&rcu_torture_lock);
65801@@ -240,7 +240,7 @@ rcu_torture_alloc(void)
65802 static void
65803 rcu_torture_free(struct rcu_torture *p)
65804 {
65805- atomic_inc(&n_rcu_torture_free);
65806+ atomic_inc_unchecked(&n_rcu_torture_free);
65807 spin_lock_bh(&rcu_torture_lock);
65808 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
65809 spin_unlock_bh(&rcu_torture_lock);
65810@@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
65811 i = rp->rtort_pipe_count;
65812 if (i > RCU_TORTURE_PIPE_LEN)
65813 i = RCU_TORTURE_PIPE_LEN;
65814- atomic_inc(&rcu_torture_wcount[i]);
65815+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65816 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65817 rp->rtort_mbtest = 0;
65818 rcu_torture_free(rp);
65819@@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
65820 i = rp->rtort_pipe_count;
65821 if (i > RCU_TORTURE_PIPE_LEN)
65822 i = RCU_TORTURE_PIPE_LEN;
65823- atomic_inc(&rcu_torture_wcount[i]);
65824+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65825 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65826 rp->rtort_mbtest = 0;
65827 list_del(&rp->rtort_free);
65828@@ -872,7 +872,7 @@ rcu_torture_writer(void *arg)
65829 i = old_rp->rtort_pipe_count;
65830 if (i > RCU_TORTURE_PIPE_LEN)
65831 i = RCU_TORTURE_PIPE_LEN;
65832- atomic_inc(&rcu_torture_wcount[i]);
65833+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65834 old_rp->rtort_pipe_count++;
65835 cur_ops->deferred_free(old_rp);
65836 }
65837@@ -940,7 +940,7 @@ static void rcu_torture_timer(unsigned long unused)
65838 return;
65839 }
65840 if (p->rtort_mbtest == 0)
65841- atomic_inc(&n_rcu_torture_mberror);
65842+ atomic_inc_unchecked(&n_rcu_torture_mberror);
65843 spin_lock(&rand_lock);
65844 cur_ops->read_delay(&rand);
65845 n_rcu_torture_timers++;
65846@@ -1001,7 +1001,7 @@ rcu_torture_reader(void *arg)
65847 continue;
65848 }
65849 if (p->rtort_mbtest == 0)
65850- atomic_inc(&n_rcu_torture_mberror);
65851+ atomic_inc_unchecked(&n_rcu_torture_mberror);
65852 cur_ops->read_delay(&rand);
65853 preempt_disable();
65854 pipe_count = p->rtort_pipe_count;
65855@@ -1060,16 +1060,16 @@ rcu_torture_printk(char *page)
65856 rcu_torture_current,
65857 rcu_torture_current_version,
65858 list_empty(&rcu_torture_freelist),
65859- atomic_read(&n_rcu_torture_alloc),
65860- atomic_read(&n_rcu_torture_alloc_fail),
65861- atomic_read(&n_rcu_torture_free),
65862- atomic_read(&n_rcu_torture_mberror),
65863+ atomic_read_unchecked(&n_rcu_torture_alloc),
65864+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
65865+ atomic_read_unchecked(&n_rcu_torture_free),
65866+ atomic_read_unchecked(&n_rcu_torture_mberror),
65867 n_rcu_torture_boost_ktrerror,
65868 n_rcu_torture_boost_rterror,
65869 n_rcu_torture_boost_failure,
65870 n_rcu_torture_boosts,
65871 n_rcu_torture_timers);
65872- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
65873+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
65874 n_rcu_torture_boost_ktrerror != 0 ||
65875 n_rcu_torture_boost_rterror != 0 ||
65876 n_rcu_torture_boost_failure != 0)
65877@@ -1077,7 +1077,7 @@ rcu_torture_printk(char *page)
65878 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
65879 if (i > 1) {
65880 cnt += sprintf(&page[cnt], "!!! ");
65881- atomic_inc(&n_rcu_torture_error);
65882+ atomic_inc_unchecked(&n_rcu_torture_error);
65883 WARN_ON_ONCE(1);
65884 }
65885 cnt += sprintf(&page[cnt], "Reader Pipe: ");
65886@@ -1091,7 +1091,7 @@ rcu_torture_printk(char *page)
65887 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
65888 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65889 cnt += sprintf(&page[cnt], " %d",
65890- atomic_read(&rcu_torture_wcount[i]));
65891+ atomic_read_unchecked(&rcu_torture_wcount[i]));
65892 }
65893 cnt += sprintf(&page[cnt], "\n");
65894 if (cur_ops->stats)
65895@@ -1401,7 +1401,7 @@ rcu_torture_cleanup(void)
65896
65897 if (cur_ops->cleanup)
65898 cur_ops->cleanup();
65899- if (atomic_read(&n_rcu_torture_error))
65900+ if (atomic_read_unchecked(&n_rcu_torture_error))
65901 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
65902 else
65903 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
65904@@ -1465,17 +1465,17 @@ rcu_torture_init(void)
65905
65906 rcu_torture_current = NULL;
65907 rcu_torture_current_version = 0;
65908- atomic_set(&n_rcu_torture_alloc, 0);
65909- atomic_set(&n_rcu_torture_alloc_fail, 0);
65910- atomic_set(&n_rcu_torture_free, 0);
65911- atomic_set(&n_rcu_torture_mberror, 0);
65912- atomic_set(&n_rcu_torture_error, 0);
65913+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65914+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65915+ atomic_set_unchecked(&n_rcu_torture_free, 0);
65916+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65917+ atomic_set_unchecked(&n_rcu_torture_error, 0);
65918 n_rcu_torture_boost_ktrerror = 0;
65919 n_rcu_torture_boost_rterror = 0;
65920 n_rcu_torture_boost_failure = 0;
65921 n_rcu_torture_boosts = 0;
65922 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65923- atomic_set(&rcu_torture_wcount[i], 0);
65924+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65925 for_each_possible_cpu(cpu) {
65926 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65927 per_cpu(rcu_torture_count, cpu)[i] = 0;
65928diff --git a/kernel/rcutree.c b/kernel/rcutree.c
65929index 6b76d81..7afc1b3 100644
65930--- a/kernel/rcutree.c
65931+++ b/kernel/rcutree.c
65932@@ -367,9 +367,9 @@ void rcu_enter_nohz(void)
65933 trace_rcu_dyntick("Start");
65934 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65935 smp_mb__before_atomic_inc(); /* See above. */
65936- atomic_inc(&rdtp->dynticks);
65937+ atomic_inc_unchecked(&rdtp->dynticks);
65938 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
65939- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65940+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65941 local_irq_restore(flags);
65942 }
65943
65944@@ -391,10 +391,10 @@ void rcu_exit_nohz(void)
65945 return;
65946 }
65947 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
65948- atomic_inc(&rdtp->dynticks);
65949+ atomic_inc_unchecked(&rdtp->dynticks);
65950 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65951 smp_mb__after_atomic_inc(); /* See above. */
65952- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65953+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65954 trace_rcu_dyntick("End");
65955 local_irq_restore(flags);
65956 }
65957@@ -411,14 +411,14 @@ void rcu_nmi_enter(void)
65958 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
65959
65960 if (rdtp->dynticks_nmi_nesting == 0 &&
65961- (atomic_read(&rdtp->dynticks) & 0x1))
65962+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
65963 return;
65964 rdtp->dynticks_nmi_nesting++;
65965 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
65966- atomic_inc(&rdtp->dynticks);
65967+ atomic_inc_unchecked(&rdtp->dynticks);
65968 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65969 smp_mb__after_atomic_inc(); /* See above. */
65970- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65971+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65972 }
65973
65974 /**
65975@@ -437,9 +437,9 @@ void rcu_nmi_exit(void)
65976 return;
65977 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65978 smp_mb__before_atomic_inc(); /* See above. */
65979- atomic_inc(&rdtp->dynticks);
65980+ atomic_inc_unchecked(&rdtp->dynticks);
65981 smp_mb__after_atomic_inc(); /* Force delay to next write. */
65982- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65983+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65984 }
65985
65986 /**
65987@@ -474,7 +474,7 @@ void rcu_irq_exit(void)
65988 */
65989 static int dyntick_save_progress_counter(struct rcu_data *rdp)
65990 {
65991- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
65992+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65993 return 0;
65994 }
65995
65996@@ -489,7 +489,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
65997 unsigned int curr;
65998 unsigned int snap;
65999
66000- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
66001+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
66002 snap = (unsigned int)rdp->dynticks_snap;
66003
66004 /*
66005@@ -1552,7 +1552,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
66006 /*
66007 * Do RCU core processing for the current CPU.
66008 */
66009-static void rcu_process_callbacks(struct softirq_action *unused)
66010+static void rcu_process_callbacks(void)
66011 {
66012 trace_rcu_utilization("Start RCU core");
66013 __rcu_process_callbacks(&rcu_sched_state,
66014diff --git a/kernel/rcutree.h b/kernel/rcutree.h
66015index 849ce9e..74bc9de 100644
66016--- a/kernel/rcutree.h
66017+++ b/kernel/rcutree.h
66018@@ -86,7 +86,7 @@
66019 struct rcu_dynticks {
66020 int dynticks_nesting; /* Track irq/process nesting level. */
66021 int dynticks_nmi_nesting; /* Track NMI nesting level. */
66022- atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
66023+ atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
66024 };
66025
66026 /* RCU's kthread states for tracing. */
66027diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
66028index 4b9b9f8..2326053 100644
66029--- a/kernel/rcutree_plugin.h
66030+++ b/kernel/rcutree_plugin.h
66031@@ -842,7 +842,7 @@ void synchronize_rcu_expedited(void)
66032
66033 /* Clean up and exit. */
66034 smp_mb(); /* ensure expedited GP seen before counter increment. */
66035- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
66036+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
66037 unlock_mb_ret:
66038 mutex_unlock(&sync_rcu_preempt_exp_mutex);
66039 mb_ret:
66040@@ -1815,8 +1815,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
66041
66042 #else /* #ifndef CONFIG_SMP */
66043
66044-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
66045-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
66046+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
66047+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
66048
66049 static int synchronize_sched_expedited_cpu_stop(void *data)
66050 {
66051@@ -1871,7 +1871,7 @@ void synchronize_sched_expedited(void)
66052 int firstsnap, s, snap, trycount = 0;
66053
66054 /* Note that atomic_inc_return() implies full memory barrier. */
66055- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
66056+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
66057 get_online_cpus();
66058
66059 /*
66060@@ -1892,7 +1892,7 @@ void synchronize_sched_expedited(void)
66061 }
66062
66063 /* Check to see if someone else did our work for us. */
66064- s = atomic_read(&sync_sched_expedited_done);
66065+ s = atomic_read_unchecked(&sync_sched_expedited_done);
66066 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
66067 smp_mb(); /* ensure test happens before caller kfree */
66068 return;
66069@@ -1907,7 +1907,7 @@ void synchronize_sched_expedited(void)
66070 * grace period works for us.
66071 */
66072 get_online_cpus();
66073- snap = atomic_read(&sync_sched_expedited_started) - 1;
66074+ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
66075 smp_mb(); /* ensure read is before try_stop_cpus(). */
66076 }
66077
66078@@ -1918,12 +1918,12 @@ void synchronize_sched_expedited(void)
66079 * than we did beat us to the punch.
66080 */
66081 do {
66082- s = atomic_read(&sync_sched_expedited_done);
66083+ s = atomic_read_unchecked(&sync_sched_expedited_done);
66084 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
66085 smp_mb(); /* ensure test happens before caller kfree */
66086 break;
66087 }
66088- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
66089+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
66090
66091 put_online_cpus();
66092 }
66093@@ -1985,7 +1985,7 @@ int rcu_needs_cpu(int cpu)
66094 for_each_online_cpu(thatcpu) {
66095 if (thatcpu == cpu)
66096 continue;
66097- snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
66098+ snap = atomic_add_return_unchecked(0, &per_cpu(rcu_dynticks,
66099 thatcpu).dynticks);
66100 smp_mb(); /* Order sampling of snap with end of grace period. */
66101 if ((snap & 0x1) != 0) {
66102diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
66103index 9feffa4..54058df 100644
66104--- a/kernel/rcutree_trace.c
66105+++ b/kernel/rcutree_trace.c
66106@@ -69,7 +69,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
66107 rdp->qs_pending);
66108 #ifdef CONFIG_NO_HZ
66109 seq_printf(m, " dt=%d/%d/%d df=%lu",
66110- atomic_read(&rdp->dynticks->dynticks),
66111+ atomic_read_unchecked(&rdp->dynticks->dynticks),
66112 rdp->dynticks->dynticks_nesting,
66113 rdp->dynticks->dynticks_nmi_nesting,
66114 rdp->dynticks_fqs);
66115@@ -143,7 +143,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
66116 rdp->qs_pending);
66117 #ifdef CONFIG_NO_HZ
66118 seq_printf(m, ",%d,%d,%d,%lu",
66119- atomic_read(&rdp->dynticks->dynticks),
66120+ atomic_read_unchecked(&rdp->dynticks->dynticks),
66121 rdp->dynticks->dynticks_nesting,
66122 rdp->dynticks->dynticks_nmi_nesting,
66123 rdp->dynticks_fqs);
66124diff --git a/kernel/resource.c b/kernel/resource.c
66125index 7640b3a..5879283 100644
66126--- a/kernel/resource.c
66127+++ b/kernel/resource.c
66128@@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
66129
66130 static int __init ioresources_init(void)
66131 {
66132+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66133+#ifdef CONFIG_GRKERNSEC_PROC_USER
66134+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
66135+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
66136+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66137+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
66138+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
66139+#endif
66140+#else
66141 proc_create("ioports", 0, NULL, &proc_ioports_operations);
66142 proc_create("iomem", 0, NULL, &proc_iomem_operations);
66143+#endif
66144 return 0;
66145 }
66146 __initcall(ioresources_init);
66147diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
66148index 3d9f31c..7fefc9e 100644
66149--- a/kernel/rtmutex-tester.c
66150+++ b/kernel/rtmutex-tester.c
66151@@ -20,7 +20,7 @@
66152 #define MAX_RT_TEST_MUTEXES 8
66153
66154 static spinlock_t rttest_lock;
66155-static atomic_t rttest_event;
66156+static atomic_unchecked_t rttest_event;
66157
66158 struct test_thread_data {
66159 int opcode;
66160@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
66161
66162 case RTTEST_LOCKCONT:
66163 td->mutexes[td->opdata] = 1;
66164- td->event = atomic_add_return(1, &rttest_event);
66165+ td->event = atomic_add_return_unchecked(1, &rttest_event);
66166 return 0;
66167
66168 case RTTEST_RESET:
66169@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
66170 return 0;
66171
66172 case RTTEST_RESETEVENT:
66173- atomic_set(&rttest_event, 0);
66174+ atomic_set_unchecked(&rttest_event, 0);
66175 return 0;
66176
66177 default:
66178@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
66179 return ret;
66180
66181 td->mutexes[id] = 1;
66182- td->event = atomic_add_return(1, &rttest_event);
66183+ td->event = atomic_add_return_unchecked(1, &rttest_event);
66184 rt_mutex_lock(&mutexes[id]);
66185- td->event = atomic_add_return(1, &rttest_event);
66186+ td->event = atomic_add_return_unchecked(1, &rttest_event);
66187 td->mutexes[id] = 4;
66188 return 0;
66189
66190@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
66191 return ret;
66192
66193 td->mutexes[id] = 1;
66194- td->event = atomic_add_return(1, &rttest_event);
66195+ td->event = atomic_add_return_unchecked(1, &rttest_event);
66196 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
66197- td->event = atomic_add_return(1, &rttest_event);
66198+ td->event = atomic_add_return_unchecked(1, &rttest_event);
66199 td->mutexes[id] = ret ? 0 : 4;
66200 return ret ? -EINTR : 0;
66201
66202@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
66203 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
66204 return ret;
66205
66206- td->event = atomic_add_return(1, &rttest_event);
66207+ td->event = atomic_add_return_unchecked(1, &rttest_event);
66208 rt_mutex_unlock(&mutexes[id]);
66209- td->event = atomic_add_return(1, &rttest_event);
66210+ td->event = atomic_add_return_unchecked(1, &rttest_event);
66211 td->mutexes[id] = 0;
66212 return 0;
66213
66214@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
66215 break;
66216
66217 td->mutexes[dat] = 2;
66218- td->event = atomic_add_return(1, &rttest_event);
66219+ td->event = atomic_add_return_unchecked(1, &rttest_event);
66220 break;
66221
66222 default:
66223@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
66224 return;
66225
66226 td->mutexes[dat] = 3;
66227- td->event = atomic_add_return(1, &rttest_event);
66228+ td->event = atomic_add_return_unchecked(1, &rttest_event);
66229 break;
66230
66231 case RTTEST_LOCKNOWAIT:
66232@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
66233 return;
66234
66235 td->mutexes[dat] = 1;
66236- td->event = atomic_add_return(1, &rttest_event);
66237+ td->event = atomic_add_return_unchecked(1, &rttest_event);
66238 return;
66239
66240 default:
66241diff --git a/kernel/sched.c b/kernel/sched.c
66242index d6b149c..896cbb8 100644
66243--- a/kernel/sched.c
66244+++ b/kernel/sched.c
66245@@ -4389,6 +4389,19 @@ pick_next_task(struct rq *rq)
66246 BUG(); /* the idle class will always have a runnable task */
66247 }
66248
66249+#ifdef CONFIG_GRKERNSEC_SETXID
66250+extern void gr_delayed_cred_worker(void);
66251+static inline void gr_cred_schedule(void)
66252+{
66253+ if (unlikely(current->delayed_cred))
66254+ gr_delayed_cred_worker();
66255+}
66256+#else
66257+static inline void gr_cred_schedule(void)
66258+{
66259+}
66260+#endif
66261+
66262 /*
66263 * __schedule() is the main scheduler function.
66264 */
66265@@ -4408,6 +4421,8 @@ need_resched:
66266
66267 schedule_debug(prev);
66268
66269+ gr_cred_schedule();
66270+
66271 if (sched_feat(HRTICK))
66272 hrtick_clear(rq);
66273
66274@@ -5098,6 +5113,8 @@ int can_nice(const struct task_struct *p, const int nice)
66275 /* convert nice value [19,-20] to rlimit style value [1,40] */
66276 int nice_rlim = 20 - nice;
66277
66278+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
66279+
66280 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
66281 capable(CAP_SYS_NICE));
66282 }
66283@@ -5131,7 +5148,8 @@ SYSCALL_DEFINE1(nice, int, increment)
66284 if (nice > 19)
66285 nice = 19;
66286
66287- if (increment < 0 && !can_nice(current, nice))
66288+ if (increment < 0 && (!can_nice(current, nice) ||
66289+ gr_handle_chroot_nice()))
66290 return -EPERM;
66291
66292 retval = security_task_setnice(current, nice);
66293@@ -5288,6 +5306,7 @@ recheck:
66294 unsigned long rlim_rtprio =
66295 task_rlimit(p, RLIMIT_RTPRIO);
66296
66297+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
66298 /* can't set/change the rt policy */
66299 if (policy != p->policy && !rlim_rtprio)
66300 return -EPERM;
66301diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
66302index 429242f..d7cca82 100644
66303--- a/kernel/sched_autogroup.c
66304+++ b/kernel/sched_autogroup.c
66305@@ -7,7 +7,7 @@
66306
66307 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
66308 static struct autogroup autogroup_default;
66309-static atomic_t autogroup_seq_nr;
66310+static atomic_unchecked_t autogroup_seq_nr;
66311
66312 static void __init autogroup_init(struct task_struct *init_task)
66313 {
66314@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
66315
66316 kref_init(&ag->kref);
66317 init_rwsem(&ag->lock);
66318- ag->id = atomic_inc_return(&autogroup_seq_nr);
66319+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
66320 ag->tg = tg;
66321 #ifdef CONFIG_RT_GROUP_SCHED
66322 /*
66323diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
66324index 8a39fa3..34f3dbc 100644
66325--- a/kernel/sched_fair.c
66326+++ b/kernel/sched_fair.c
66327@@ -4801,7 +4801,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
66328 * run_rebalance_domains is triggered when needed from the scheduler tick.
66329 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
66330 */
66331-static void run_rebalance_domains(struct softirq_action *h)
66332+static void run_rebalance_domains(void)
66333 {
66334 int this_cpu = smp_processor_id();
66335 struct rq *this_rq = cpu_rq(this_cpu);
66336diff --git a/kernel/signal.c b/kernel/signal.c
66337index 2065515..aed2987 100644
66338--- a/kernel/signal.c
66339+++ b/kernel/signal.c
66340@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cachep;
66341
66342 int print_fatal_signals __read_mostly;
66343
66344-static void __user *sig_handler(struct task_struct *t, int sig)
66345+static __sighandler_t sig_handler(struct task_struct *t, int sig)
66346 {
66347 return t->sighand->action[sig - 1].sa.sa_handler;
66348 }
66349
66350-static int sig_handler_ignored(void __user *handler, int sig)
66351+static int sig_handler_ignored(__sighandler_t handler, int sig)
66352 {
66353 /* Is it explicitly or implicitly ignored? */
66354 return handler == SIG_IGN ||
66355@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
66356 static int sig_task_ignored(struct task_struct *t, int sig,
66357 int from_ancestor_ns)
66358 {
66359- void __user *handler;
66360+ __sighandler_t handler;
66361
66362 handler = sig_handler(t, sig);
66363
66364@@ -364,6 +364,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
66365 atomic_inc(&user->sigpending);
66366 rcu_read_unlock();
66367
66368+ if (!override_rlimit)
66369+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
66370+
66371 if (override_rlimit ||
66372 atomic_read(&user->sigpending) <=
66373 task_rlimit(t, RLIMIT_SIGPENDING)) {
66374@@ -488,7 +491,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
66375
66376 int unhandled_signal(struct task_struct *tsk, int sig)
66377 {
66378- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
66379+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
66380 if (is_global_init(tsk))
66381 return 1;
66382 if (handler != SIG_IGN && handler != SIG_DFL)
66383@@ -815,6 +818,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
66384 }
66385 }
66386
66387+ /* allow glibc communication via tgkill to other threads in our
66388+ thread group */
66389+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
66390+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
66391+ && gr_handle_signal(t, sig))
66392+ return -EPERM;
66393+
66394 return security_task_kill(t, info, sig, 0);
66395 }
66396
66397@@ -1165,7 +1175,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
66398 return send_signal(sig, info, p, 1);
66399 }
66400
66401-static int
66402+int
66403 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66404 {
66405 return send_signal(sig, info, t, 0);
66406@@ -1202,6 +1212,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66407 unsigned long int flags;
66408 int ret, blocked, ignored;
66409 struct k_sigaction *action;
66410+ int is_unhandled = 0;
66411
66412 spin_lock_irqsave(&t->sighand->siglock, flags);
66413 action = &t->sighand->action[sig-1];
66414@@ -1216,9 +1227,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66415 }
66416 if (action->sa.sa_handler == SIG_DFL)
66417 t->signal->flags &= ~SIGNAL_UNKILLABLE;
66418+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
66419+ is_unhandled = 1;
66420 ret = specific_send_sig_info(sig, info, t);
66421 spin_unlock_irqrestore(&t->sighand->siglock, flags);
66422
66423+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
66424+ normal operation */
66425+ if (is_unhandled) {
66426+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
66427+ gr_handle_crash(t, sig);
66428+ }
66429+
66430 return ret;
66431 }
66432
66433@@ -1285,8 +1305,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
66434 ret = check_kill_permission(sig, info, p);
66435 rcu_read_unlock();
66436
66437- if (!ret && sig)
66438+ if (!ret && sig) {
66439 ret = do_send_sig_info(sig, info, p, true);
66440+ if (!ret)
66441+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
66442+ }
66443
66444 return ret;
66445 }
66446@@ -2754,7 +2777,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
66447 int error = -ESRCH;
66448
66449 rcu_read_lock();
66450- p = find_task_by_vpid(pid);
66451+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
66452+ /* allow glibc communication via tgkill to other threads in our
66453+ thread group */
66454+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
66455+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
66456+ p = find_task_by_vpid_unrestricted(pid);
66457+ else
66458+#endif
66459+ p = find_task_by_vpid(pid);
66460 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
66461 error = check_kill_permission(sig, info, p);
66462 /*
66463diff --git a/kernel/smp.c b/kernel/smp.c
66464index db197d6..17aef0b 100644
66465--- a/kernel/smp.c
66466+++ b/kernel/smp.c
66467@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
66468 }
66469 EXPORT_SYMBOL(smp_call_function);
66470
66471-void ipi_call_lock(void)
66472+void ipi_call_lock(void) __acquires(call_function.lock)
66473 {
66474 raw_spin_lock(&call_function.lock);
66475 }
66476
66477-void ipi_call_unlock(void)
66478+void ipi_call_unlock(void) __releases(call_function.lock)
66479 {
66480 raw_spin_unlock(&call_function.lock);
66481 }
66482
66483-void ipi_call_lock_irq(void)
66484+void ipi_call_lock_irq(void) __acquires(call_function.lock)
66485 {
66486 raw_spin_lock_irq(&call_function.lock);
66487 }
66488
66489-void ipi_call_unlock_irq(void)
66490+void ipi_call_unlock_irq(void) __releases(call_function.lock)
66491 {
66492 raw_spin_unlock_irq(&call_function.lock);
66493 }
66494diff --git a/kernel/softirq.c b/kernel/softirq.c
66495index 2c71d91..1021f81 100644
66496--- a/kernel/softirq.c
66497+++ b/kernel/softirq.c
66498@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
66499
66500 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
66501
66502-char *softirq_to_name[NR_SOFTIRQS] = {
66503+const char * const softirq_to_name[NR_SOFTIRQS] = {
66504 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
66505 "TASKLET", "SCHED", "HRTIMER", "RCU"
66506 };
66507@@ -235,7 +235,7 @@ restart:
66508 kstat_incr_softirqs_this_cpu(vec_nr);
66509
66510 trace_softirq_entry(vec_nr);
66511- h->action(h);
66512+ h->action();
66513 trace_softirq_exit(vec_nr);
66514 if (unlikely(prev_count != preempt_count())) {
66515 printk(KERN_ERR "huh, entered softirq %u %s %p"
66516@@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
66517 local_irq_restore(flags);
66518 }
66519
66520-void open_softirq(int nr, void (*action)(struct softirq_action *))
66521+void open_softirq(int nr, void (*action)(void))
66522 {
66523- softirq_vec[nr].action = action;
66524+ pax_open_kernel();
66525+ *(void **)&softirq_vec[nr].action = action;
66526+ pax_close_kernel();
66527 }
66528
66529 /*
66530@@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
66531
66532 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
66533
66534-static void tasklet_action(struct softirq_action *a)
66535+static void tasklet_action(void)
66536 {
66537 struct tasklet_struct *list;
66538
66539@@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a)
66540 }
66541 }
66542
66543-static void tasklet_hi_action(struct softirq_action *a)
66544+static void tasklet_hi_action(void)
66545 {
66546 struct tasklet_struct *list;
66547
66548diff --git a/kernel/sys.c b/kernel/sys.c
66549index 481611f..0754d86 100644
66550--- a/kernel/sys.c
66551+++ b/kernel/sys.c
66552@@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
66553 error = -EACCES;
66554 goto out;
66555 }
66556+
66557+ if (gr_handle_chroot_setpriority(p, niceval)) {
66558+ error = -EACCES;
66559+ goto out;
66560+ }
66561+
66562 no_nice = security_task_setnice(p, niceval);
66563 if (no_nice) {
66564 error = no_nice;
66565@@ -572,6 +578,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
66566 goto error;
66567 }
66568
66569+ if (gr_check_group_change(new->gid, new->egid, -1))
66570+ goto error;
66571+
66572 if (rgid != (gid_t) -1 ||
66573 (egid != (gid_t) -1 && egid != old->gid))
66574 new->sgid = new->egid;
66575@@ -601,6 +610,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
66576 old = current_cred();
66577
66578 retval = -EPERM;
66579+
66580+ if (gr_check_group_change(gid, gid, gid))
66581+ goto error;
66582+
66583 if (nsown_capable(CAP_SETGID))
66584 new->gid = new->egid = new->sgid = new->fsgid = gid;
66585 else if (gid == old->gid || gid == old->sgid)
66586@@ -618,7 +631,7 @@ error:
66587 /*
66588 * change the user struct in a credentials set to match the new UID
66589 */
66590-static int set_user(struct cred *new)
66591+int set_user(struct cred *new)
66592 {
66593 struct user_struct *new_user;
66594
66595@@ -688,6 +701,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
66596 goto error;
66597 }
66598
66599+ if (gr_check_user_change(new->uid, new->euid, -1))
66600+ goto error;
66601+
66602 if (new->uid != old->uid) {
66603 retval = set_user(new);
66604 if (retval < 0)
66605@@ -732,6 +748,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
66606 old = current_cred();
66607
66608 retval = -EPERM;
66609+
66610+ if (gr_check_crash_uid(uid))
66611+ goto error;
66612+ if (gr_check_user_change(uid, uid, uid))
66613+ goto error;
66614+
66615 if (nsown_capable(CAP_SETUID)) {
66616 new->suid = new->uid = uid;
66617 if (uid != old->uid) {
66618@@ -786,6 +808,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
66619 goto error;
66620 }
66621
66622+ if (gr_check_user_change(ruid, euid, -1))
66623+ goto error;
66624+
66625 if (ruid != (uid_t) -1) {
66626 new->uid = ruid;
66627 if (ruid != old->uid) {
66628@@ -850,6 +875,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
66629 goto error;
66630 }
66631
66632+ if (gr_check_group_change(rgid, egid, -1))
66633+ goto error;
66634+
66635 if (rgid != (gid_t) -1)
66636 new->gid = rgid;
66637 if (egid != (gid_t) -1)
66638@@ -896,6 +924,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66639 old = current_cred();
66640 old_fsuid = old->fsuid;
66641
66642+ if (gr_check_user_change(-1, -1, uid))
66643+ goto error;
66644+
66645 if (uid == old->uid || uid == old->euid ||
66646 uid == old->suid || uid == old->fsuid ||
66647 nsown_capable(CAP_SETUID)) {
66648@@ -906,6 +937,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66649 }
66650 }
66651
66652+error:
66653 abort_creds(new);
66654 return old_fsuid;
66655
66656@@ -932,12 +964,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
66657 if (gid == old->gid || gid == old->egid ||
66658 gid == old->sgid || gid == old->fsgid ||
66659 nsown_capable(CAP_SETGID)) {
66660+ if (gr_check_group_change(-1, -1, gid))
66661+ goto error;
66662+
66663 if (gid != old_fsgid) {
66664 new->fsgid = gid;
66665 goto change_okay;
66666 }
66667 }
66668
66669+error:
66670 abort_creds(new);
66671 return old_fsgid;
66672
66673@@ -1189,7 +1225,10 @@ static int override_release(char __user *release, int len)
66674 }
66675 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
66676 snprintf(buf, len, "2.6.%u%s", v, rest);
66677- ret = copy_to_user(release, buf, len);
66678+ if (len > sizeof(buf))
66679+ ret = -EFAULT;
66680+ else
66681+ ret = copy_to_user(release, buf, len);
66682 }
66683 return ret;
66684 }
66685@@ -1243,19 +1282,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
66686 return -EFAULT;
66687
66688 down_read(&uts_sem);
66689- error = __copy_to_user(&name->sysname, &utsname()->sysname,
66690+ error = __copy_to_user(name->sysname, &utsname()->sysname,
66691 __OLD_UTS_LEN);
66692 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
66693- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
66694+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
66695 __OLD_UTS_LEN);
66696 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
66697- error |= __copy_to_user(&name->release, &utsname()->release,
66698+ error |= __copy_to_user(name->release, &utsname()->release,
66699 __OLD_UTS_LEN);
66700 error |= __put_user(0, name->release + __OLD_UTS_LEN);
66701- error |= __copy_to_user(&name->version, &utsname()->version,
66702+ error |= __copy_to_user(name->version, &utsname()->version,
66703 __OLD_UTS_LEN);
66704 error |= __put_user(0, name->version + __OLD_UTS_LEN);
66705- error |= __copy_to_user(&name->machine, &utsname()->machine,
66706+ error |= __copy_to_user(name->machine, &utsname()->machine,
66707 __OLD_UTS_LEN);
66708 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
66709 up_read(&uts_sem);
66710@@ -1720,7 +1759,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
66711 error = get_dumpable(me->mm);
66712 break;
66713 case PR_SET_DUMPABLE:
66714- if (arg2 < 0 || arg2 > 1) {
66715+ if (arg2 > 1) {
66716 error = -EINVAL;
66717 break;
66718 }
66719diff --git a/kernel/sysctl.c b/kernel/sysctl.c
66720index ae27196..7506d69 100644
66721--- a/kernel/sysctl.c
66722+++ b/kernel/sysctl.c
66723@@ -86,6 +86,13 @@
66724
66725
66726 #if defined(CONFIG_SYSCTL)
66727+#include <linux/grsecurity.h>
66728+#include <linux/grinternal.h>
66729+
66730+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
66731+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66732+ const int op);
66733+extern int gr_handle_chroot_sysctl(const int op);
66734
66735 /* External variables not in a header file. */
66736 extern int sysctl_overcommit_memory;
66737@@ -191,6 +198,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
66738 }
66739
66740 #endif
66741+extern struct ctl_table grsecurity_table[];
66742
66743 static struct ctl_table root_table[];
66744 static struct ctl_table_root sysctl_table_root;
66745@@ -220,6 +228,20 @@ extern struct ctl_table epoll_table[];
66746 int sysctl_legacy_va_layout;
66747 #endif
66748
66749+#ifdef CONFIG_PAX_SOFTMODE
66750+static ctl_table pax_table[] = {
66751+ {
66752+ .procname = "softmode",
66753+ .data = &pax_softmode,
66754+ .maxlen = sizeof(unsigned int),
66755+ .mode = 0600,
66756+ .proc_handler = &proc_dointvec,
66757+ },
66758+
66759+ { }
66760+};
66761+#endif
66762+
66763 /* The default sysctl tables: */
66764
66765 static struct ctl_table root_table[] = {
66766@@ -266,6 +288,22 @@ static int max_extfrag_threshold = 1000;
66767 #endif
66768
66769 static struct ctl_table kern_table[] = {
66770+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
66771+ {
66772+ .procname = "grsecurity",
66773+ .mode = 0500,
66774+ .child = grsecurity_table,
66775+ },
66776+#endif
66777+
66778+#ifdef CONFIG_PAX_SOFTMODE
66779+ {
66780+ .procname = "pax",
66781+ .mode = 0500,
66782+ .child = pax_table,
66783+ },
66784+#endif
66785+
66786 {
66787 .procname = "sched_child_runs_first",
66788 .data = &sysctl_sched_child_runs_first,
66789@@ -550,7 +588,7 @@ static struct ctl_table kern_table[] = {
66790 .data = &modprobe_path,
66791 .maxlen = KMOD_PATH_LEN,
66792 .mode = 0644,
66793- .proc_handler = proc_dostring,
66794+ .proc_handler = proc_dostring_modpriv,
66795 },
66796 {
66797 .procname = "modules_disabled",
66798@@ -717,16 +755,20 @@ static struct ctl_table kern_table[] = {
66799 .extra1 = &zero,
66800 .extra2 = &one,
66801 },
66802+#endif
66803 {
66804 .procname = "kptr_restrict",
66805 .data = &kptr_restrict,
66806 .maxlen = sizeof(int),
66807 .mode = 0644,
66808 .proc_handler = proc_dmesg_restrict,
66809+#ifdef CONFIG_GRKERNSEC_HIDESYM
66810+ .extra1 = &two,
66811+#else
66812 .extra1 = &zero,
66813+#endif
66814 .extra2 = &two,
66815 },
66816-#endif
66817 {
66818 .procname = "ngroups_max",
66819 .data = &ngroups_max,
66820@@ -1216,6 +1258,13 @@ static struct ctl_table vm_table[] = {
66821 .proc_handler = proc_dointvec_minmax,
66822 .extra1 = &zero,
66823 },
66824+ {
66825+ .procname = "heap_stack_gap",
66826+ .data = &sysctl_heap_stack_gap,
66827+ .maxlen = sizeof(sysctl_heap_stack_gap),
66828+ .mode = 0644,
66829+ .proc_handler = proc_doulongvec_minmax,
66830+ },
66831 #else
66832 {
66833 .procname = "nr_trim_pages",
66834@@ -1720,6 +1769,17 @@ static int test_perm(int mode, int op)
66835 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
66836 {
66837 int mode;
66838+ int error;
66839+
66840+ if (table->parent != NULL && table->parent->procname != NULL &&
66841+ table->procname != NULL &&
66842+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
66843+ return -EACCES;
66844+ if (gr_handle_chroot_sysctl(op))
66845+ return -EACCES;
66846+ error = gr_handle_sysctl(table, op);
66847+ if (error)
66848+ return error;
66849
66850 if (root->permissions)
66851 mode = root->permissions(root, current->nsproxy, table);
66852@@ -2124,6 +2184,16 @@ int proc_dostring(struct ctl_table *table, int write,
66853 buffer, lenp, ppos);
66854 }
66855
66856+int proc_dostring_modpriv(struct ctl_table *table, int write,
66857+ void __user *buffer, size_t *lenp, loff_t *ppos)
66858+{
66859+ if (write && !capable(CAP_SYS_MODULE))
66860+ return -EPERM;
66861+
66862+ return _proc_do_string(table->data, table->maxlen, write,
66863+ buffer, lenp, ppos);
66864+}
66865+
66866 static size_t proc_skip_spaces(char **buf)
66867 {
66868 size_t ret;
66869@@ -2229,6 +2299,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
66870 len = strlen(tmp);
66871 if (len > *size)
66872 len = *size;
66873+ if (len > sizeof(tmp))
66874+ len = sizeof(tmp);
66875 if (copy_to_user(*buf, tmp, len))
66876 return -EFAULT;
66877 *size -= len;
66878@@ -2545,8 +2617,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
66879 *i = val;
66880 } else {
66881 val = convdiv * (*i) / convmul;
66882- if (!first)
66883+ if (!first) {
66884 err = proc_put_char(&buffer, &left, '\t');
66885+ if (err)
66886+ break;
66887+ }
66888 err = proc_put_long(&buffer, &left, val, false);
66889 if (err)
66890 break;
66891@@ -2941,6 +3016,12 @@ int proc_dostring(struct ctl_table *table, int write,
66892 return -ENOSYS;
66893 }
66894
66895+int proc_dostring_modpriv(struct ctl_table *table, int write,
66896+ void __user *buffer, size_t *lenp, loff_t *ppos)
66897+{
66898+ return -ENOSYS;
66899+}
66900+
66901 int proc_dointvec(struct ctl_table *table, int write,
66902 void __user *buffer, size_t *lenp, loff_t *ppos)
66903 {
66904@@ -2997,6 +3078,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
66905 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
66906 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
66907 EXPORT_SYMBOL(proc_dostring);
66908+EXPORT_SYMBOL(proc_dostring_modpriv);
66909 EXPORT_SYMBOL(proc_doulongvec_minmax);
66910 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
66911 EXPORT_SYMBOL(register_sysctl_table);
66912diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
66913index a650694..aaeeb20 100644
66914--- a/kernel/sysctl_binary.c
66915+++ b/kernel/sysctl_binary.c
66916@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
66917 int i;
66918
66919 set_fs(KERNEL_DS);
66920- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66921+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66922 set_fs(old_fs);
66923 if (result < 0)
66924 goto out_kfree;
66925@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
66926 }
66927
66928 set_fs(KERNEL_DS);
66929- result = vfs_write(file, buffer, str - buffer, &pos);
66930+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66931 set_fs(old_fs);
66932 if (result < 0)
66933 goto out_kfree;
66934@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
66935 int i;
66936
66937 set_fs(KERNEL_DS);
66938- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66939+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66940 set_fs(old_fs);
66941 if (result < 0)
66942 goto out_kfree;
66943@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
66944 }
66945
66946 set_fs(KERNEL_DS);
66947- result = vfs_write(file, buffer, str - buffer, &pos);
66948+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66949 set_fs(old_fs);
66950 if (result < 0)
66951 goto out_kfree;
66952@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
66953 int i;
66954
66955 set_fs(KERNEL_DS);
66956- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66957+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66958 set_fs(old_fs);
66959 if (result < 0)
66960 goto out;
66961@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
66962 __le16 dnaddr;
66963
66964 set_fs(KERNEL_DS);
66965- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66966+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66967 set_fs(old_fs);
66968 if (result < 0)
66969 goto out;
66970@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
66971 le16_to_cpu(dnaddr) & 0x3ff);
66972
66973 set_fs(KERNEL_DS);
66974- result = vfs_write(file, buf, len, &pos);
66975+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
66976 set_fs(old_fs);
66977 if (result < 0)
66978 goto out;
66979diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
66980index 362da65..ab8ef8c 100644
66981--- a/kernel/sysctl_check.c
66982+++ b/kernel/sysctl_check.c
66983@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
66984 set_fail(&fail, table, "Directory with extra2");
66985 } else {
66986 if ((table->proc_handler == proc_dostring) ||
66987+ (table->proc_handler == proc_dostring_modpriv) ||
66988 (table->proc_handler == proc_dointvec) ||
66989 (table->proc_handler == proc_dointvec_minmax) ||
66990 (table->proc_handler == proc_dointvec_jiffies) ||
66991diff --git a/kernel/taskstats.c b/kernel/taskstats.c
66992index e660464..c8b9e67 100644
66993--- a/kernel/taskstats.c
66994+++ b/kernel/taskstats.c
66995@@ -27,9 +27,12 @@
66996 #include <linux/cgroup.h>
66997 #include <linux/fs.h>
66998 #include <linux/file.h>
66999+#include <linux/grsecurity.h>
67000 #include <net/genetlink.h>
67001 #include <linux/atomic.h>
67002
67003+extern int gr_is_taskstats_denied(int pid);
67004+
67005 /*
67006 * Maximum length of a cpumask that can be specified in
67007 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
67008@@ -556,6 +559,9 @@ err:
67009
67010 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
67011 {
67012+ if (gr_is_taskstats_denied(current->pid))
67013+ return -EACCES;
67014+
67015 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
67016 return cmd_attr_register_cpumask(info);
67017 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
67018diff --git a/kernel/time.c b/kernel/time.c
67019index 73e416d..cfc6f69 100644
67020--- a/kernel/time.c
67021+++ b/kernel/time.c
67022@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
67023 return error;
67024
67025 if (tz) {
67026+ /* we log in do_settimeofday called below, so don't log twice
67027+ */
67028+ if (!tv)
67029+ gr_log_timechange();
67030+
67031 /* SMP safe, global irq locking makes it work. */
67032 sys_tz = *tz;
67033 update_vsyscall_tz();
67034diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
67035index 8a46f5d..bbe6f9c 100644
67036--- a/kernel/time/alarmtimer.c
67037+++ b/kernel/time/alarmtimer.c
67038@@ -773,7 +773,7 @@ static int __init alarmtimer_init(void)
67039 struct platform_device *pdev;
67040 int error = 0;
67041 int i;
67042- struct k_clock alarm_clock = {
67043+ static struct k_clock alarm_clock = {
67044 .clock_getres = alarm_clock_getres,
67045 .clock_get = alarm_clock_get,
67046 .timer_create = alarm_timer_create,
67047diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
67048index fd4a7b1..fae5c2a 100644
67049--- a/kernel/time/tick-broadcast.c
67050+++ b/kernel/time/tick-broadcast.c
67051@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
67052 * then clear the broadcast bit.
67053 */
67054 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
67055- int cpu = smp_processor_id();
67056+ cpu = smp_processor_id();
67057
67058 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
67059 tick_broadcast_clear_oneshot(cpu);
67060diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
67061index 2378413..be455fd 100644
67062--- a/kernel/time/timekeeping.c
67063+++ b/kernel/time/timekeeping.c
67064@@ -14,6 +14,7 @@
67065 #include <linux/init.h>
67066 #include <linux/mm.h>
67067 #include <linux/sched.h>
67068+#include <linux/grsecurity.h>
67069 #include <linux/syscore_ops.h>
67070 #include <linux/clocksource.h>
67071 #include <linux/jiffies.h>
67072@@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv)
67073 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
67074 return -EINVAL;
67075
67076+ gr_log_timechange();
67077+
67078 write_seqlock_irqsave(&xtime_lock, flags);
67079
67080 timekeeping_forward_now();
67081diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
67082index 3258455..f35227d 100644
67083--- a/kernel/time/timer_list.c
67084+++ b/kernel/time/timer_list.c
67085@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
67086
67087 static void print_name_offset(struct seq_file *m, void *sym)
67088 {
67089+#ifdef CONFIG_GRKERNSEC_HIDESYM
67090+ SEQ_printf(m, "<%p>", NULL);
67091+#else
67092 char symname[KSYM_NAME_LEN];
67093
67094 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
67095 SEQ_printf(m, "<%pK>", sym);
67096 else
67097 SEQ_printf(m, "%s", symname);
67098+#endif
67099 }
67100
67101 static void
67102@@ -112,7 +116,11 @@ next_one:
67103 static void
67104 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
67105 {
67106+#ifdef CONFIG_GRKERNSEC_HIDESYM
67107+ SEQ_printf(m, " .base: %p\n", NULL);
67108+#else
67109 SEQ_printf(m, " .base: %pK\n", base);
67110+#endif
67111 SEQ_printf(m, " .index: %d\n",
67112 base->index);
67113 SEQ_printf(m, " .resolution: %Lu nsecs\n",
67114@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
67115 {
67116 struct proc_dir_entry *pe;
67117
67118+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67119+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
67120+#else
67121 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
67122+#endif
67123 if (!pe)
67124 return -ENOMEM;
67125 return 0;
67126diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
67127index 0b537f2..9e71eca 100644
67128--- a/kernel/time/timer_stats.c
67129+++ b/kernel/time/timer_stats.c
67130@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
67131 static unsigned long nr_entries;
67132 static struct entry entries[MAX_ENTRIES];
67133
67134-static atomic_t overflow_count;
67135+static atomic_unchecked_t overflow_count;
67136
67137 /*
67138 * The entries are in a hash-table, for fast lookup:
67139@@ -140,7 +140,7 @@ static void reset_entries(void)
67140 nr_entries = 0;
67141 memset(entries, 0, sizeof(entries));
67142 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
67143- atomic_set(&overflow_count, 0);
67144+ atomic_set_unchecked(&overflow_count, 0);
67145 }
67146
67147 static struct entry *alloc_entry(void)
67148@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
67149 if (likely(entry))
67150 entry->count++;
67151 else
67152- atomic_inc(&overflow_count);
67153+ atomic_inc_unchecked(&overflow_count);
67154
67155 out_unlock:
67156 raw_spin_unlock_irqrestore(lock, flags);
67157@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
67158
67159 static void print_name_offset(struct seq_file *m, unsigned long addr)
67160 {
67161+#ifdef CONFIG_GRKERNSEC_HIDESYM
67162+ seq_printf(m, "<%p>", NULL);
67163+#else
67164 char symname[KSYM_NAME_LEN];
67165
67166 if (lookup_symbol_name(addr, symname) < 0)
67167 seq_printf(m, "<%p>", (void *)addr);
67168 else
67169 seq_printf(m, "%s", symname);
67170+#endif
67171 }
67172
67173 static int tstats_show(struct seq_file *m, void *v)
67174@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
67175
67176 seq_puts(m, "Timer Stats Version: v0.2\n");
67177 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
67178- if (atomic_read(&overflow_count))
67179+ if (atomic_read_unchecked(&overflow_count))
67180 seq_printf(m, "Overflow: %d entries\n",
67181- atomic_read(&overflow_count));
67182+ atomic_read_unchecked(&overflow_count));
67183
67184 for (i = 0; i < nr_entries; i++) {
67185 entry = entries + i;
67186@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
67187 {
67188 struct proc_dir_entry *pe;
67189
67190+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67191+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
67192+#else
67193 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
67194+#endif
67195 if (!pe)
67196 return -ENOMEM;
67197 return 0;
67198diff --git a/kernel/timer.c b/kernel/timer.c
67199index 9c3c62b..441690e 100644
67200--- a/kernel/timer.c
67201+++ b/kernel/timer.c
67202@@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
67203 /*
67204 * This function runs timers and the timer-tq in bottom half context.
67205 */
67206-static void run_timer_softirq(struct softirq_action *h)
67207+static void run_timer_softirq(void)
67208 {
67209 struct tvec_base *base = __this_cpu_read(tvec_bases);
67210
67211diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
67212index 16fc34a..efd8bb8 100644
67213--- a/kernel/trace/blktrace.c
67214+++ b/kernel/trace/blktrace.c
67215@@ -324,7 +324,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
67216 struct blk_trace *bt = filp->private_data;
67217 char buf[16];
67218
67219- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
67220+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
67221
67222 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
67223 }
67224@@ -389,7 +389,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
67225 return 1;
67226
67227 bt = buf->chan->private_data;
67228- atomic_inc(&bt->dropped);
67229+ atomic_inc_unchecked(&bt->dropped);
67230 return 0;
67231 }
67232
67233@@ -490,7 +490,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
67234
67235 bt->dir = dir;
67236 bt->dev = dev;
67237- atomic_set(&bt->dropped, 0);
67238+ atomic_set_unchecked(&bt->dropped, 0);
67239
67240 ret = -EIO;
67241 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
67242diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
67243index 25b4f4d..6f4772d 100644
67244--- a/kernel/trace/ftrace.c
67245+++ b/kernel/trace/ftrace.c
67246@@ -1587,12 +1587,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
67247 if (unlikely(ftrace_disabled))
67248 return 0;
67249
67250+ ret = ftrace_arch_code_modify_prepare();
67251+ FTRACE_WARN_ON(ret);
67252+ if (ret)
67253+ return 0;
67254+
67255 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
67256+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
67257 if (ret) {
67258 ftrace_bug(ret, ip);
67259- return 0;
67260 }
67261- return 1;
67262+ return ret ? 0 : 1;
67263 }
67264
67265 /*
67266@@ -2608,7 +2613,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
67267
67268 int
67269 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
67270- void *data)
67271+ void *data)
67272 {
67273 struct ftrace_func_probe *entry;
67274 struct ftrace_page *pg;
67275diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
67276index f2bd275..adaf3a2 100644
67277--- a/kernel/trace/trace.c
67278+++ b/kernel/trace/trace.c
67279@@ -4201,10 +4201,9 @@ static const struct file_operations tracing_dyn_info_fops = {
67280 };
67281 #endif
67282
67283-static struct dentry *d_tracer;
67284-
67285 struct dentry *tracing_init_dentry(void)
67286 {
67287+ static struct dentry *d_tracer;
67288 static int once;
67289
67290 if (d_tracer)
67291@@ -4224,10 +4223,9 @@ struct dentry *tracing_init_dentry(void)
67292 return d_tracer;
67293 }
67294
67295-static struct dentry *d_percpu;
67296-
67297 struct dentry *tracing_dentry_percpu(void)
67298 {
67299+ static struct dentry *d_percpu;
67300 static int once;
67301 struct dentry *d_tracer;
67302
67303diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
67304index c212a7f..7b02394 100644
67305--- a/kernel/trace/trace_events.c
67306+++ b/kernel/trace/trace_events.c
67307@@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list);
67308 struct ftrace_module_file_ops {
67309 struct list_head list;
67310 struct module *mod;
67311- struct file_operations id;
67312- struct file_operations enable;
67313- struct file_operations format;
67314- struct file_operations filter;
67315 };
67316
67317 static struct ftrace_module_file_ops *
67318@@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod)
67319
67320 file_ops->mod = mod;
67321
67322- file_ops->id = ftrace_event_id_fops;
67323- file_ops->id.owner = mod;
67324-
67325- file_ops->enable = ftrace_enable_fops;
67326- file_ops->enable.owner = mod;
67327-
67328- file_ops->filter = ftrace_event_filter_fops;
67329- file_ops->filter.owner = mod;
67330-
67331- file_ops->format = ftrace_event_format_fops;
67332- file_ops->format.owner = mod;
67333+ pax_open_kernel();
67334+ *(void **)&mod->trace_id.owner = mod;
67335+ *(void **)&mod->trace_enable.owner = mod;
67336+ *(void **)&mod->trace_filter.owner = mod;
67337+ *(void **)&mod->trace_format.owner = mod;
67338+ pax_close_kernel();
67339
67340 list_add(&file_ops->list, &ftrace_module_file_list);
67341
67342@@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod)
67343
67344 for_each_event(call, start, end) {
67345 __trace_add_event_call(*call, mod,
67346- &file_ops->id, &file_ops->enable,
67347- &file_ops->filter, &file_ops->format);
67348+ &mod->trace_id, &mod->trace_enable,
67349+ &mod->trace_filter, &mod->trace_format);
67350 }
67351 }
67352
67353diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
67354index 00d527c..7c5b1a3 100644
67355--- a/kernel/trace/trace_kprobe.c
67356+++ b/kernel/trace/trace_kprobe.c
67357@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67358 long ret;
67359 int maxlen = get_rloc_len(*(u32 *)dest);
67360 u8 *dst = get_rloc_data(dest);
67361- u8 *src = addr;
67362+ const u8 __user *src = (const u8 __force_user *)addr;
67363 mm_segment_t old_fs = get_fs();
67364 if (!maxlen)
67365 return;
67366@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67367 pagefault_disable();
67368 do
67369 ret = __copy_from_user_inatomic(dst++, src++, 1);
67370- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
67371+ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
67372 dst[-1] = '\0';
67373 pagefault_enable();
67374 set_fs(old_fs);
67375@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67376 ((u8 *)get_rloc_data(dest))[0] = '\0';
67377 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
67378 } else
67379- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
67380+ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
67381 get_rloc_offs(*(u32 *)dest));
67382 }
67383 /* Return the length of string -- including null terminal byte */
67384@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
67385 set_fs(KERNEL_DS);
67386 pagefault_disable();
67387 do {
67388- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
67389+ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
67390 len++;
67391 } while (c && ret == 0 && len < MAX_STRING_SIZE);
67392 pagefault_enable();
67393diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
67394index fd3c8aa..5f324a6 100644
67395--- a/kernel/trace/trace_mmiotrace.c
67396+++ b/kernel/trace/trace_mmiotrace.c
67397@@ -24,7 +24,7 @@ struct header_iter {
67398 static struct trace_array *mmio_trace_array;
67399 static bool overrun_detected;
67400 static unsigned long prev_overruns;
67401-static atomic_t dropped_count;
67402+static atomic_unchecked_t dropped_count;
67403
67404 static void mmio_reset_data(struct trace_array *tr)
67405 {
67406@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
67407
67408 static unsigned long count_overruns(struct trace_iterator *iter)
67409 {
67410- unsigned long cnt = atomic_xchg(&dropped_count, 0);
67411+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
67412 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
67413
67414 if (over > prev_overruns)
67415@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
67416 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
67417 sizeof(*entry), 0, pc);
67418 if (!event) {
67419- atomic_inc(&dropped_count);
67420+ atomic_inc_unchecked(&dropped_count);
67421 return;
67422 }
67423 entry = ring_buffer_event_data(event);
67424@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
67425 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
67426 sizeof(*entry), 0, pc);
67427 if (!event) {
67428- atomic_inc(&dropped_count);
67429+ atomic_inc_unchecked(&dropped_count);
67430 return;
67431 }
67432 entry = ring_buffer_event_data(event);
67433diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
67434index 5199930..26c73a0 100644
67435--- a/kernel/trace/trace_output.c
67436+++ b/kernel/trace/trace_output.c
67437@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
67438
67439 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
67440 if (!IS_ERR(p)) {
67441- p = mangle_path(s->buffer + s->len, p, "\n");
67442+ p = mangle_path(s->buffer + s->len, p, "\n\\");
67443 if (p) {
67444 s->len = p - s->buffer;
67445 return 1;
67446diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
67447index 77575b3..6e623d1 100644
67448--- a/kernel/trace/trace_stack.c
67449+++ b/kernel/trace/trace_stack.c
67450@@ -50,7 +50,7 @@ static inline void check_stack(void)
67451 return;
67452
67453 /* we do not handle interrupt stacks yet */
67454- if (!object_is_on_stack(&this_size))
67455+ if (!object_starts_on_stack(&this_size))
67456 return;
67457
67458 local_irq_save(flags);
67459diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
67460index 209b379..7f76423 100644
67461--- a/kernel/trace/trace_workqueue.c
67462+++ b/kernel/trace/trace_workqueue.c
67463@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
67464 int cpu;
67465 pid_t pid;
67466 /* Can be inserted from interrupt or user context, need to be atomic */
67467- atomic_t inserted;
67468+ atomic_unchecked_t inserted;
67469 /*
67470 * Don't need to be atomic, works are serialized in a single workqueue thread
67471 * on a single CPU.
67472@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
67473 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
67474 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
67475 if (node->pid == wq_thread->pid) {
67476- atomic_inc(&node->inserted);
67477+ atomic_inc_unchecked(&node->inserted);
67478 goto found;
67479 }
67480 }
67481@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
67482 tsk = get_pid_task(pid, PIDTYPE_PID);
67483 if (tsk) {
67484 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
67485- atomic_read(&cws->inserted), cws->executed,
67486+ atomic_read_unchecked(&cws->inserted), cws->executed,
67487 tsk->comm);
67488 put_task_struct(tsk);
67489 }
67490diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
67491index 82928f5..92da771 100644
67492--- a/lib/Kconfig.debug
67493+++ b/lib/Kconfig.debug
67494@@ -1103,6 +1103,7 @@ config LATENCYTOP
67495 depends on DEBUG_KERNEL
67496 depends on STACKTRACE_SUPPORT
67497 depends on PROC_FS
67498+ depends on !GRKERNSEC_HIDESYM
67499 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
67500 select KALLSYMS
67501 select KALLSYMS_ALL
67502diff --git a/lib/bitmap.c b/lib/bitmap.c
67503index 0d4a127..33a06c7 100644
67504--- a/lib/bitmap.c
67505+++ b/lib/bitmap.c
67506@@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
67507 {
67508 int c, old_c, totaldigits, ndigits, nchunks, nbits;
67509 u32 chunk;
67510- const char __user __force *ubuf = (const char __user __force *)buf;
67511+ const char __user *ubuf = (const char __force_user *)buf;
67512
67513 bitmap_zero(maskp, nmaskbits);
67514
67515@@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
67516 {
67517 if (!access_ok(VERIFY_READ, ubuf, ulen))
67518 return -EFAULT;
67519- return __bitmap_parse((const char __force *)ubuf,
67520+ return __bitmap_parse((const char __force_kernel *)ubuf,
67521 ulen, 1, maskp, nmaskbits);
67522
67523 }
67524@@ -596,7 +596,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
67525 {
67526 unsigned a, b;
67527 int c, old_c, totaldigits;
67528- const char __user __force *ubuf = (const char __user __force *)buf;
67529+ const char __user *ubuf = (const char __force_user *)buf;
67530 int exp_digit, in_range;
67531
67532 totaldigits = c = 0;
67533@@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __user *ubuf,
67534 {
67535 if (!access_ok(VERIFY_READ, ubuf, ulen))
67536 return -EFAULT;
67537- return __bitmap_parselist((const char __force *)ubuf,
67538+ return __bitmap_parselist((const char __force_kernel *)ubuf,
67539 ulen, 1, maskp, nmaskbits);
67540 }
67541 EXPORT_SYMBOL(bitmap_parselist_user);
67542diff --git a/lib/bug.c b/lib/bug.c
67543index 1955209..cbbb2ad 100644
67544--- a/lib/bug.c
67545+++ b/lib/bug.c
67546@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
67547 return BUG_TRAP_TYPE_NONE;
67548
67549 bug = find_bug(bugaddr);
67550+ if (!bug)
67551+ return BUG_TRAP_TYPE_NONE;
67552
67553 file = NULL;
67554 line = 0;
67555diff --git a/lib/debugobjects.c b/lib/debugobjects.c
67556index a78b7c6..2c73084 100644
67557--- a/lib/debugobjects.c
67558+++ b/lib/debugobjects.c
67559@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
67560 if (limit > 4)
67561 return;
67562
67563- is_on_stack = object_is_on_stack(addr);
67564+ is_on_stack = object_starts_on_stack(addr);
67565 if (is_on_stack == onstack)
67566 return;
67567
67568diff --git a/lib/devres.c b/lib/devres.c
67569index 7c0e953..f642b5c 100644
67570--- a/lib/devres.c
67571+++ b/lib/devres.c
67572@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
67573 void devm_iounmap(struct device *dev, void __iomem *addr)
67574 {
67575 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
67576- (void *)addr));
67577+ (void __force *)addr));
67578 iounmap(addr);
67579 }
67580 EXPORT_SYMBOL(devm_iounmap);
67581@@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
67582 {
67583 ioport_unmap(addr);
67584 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
67585- devm_ioport_map_match, (void *)addr));
67586+ devm_ioport_map_match, (void __force *)addr));
67587 }
67588 EXPORT_SYMBOL(devm_ioport_unmap);
67589
67590diff --git a/lib/dma-debug.c b/lib/dma-debug.c
67591index fea790a..ebb0e82 100644
67592--- a/lib/dma-debug.c
67593+++ b/lib/dma-debug.c
67594@@ -925,7 +925,7 @@ out:
67595
67596 static void check_for_stack(struct device *dev, void *addr)
67597 {
67598- if (object_is_on_stack(addr))
67599+ if (object_starts_on_stack(addr))
67600 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
67601 "stack [addr=%p]\n", addr);
67602 }
67603diff --git a/lib/extable.c b/lib/extable.c
67604index 4cac81e..63e9b8f 100644
67605--- a/lib/extable.c
67606+++ b/lib/extable.c
67607@@ -13,6 +13,7 @@
67608 #include <linux/init.h>
67609 #include <linux/sort.h>
67610 #include <asm/uaccess.h>
67611+#include <asm/pgtable.h>
67612
67613 #ifndef ARCH_HAS_SORT_EXTABLE
67614 /*
67615@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
67616 void sort_extable(struct exception_table_entry *start,
67617 struct exception_table_entry *finish)
67618 {
67619+ pax_open_kernel();
67620 sort(start, finish - start, sizeof(struct exception_table_entry),
67621 cmp_ex, NULL);
67622+ pax_close_kernel();
67623 }
67624
67625 #ifdef CONFIG_MODULES
67626diff --git a/lib/inflate.c b/lib/inflate.c
67627index 013a761..c28f3fc 100644
67628--- a/lib/inflate.c
67629+++ b/lib/inflate.c
67630@@ -269,7 +269,7 @@ static void free(void *where)
67631 malloc_ptr = free_mem_ptr;
67632 }
67633 #else
67634-#define malloc(a) kmalloc(a, GFP_KERNEL)
67635+#define malloc(a) kmalloc((a), GFP_KERNEL)
67636 #define free(a) kfree(a)
67637 #endif
67638
67639diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
67640index bd2bea9..6b3c95e 100644
67641--- a/lib/is_single_threaded.c
67642+++ b/lib/is_single_threaded.c
67643@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
67644 struct task_struct *p, *t;
67645 bool ret;
67646
67647+ if (!mm)
67648+ return true;
67649+
67650 if (atomic_read(&task->signal->live) != 1)
67651 return false;
67652
67653diff --git a/lib/kref.c b/lib/kref.c
67654index 3efb882..8492f4c 100644
67655--- a/lib/kref.c
67656+++ b/lib/kref.c
67657@@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
67658 */
67659 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
67660 {
67661- WARN_ON(release == NULL);
67662+ BUG_ON(release == NULL);
67663 WARN_ON(release == (void (*)(struct kref *))kfree);
67664
67665 if (atomic_dec_and_test(&kref->refcount)) {
67666diff --git a/lib/radix-tree.c b/lib/radix-tree.c
67667index d9df745..e73c2fe 100644
67668--- a/lib/radix-tree.c
67669+++ b/lib/radix-tree.c
67670@@ -80,7 +80,7 @@ struct radix_tree_preload {
67671 int nr;
67672 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
67673 };
67674-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
67675+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
67676
67677 static inline void *ptr_to_indirect(void *ptr)
67678 {
67679diff --git a/lib/vsprintf.c b/lib/vsprintf.c
67680index 993599e..84dc70e 100644
67681--- a/lib/vsprintf.c
67682+++ b/lib/vsprintf.c
67683@@ -16,6 +16,9 @@
67684 * - scnprintf and vscnprintf
67685 */
67686
67687+#ifdef CONFIG_GRKERNSEC_HIDESYM
67688+#define __INCLUDED_BY_HIDESYM 1
67689+#endif
67690 #include <stdarg.h>
67691 #include <linux/module.h>
67692 #include <linux/types.h>
67693@@ -413,7 +416,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
67694 char sym[KSYM_SYMBOL_LEN];
67695 if (ext == 'B')
67696 sprint_backtrace(sym, value);
67697- else if (ext != 'f' && ext != 's')
67698+ else if (ext != 'f' && ext != 's' && ext != 'a')
67699 sprint_symbol(sym, value);
67700 else
67701 kallsyms_lookup(value, NULL, NULL, NULL, sym);
67702@@ -777,7 +780,11 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
67703 return string(buf, end, uuid, spec);
67704 }
67705
67706+#ifdef CONFIG_GRKERNSEC_HIDESYM
67707+int kptr_restrict __read_mostly = 2;
67708+#else
67709 int kptr_restrict __read_mostly;
67710+#endif
67711
67712 /*
67713 * Show a '%p' thing. A kernel extension is that the '%p' is followed
67714@@ -791,6 +798,8 @@ int kptr_restrict __read_mostly;
67715 * - 'S' For symbolic direct pointers with offset
67716 * - 's' For symbolic direct pointers without offset
67717 * - 'B' For backtraced symbolic direct pointers with offset
67718+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
67719+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
67720 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
67721 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
67722 * - 'M' For a 6-byte MAC address, it prints the address in the
67723@@ -835,12 +844,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
67724 {
67725 if (!ptr && *fmt != 'K') {
67726 /*
67727- * Print (null) with the same width as a pointer so it makes
67728+ * Print (nil) with the same width as a pointer so it makes
67729 * tabular output look nice.
67730 */
67731 if (spec.field_width == -1)
67732 spec.field_width = 2 * sizeof(void *);
67733- return string(buf, end, "(null)", spec);
67734+ return string(buf, end, "(nil)", spec);
67735 }
67736
67737 switch (*fmt) {
67738@@ -850,6 +859,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
67739 /* Fallthrough */
67740 case 'S':
67741 case 's':
67742+#ifdef CONFIG_GRKERNSEC_HIDESYM
67743+ break;
67744+#else
67745+ return symbol_string(buf, end, ptr, spec, *fmt);
67746+#endif
67747+ case 'A':
67748+ case 'a':
67749 case 'B':
67750 return symbol_string(buf, end, ptr, spec, *fmt);
67751 case 'R':
67752@@ -1608,11 +1624,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
67753 typeof(type) value; \
67754 if (sizeof(type) == 8) { \
67755 args = PTR_ALIGN(args, sizeof(u32)); \
67756- *(u32 *)&value = *(u32 *)args; \
67757- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
67758+ *(u32 *)&value = *(const u32 *)args; \
67759+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
67760 } else { \
67761 args = PTR_ALIGN(args, sizeof(type)); \
67762- value = *(typeof(type) *)args; \
67763+ value = *(const typeof(type) *)args; \
67764 } \
67765 args += sizeof(type); \
67766 value; \
67767@@ -1675,7 +1691,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
67768 case FORMAT_TYPE_STR: {
67769 const char *str_arg = args;
67770 args += strlen(str_arg) + 1;
67771- str = string(str, end, (char *)str_arg, spec);
67772+ str = string(str, end, str_arg, spec);
67773 break;
67774 }
67775
67776diff --git a/localversion-grsec b/localversion-grsec
67777new file mode 100644
67778index 0000000..7cd6065
67779--- /dev/null
67780+++ b/localversion-grsec
67781@@ -0,0 +1 @@
67782+-grsec
67783diff --git a/mm/Kconfig b/mm/Kconfig
67784index 011b110..b492af2 100644
67785--- a/mm/Kconfig
67786+++ b/mm/Kconfig
67787@@ -241,10 +241,10 @@ config KSM
67788 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
67789
67790 config DEFAULT_MMAP_MIN_ADDR
67791- int "Low address space to protect from user allocation"
67792+ int "Low address space to protect from user allocation"
67793 depends on MMU
67794- default 4096
67795- help
67796+ default 65536
67797+ help
67798 This is the portion of low virtual memory which should be protected
67799 from userspace allocation. Keeping a user from writing to low pages
67800 can help reduce the impact of kernel NULL pointer bugs.
67801diff --git a/mm/filemap.c b/mm/filemap.c
67802index 03c5b0e..a01e793 100644
67803--- a/mm/filemap.c
67804+++ b/mm/filemap.c
67805@@ -1770,7 +1770,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
67806 struct address_space *mapping = file->f_mapping;
67807
67808 if (!mapping->a_ops->readpage)
67809- return -ENOEXEC;
67810+ return -ENODEV;
67811 file_accessed(file);
67812 vma->vm_ops = &generic_file_vm_ops;
67813 vma->vm_flags |= VM_CAN_NONLINEAR;
67814@@ -2176,6 +2176,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
67815 *pos = i_size_read(inode);
67816
67817 if (limit != RLIM_INFINITY) {
67818+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
67819 if (*pos >= limit) {
67820 send_sig(SIGXFSZ, current, 0);
67821 return -EFBIG;
67822diff --git a/mm/fremap.c b/mm/fremap.c
67823index 9ed4fd4..c42648d 100644
67824--- a/mm/fremap.c
67825+++ b/mm/fremap.c
67826@@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
67827 retry:
67828 vma = find_vma(mm, start);
67829
67830+#ifdef CONFIG_PAX_SEGMEXEC
67831+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
67832+ goto out;
67833+#endif
67834+
67835 /*
67836 * Make sure the vma is shared, that it supports prefaulting,
67837 * and that the remapped range is valid and fully within
67838diff --git a/mm/highmem.c b/mm/highmem.c
67839index 57d82c6..e9e0552 100644
67840--- a/mm/highmem.c
67841+++ b/mm/highmem.c
67842@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
67843 * So no dangers, even with speculative execution.
67844 */
67845 page = pte_page(pkmap_page_table[i]);
67846+ pax_open_kernel();
67847 pte_clear(&init_mm, (unsigned long)page_address(page),
67848 &pkmap_page_table[i]);
67849-
67850+ pax_close_kernel();
67851 set_page_address(page, NULL);
67852 need_flush = 1;
67853 }
67854@@ -186,9 +187,11 @@ start:
67855 }
67856 }
67857 vaddr = PKMAP_ADDR(last_pkmap_nr);
67858+
67859+ pax_open_kernel();
67860 set_pte_at(&init_mm, vaddr,
67861 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
67862-
67863+ pax_close_kernel();
67864 pkmap_count[last_pkmap_nr] = 1;
67865 set_page_address(page, (void *)vaddr);
67866
67867diff --git a/mm/huge_memory.c b/mm/huge_memory.c
67868index 33141f5..e56bef9 100644
67869--- a/mm/huge_memory.c
67870+++ b/mm/huge_memory.c
67871@@ -703,7 +703,7 @@ out:
67872 * run pte_offset_map on the pmd, if an huge pmd could
67873 * materialize from under us from a different thread.
67874 */
67875- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
67876+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
67877 return VM_FAULT_OOM;
67878 /* if an huge pmd materialized from under us just retry later */
67879 if (unlikely(pmd_trans_huge(*pmd)))
67880diff --git a/mm/hugetlb.c b/mm/hugetlb.c
67881index 2316840..b418671 100644
67882--- a/mm/hugetlb.c
67883+++ b/mm/hugetlb.c
67884@@ -2347,6 +2347,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
67885 return 1;
67886 }
67887
67888+#ifdef CONFIG_PAX_SEGMEXEC
67889+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
67890+{
67891+ struct mm_struct *mm = vma->vm_mm;
67892+ struct vm_area_struct *vma_m;
67893+ unsigned long address_m;
67894+ pte_t *ptep_m;
67895+
67896+ vma_m = pax_find_mirror_vma(vma);
67897+ if (!vma_m)
67898+ return;
67899+
67900+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67901+ address_m = address + SEGMEXEC_TASK_SIZE;
67902+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
67903+ get_page(page_m);
67904+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
67905+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
67906+}
67907+#endif
67908+
67909 /*
67910 * Hugetlb_cow() should be called with page lock of the original hugepage held.
67911 */
67912@@ -2450,6 +2471,11 @@ retry_avoidcopy:
67913 make_huge_pte(vma, new_page, 1));
67914 page_remove_rmap(old_page);
67915 hugepage_add_new_anon_rmap(new_page, vma, address);
67916+
67917+#ifdef CONFIG_PAX_SEGMEXEC
67918+ pax_mirror_huge_pte(vma, address, new_page);
67919+#endif
67920+
67921 /* Make the old page be freed below */
67922 new_page = old_page;
67923 mmu_notifier_invalidate_range_end(mm,
67924@@ -2601,6 +2627,10 @@ retry:
67925 && (vma->vm_flags & VM_SHARED)));
67926 set_huge_pte_at(mm, address, ptep, new_pte);
67927
67928+#ifdef CONFIG_PAX_SEGMEXEC
67929+ pax_mirror_huge_pte(vma, address, page);
67930+#endif
67931+
67932 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
67933 /* Optimization, do the COW without a second fault */
67934 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67935@@ -2630,6 +2660,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67936 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67937 struct hstate *h = hstate_vma(vma);
67938
67939+#ifdef CONFIG_PAX_SEGMEXEC
67940+ struct vm_area_struct *vma_m;
67941+#endif
67942+
67943 ptep = huge_pte_offset(mm, address);
67944 if (ptep) {
67945 entry = huge_ptep_get(ptep);
67946@@ -2641,6 +2675,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67947 VM_FAULT_SET_HINDEX(h - hstates);
67948 }
67949
67950+#ifdef CONFIG_PAX_SEGMEXEC
67951+ vma_m = pax_find_mirror_vma(vma);
67952+ if (vma_m) {
67953+ unsigned long address_m;
67954+
67955+ if (vma->vm_start > vma_m->vm_start) {
67956+ address_m = address;
67957+ address -= SEGMEXEC_TASK_SIZE;
67958+ vma = vma_m;
67959+ h = hstate_vma(vma);
67960+ } else
67961+ address_m = address + SEGMEXEC_TASK_SIZE;
67962+
67963+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
67964+ return VM_FAULT_OOM;
67965+ address_m &= HPAGE_MASK;
67966+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
67967+ }
67968+#endif
67969+
67970 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
67971 if (!ptep)
67972 return VM_FAULT_OOM;
67973diff --git a/mm/internal.h b/mm/internal.h
67974index 2189af4..f2ca332 100644
67975--- a/mm/internal.h
67976+++ b/mm/internal.h
67977@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
67978 * in mm/page_alloc.c
67979 */
67980 extern void __free_pages_bootmem(struct page *page, unsigned int order);
67981+extern void free_compound_page(struct page *page);
67982 extern void prep_compound_page(struct page *page, unsigned long order);
67983 #ifdef CONFIG_MEMORY_FAILURE
67984 extern bool is_free_buddy_page(struct page *page);
67985diff --git a/mm/kmemleak.c b/mm/kmemleak.c
67986index f3b2a00..61da94d 100644
67987--- a/mm/kmemleak.c
67988+++ b/mm/kmemleak.c
67989@@ -357,7 +357,7 @@ static void print_unreferenced(struct seq_file *seq,
67990
67991 for (i = 0; i < object->trace_len; i++) {
67992 void *ptr = (void *)object->trace[i];
67993- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
67994+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
67995 }
67996 }
67997
67998diff --git a/mm/maccess.c b/mm/maccess.c
67999index d53adf9..03a24bf 100644
68000--- a/mm/maccess.c
68001+++ b/mm/maccess.c
68002@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
68003 set_fs(KERNEL_DS);
68004 pagefault_disable();
68005 ret = __copy_from_user_inatomic(dst,
68006- (__force const void __user *)src, size);
68007+ (const void __force_user *)src, size);
68008 pagefault_enable();
68009 set_fs(old_fs);
68010
68011@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
68012
68013 set_fs(KERNEL_DS);
68014 pagefault_disable();
68015- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
68016+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
68017 pagefault_enable();
68018 set_fs(old_fs);
68019
68020diff --git a/mm/madvise.c b/mm/madvise.c
68021index 74bf193..feb6fd3 100644
68022--- a/mm/madvise.c
68023+++ b/mm/madvise.c
68024@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
68025 pgoff_t pgoff;
68026 unsigned long new_flags = vma->vm_flags;
68027
68028+#ifdef CONFIG_PAX_SEGMEXEC
68029+ struct vm_area_struct *vma_m;
68030+#endif
68031+
68032 switch (behavior) {
68033 case MADV_NORMAL:
68034 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
68035@@ -110,6 +114,13 @@ success:
68036 /*
68037 * vm_flags is protected by the mmap_sem held in write mode.
68038 */
68039+
68040+#ifdef CONFIG_PAX_SEGMEXEC
68041+ vma_m = pax_find_mirror_vma(vma);
68042+ if (vma_m)
68043+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
68044+#endif
68045+
68046 vma->vm_flags = new_flags;
68047
68048 out:
68049@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
68050 struct vm_area_struct ** prev,
68051 unsigned long start, unsigned long end)
68052 {
68053+
68054+#ifdef CONFIG_PAX_SEGMEXEC
68055+ struct vm_area_struct *vma_m;
68056+#endif
68057+
68058 *prev = vma;
68059 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
68060 return -EINVAL;
68061@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
68062 zap_page_range(vma, start, end - start, &details);
68063 } else
68064 zap_page_range(vma, start, end - start, NULL);
68065+
68066+#ifdef CONFIG_PAX_SEGMEXEC
68067+ vma_m = pax_find_mirror_vma(vma);
68068+ if (vma_m) {
68069+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
68070+ struct zap_details details = {
68071+ .nonlinear_vma = vma_m,
68072+ .last_index = ULONG_MAX,
68073+ };
68074+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
68075+ } else
68076+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
68077+ }
68078+#endif
68079+
68080 return 0;
68081 }
68082
68083@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
68084 if (end < start)
68085 goto out;
68086
68087+#ifdef CONFIG_PAX_SEGMEXEC
68088+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
68089+ if (end > SEGMEXEC_TASK_SIZE)
68090+ goto out;
68091+ } else
68092+#endif
68093+
68094+ if (end > TASK_SIZE)
68095+ goto out;
68096+
68097 error = 0;
68098 if (end == start)
68099 goto out;
68100diff --git a/mm/memory-failure.c b/mm/memory-failure.c
68101index 06d3479..0778eef 100644
68102--- a/mm/memory-failure.c
68103+++ b/mm/memory-failure.c
68104@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
68105
68106 int sysctl_memory_failure_recovery __read_mostly = 1;
68107
68108-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
68109+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
68110
68111 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
68112
68113@@ -202,7 +202,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
68114 si.si_signo = SIGBUS;
68115 si.si_errno = 0;
68116 si.si_code = BUS_MCEERR_AO;
68117- si.si_addr = (void *)addr;
68118+ si.si_addr = (void __user *)addr;
68119 #ifdef __ARCH_SI_TRAPNO
68120 si.si_trapno = trapno;
68121 #endif
68122@@ -1010,7 +1010,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
68123 }
68124
68125 nr_pages = 1 << compound_trans_order(hpage);
68126- atomic_long_add(nr_pages, &mce_bad_pages);
68127+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
68128
68129 /*
68130 * We need/can do nothing about count=0 pages.
68131@@ -1040,7 +1040,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
68132 if (!PageHWPoison(hpage)
68133 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
68134 || (p != hpage && TestSetPageHWPoison(hpage))) {
68135- atomic_long_sub(nr_pages, &mce_bad_pages);
68136+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68137 return 0;
68138 }
68139 set_page_hwpoison_huge_page(hpage);
68140@@ -1098,7 +1098,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
68141 }
68142 if (hwpoison_filter(p)) {
68143 if (TestClearPageHWPoison(p))
68144- atomic_long_sub(nr_pages, &mce_bad_pages);
68145+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68146 unlock_page(hpage);
68147 put_page(hpage);
68148 return 0;
68149@@ -1315,7 +1315,7 @@ int unpoison_memory(unsigned long pfn)
68150 return 0;
68151 }
68152 if (TestClearPageHWPoison(p))
68153- atomic_long_sub(nr_pages, &mce_bad_pages);
68154+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68155 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
68156 return 0;
68157 }
68158@@ -1329,7 +1329,7 @@ int unpoison_memory(unsigned long pfn)
68159 */
68160 if (TestClearPageHWPoison(page)) {
68161 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
68162- atomic_long_sub(nr_pages, &mce_bad_pages);
68163+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68164 freeit = 1;
68165 if (PageHuge(page))
68166 clear_page_hwpoison_huge_page(page);
68167@@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
68168 }
68169 done:
68170 if (!PageHWPoison(hpage))
68171- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
68172+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
68173 set_page_hwpoison_huge_page(hpage);
68174 dequeue_hwpoisoned_huge_page(hpage);
68175 /* keep elevated page count for bad page */
68176@@ -1573,7 +1573,7 @@ int soft_offline_page(struct page *page, int flags)
68177 return ret;
68178
68179 done:
68180- atomic_long_add(1, &mce_bad_pages);
68181+ atomic_long_add_unchecked(1, &mce_bad_pages);
68182 SetPageHWPoison(page);
68183 /* keep elevated page count for bad page */
68184 return ret;
68185diff --git a/mm/memory.c b/mm/memory.c
68186index 829d437..3d3926a 100644
68187--- a/mm/memory.c
68188+++ b/mm/memory.c
68189@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
68190 return;
68191
68192 pmd = pmd_offset(pud, start);
68193+
68194+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
68195 pud_clear(pud);
68196 pmd_free_tlb(tlb, pmd, start);
68197+#endif
68198+
68199 }
68200
68201 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
68202@@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
68203 if (end - 1 > ceiling - 1)
68204 return;
68205
68206+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
68207 pud = pud_offset(pgd, start);
68208 pgd_clear(pgd);
68209 pud_free_tlb(tlb, pud, start);
68210+#endif
68211+
68212 }
68213
68214 /*
68215@@ -1566,12 +1573,6 @@ no_page_table:
68216 return page;
68217 }
68218
68219-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
68220-{
68221- return stack_guard_page_start(vma, addr) ||
68222- stack_guard_page_end(vma, addr+PAGE_SIZE);
68223-}
68224-
68225 /**
68226 * __get_user_pages() - pin user pages in memory
68227 * @tsk: task_struct of target task
68228@@ -1644,10 +1645,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
68229 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
68230 i = 0;
68231
68232- do {
68233+ while (nr_pages) {
68234 struct vm_area_struct *vma;
68235
68236- vma = find_extend_vma(mm, start);
68237+ vma = find_vma(mm, start);
68238 if (!vma && in_gate_area(mm, start)) {
68239 unsigned long pg = start & PAGE_MASK;
68240 pgd_t *pgd;
68241@@ -1695,7 +1696,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
68242 goto next_page;
68243 }
68244
68245- if (!vma ||
68246+ if (!vma || start < vma->vm_start ||
68247 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
68248 !(vm_flags & vma->vm_flags))
68249 return i ? : -EFAULT;
68250@@ -1722,11 +1723,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
68251 int ret;
68252 unsigned int fault_flags = 0;
68253
68254- /* For mlock, just skip the stack guard page. */
68255- if (foll_flags & FOLL_MLOCK) {
68256- if (stack_guard_page(vma, start))
68257- goto next_page;
68258- }
68259 if (foll_flags & FOLL_WRITE)
68260 fault_flags |= FAULT_FLAG_WRITE;
68261 if (nonblocking)
68262@@ -1800,7 +1796,7 @@ next_page:
68263 start += PAGE_SIZE;
68264 nr_pages--;
68265 } while (nr_pages && start < vma->vm_end);
68266- } while (nr_pages);
68267+ }
68268 return i;
68269 }
68270 EXPORT_SYMBOL(__get_user_pages);
68271@@ -2007,6 +2003,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
68272 page_add_file_rmap(page);
68273 set_pte_at(mm, addr, pte, mk_pte(page, prot));
68274
68275+#ifdef CONFIG_PAX_SEGMEXEC
68276+ pax_mirror_file_pte(vma, addr, page, ptl);
68277+#endif
68278+
68279 retval = 0;
68280 pte_unmap_unlock(pte, ptl);
68281 return retval;
68282@@ -2041,10 +2041,22 @@ out:
68283 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
68284 struct page *page)
68285 {
68286+
68287+#ifdef CONFIG_PAX_SEGMEXEC
68288+ struct vm_area_struct *vma_m;
68289+#endif
68290+
68291 if (addr < vma->vm_start || addr >= vma->vm_end)
68292 return -EFAULT;
68293 if (!page_count(page))
68294 return -EINVAL;
68295+
68296+#ifdef CONFIG_PAX_SEGMEXEC
68297+ vma_m = pax_find_mirror_vma(vma);
68298+ if (vma_m)
68299+ vma_m->vm_flags |= VM_INSERTPAGE;
68300+#endif
68301+
68302 vma->vm_flags |= VM_INSERTPAGE;
68303 return insert_page(vma, addr, page, vma->vm_page_prot);
68304 }
68305@@ -2130,6 +2142,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
68306 unsigned long pfn)
68307 {
68308 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
68309+ BUG_ON(vma->vm_mirror);
68310
68311 if (addr < vma->vm_start || addr >= vma->vm_end)
68312 return -EFAULT;
68313@@ -2445,6 +2458,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
68314 copy_user_highpage(dst, src, va, vma);
68315 }
68316
68317+#ifdef CONFIG_PAX_SEGMEXEC
68318+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
68319+{
68320+ struct mm_struct *mm = vma->vm_mm;
68321+ spinlock_t *ptl;
68322+ pte_t *pte, entry;
68323+
68324+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
68325+ entry = *pte;
68326+ if (!pte_present(entry)) {
68327+ if (!pte_none(entry)) {
68328+ BUG_ON(pte_file(entry));
68329+ free_swap_and_cache(pte_to_swp_entry(entry));
68330+ pte_clear_not_present_full(mm, address, pte, 0);
68331+ }
68332+ } else {
68333+ struct page *page;
68334+
68335+ flush_cache_page(vma, address, pte_pfn(entry));
68336+ entry = ptep_clear_flush(vma, address, pte);
68337+ BUG_ON(pte_dirty(entry));
68338+ page = vm_normal_page(vma, address, entry);
68339+ if (page) {
68340+ update_hiwater_rss(mm);
68341+ if (PageAnon(page))
68342+ dec_mm_counter_fast(mm, MM_ANONPAGES);
68343+ else
68344+ dec_mm_counter_fast(mm, MM_FILEPAGES);
68345+ page_remove_rmap(page);
68346+ page_cache_release(page);
68347+ }
68348+ }
68349+ pte_unmap_unlock(pte, ptl);
68350+}
68351+
68352+/* PaX: if vma is mirrored, synchronize the mirror's PTE
68353+ *
68354+ * the ptl of the lower mapped page is held on entry and is not released on exit
68355+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
68356+ */
68357+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
68358+{
68359+ struct mm_struct *mm = vma->vm_mm;
68360+ unsigned long address_m;
68361+ spinlock_t *ptl_m;
68362+ struct vm_area_struct *vma_m;
68363+ pmd_t *pmd_m;
68364+ pte_t *pte_m, entry_m;
68365+
68366+ BUG_ON(!page_m || !PageAnon(page_m));
68367+
68368+ vma_m = pax_find_mirror_vma(vma);
68369+ if (!vma_m)
68370+ return;
68371+
68372+ BUG_ON(!PageLocked(page_m));
68373+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68374+ address_m = address + SEGMEXEC_TASK_SIZE;
68375+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68376+ pte_m = pte_offset_map(pmd_m, address_m);
68377+ ptl_m = pte_lockptr(mm, pmd_m);
68378+ if (ptl != ptl_m) {
68379+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68380+ if (!pte_none(*pte_m))
68381+ goto out;
68382+ }
68383+
68384+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68385+ page_cache_get(page_m);
68386+ page_add_anon_rmap(page_m, vma_m, address_m);
68387+ inc_mm_counter_fast(mm, MM_ANONPAGES);
68388+ set_pte_at(mm, address_m, pte_m, entry_m);
68389+ update_mmu_cache(vma_m, address_m, entry_m);
68390+out:
68391+ if (ptl != ptl_m)
68392+ spin_unlock(ptl_m);
68393+ pte_unmap(pte_m);
68394+ unlock_page(page_m);
68395+}
68396+
68397+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
68398+{
68399+ struct mm_struct *mm = vma->vm_mm;
68400+ unsigned long address_m;
68401+ spinlock_t *ptl_m;
68402+ struct vm_area_struct *vma_m;
68403+ pmd_t *pmd_m;
68404+ pte_t *pte_m, entry_m;
68405+
68406+ BUG_ON(!page_m || PageAnon(page_m));
68407+
68408+ vma_m = pax_find_mirror_vma(vma);
68409+ if (!vma_m)
68410+ return;
68411+
68412+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68413+ address_m = address + SEGMEXEC_TASK_SIZE;
68414+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68415+ pte_m = pte_offset_map(pmd_m, address_m);
68416+ ptl_m = pte_lockptr(mm, pmd_m);
68417+ if (ptl != ptl_m) {
68418+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68419+ if (!pte_none(*pte_m))
68420+ goto out;
68421+ }
68422+
68423+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68424+ page_cache_get(page_m);
68425+ page_add_file_rmap(page_m);
68426+ inc_mm_counter_fast(mm, MM_FILEPAGES);
68427+ set_pte_at(mm, address_m, pte_m, entry_m);
68428+ update_mmu_cache(vma_m, address_m, entry_m);
68429+out:
68430+ if (ptl != ptl_m)
68431+ spin_unlock(ptl_m);
68432+ pte_unmap(pte_m);
68433+}
68434+
68435+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
68436+{
68437+ struct mm_struct *mm = vma->vm_mm;
68438+ unsigned long address_m;
68439+ spinlock_t *ptl_m;
68440+ struct vm_area_struct *vma_m;
68441+ pmd_t *pmd_m;
68442+ pte_t *pte_m, entry_m;
68443+
68444+ vma_m = pax_find_mirror_vma(vma);
68445+ if (!vma_m)
68446+ return;
68447+
68448+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68449+ address_m = address + SEGMEXEC_TASK_SIZE;
68450+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68451+ pte_m = pte_offset_map(pmd_m, address_m);
68452+ ptl_m = pte_lockptr(mm, pmd_m);
68453+ if (ptl != ptl_m) {
68454+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68455+ if (!pte_none(*pte_m))
68456+ goto out;
68457+ }
68458+
68459+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
68460+ set_pte_at(mm, address_m, pte_m, entry_m);
68461+out:
68462+ if (ptl != ptl_m)
68463+ spin_unlock(ptl_m);
68464+ pte_unmap(pte_m);
68465+}
68466+
68467+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
68468+{
68469+ struct page *page_m;
68470+ pte_t entry;
68471+
68472+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
68473+ goto out;
68474+
68475+ entry = *pte;
68476+ page_m = vm_normal_page(vma, address, entry);
68477+ if (!page_m)
68478+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
68479+ else if (PageAnon(page_m)) {
68480+ if (pax_find_mirror_vma(vma)) {
68481+ pte_unmap_unlock(pte, ptl);
68482+ lock_page(page_m);
68483+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
68484+ if (pte_same(entry, *pte))
68485+ pax_mirror_anon_pte(vma, address, page_m, ptl);
68486+ else
68487+ unlock_page(page_m);
68488+ }
68489+ } else
68490+ pax_mirror_file_pte(vma, address, page_m, ptl);
68491+
68492+out:
68493+ pte_unmap_unlock(pte, ptl);
68494+}
68495+#endif
68496+
68497 /*
68498 * This routine handles present pages, when users try to write
68499 * to a shared page. It is done by copying the page to a new address
68500@@ -2656,6 +2849,12 @@ gotten:
68501 */
68502 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68503 if (likely(pte_same(*page_table, orig_pte))) {
68504+
68505+#ifdef CONFIG_PAX_SEGMEXEC
68506+ if (pax_find_mirror_vma(vma))
68507+ BUG_ON(!trylock_page(new_page));
68508+#endif
68509+
68510 if (old_page) {
68511 if (!PageAnon(old_page)) {
68512 dec_mm_counter_fast(mm, MM_FILEPAGES);
68513@@ -2707,6 +2906,10 @@ gotten:
68514 page_remove_rmap(old_page);
68515 }
68516
68517+#ifdef CONFIG_PAX_SEGMEXEC
68518+ pax_mirror_anon_pte(vma, address, new_page, ptl);
68519+#endif
68520+
68521 /* Free the old page.. */
68522 new_page = old_page;
68523 ret |= VM_FAULT_WRITE;
68524@@ -2986,6 +3189,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
68525 swap_free(entry);
68526 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
68527 try_to_free_swap(page);
68528+
68529+#ifdef CONFIG_PAX_SEGMEXEC
68530+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
68531+#endif
68532+
68533 unlock_page(page);
68534 if (swapcache) {
68535 /*
68536@@ -3009,6 +3217,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
68537
68538 /* No need to invalidate - it was non-present before */
68539 update_mmu_cache(vma, address, page_table);
68540+
68541+#ifdef CONFIG_PAX_SEGMEXEC
68542+ pax_mirror_anon_pte(vma, address, page, ptl);
68543+#endif
68544+
68545 unlock:
68546 pte_unmap_unlock(page_table, ptl);
68547 out:
68548@@ -3028,40 +3241,6 @@ out_release:
68549 }
68550
68551 /*
68552- * This is like a special single-page "expand_{down|up}wards()",
68553- * except we must first make sure that 'address{-|+}PAGE_SIZE'
68554- * doesn't hit another vma.
68555- */
68556-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
68557-{
68558- address &= PAGE_MASK;
68559- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
68560- struct vm_area_struct *prev = vma->vm_prev;
68561-
68562- /*
68563- * Is there a mapping abutting this one below?
68564- *
68565- * That's only ok if it's the same stack mapping
68566- * that has gotten split..
68567- */
68568- if (prev && prev->vm_end == address)
68569- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
68570-
68571- expand_downwards(vma, address - PAGE_SIZE);
68572- }
68573- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
68574- struct vm_area_struct *next = vma->vm_next;
68575-
68576- /* As VM_GROWSDOWN but s/below/above/ */
68577- if (next && next->vm_start == address + PAGE_SIZE)
68578- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
68579-
68580- expand_upwards(vma, address + PAGE_SIZE);
68581- }
68582- return 0;
68583-}
68584-
68585-/*
68586 * We enter with non-exclusive mmap_sem (to exclude vma changes,
68587 * but allow concurrent faults), and pte mapped but not yet locked.
68588 * We return with mmap_sem still held, but pte unmapped and unlocked.
68589@@ -3070,27 +3249,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
68590 unsigned long address, pte_t *page_table, pmd_t *pmd,
68591 unsigned int flags)
68592 {
68593- struct page *page;
68594+ struct page *page = NULL;
68595 spinlock_t *ptl;
68596 pte_t entry;
68597
68598- pte_unmap(page_table);
68599-
68600- /* Check if we need to add a guard page to the stack */
68601- if (check_stack_guard_page(vma, address) < 0)
68602- return VM_FAULT_SIGBUS;
68603-
68604- /* Use the zero-page for reads */
68605 if (!(flags & FAULT_FLAG_WRITE)) {
68606 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
68607 vma->vm_page_prot));
68608- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68609+ ptl = pte_lockptr(mm, pmd);
68610+ spin_lock(ptl);
68611 if (!pte_none(*page_table))
68612 goto unlock;
68613 goto setpte;
68614 }
68615
68616 /* Allocate our own private page. */
68617+ pte_unmap(page_table);
68618+
68619 if (unlikely(anon_vma_prepare(vma)))
68620 goto oom;
68621 page = alloc_zeroed_user_highpage_movable(vma, address);
68622@@ -3109,6 +3284,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
68623 if (!pte_none(*page_table))
68624 goto release;
68625
68626+#ifdef CONFIG_PAX_SEGMEXEC
68627+ if (pax_find_mirror_vma(vma))
68628+ BUG_ON(!trylock_page(page));
68629+#endif
68630+
68631 inc_mm_counter_fast(mm, MM_ANONPAGES);
68632 page_add_new_anon_rmap(page, vma, address);
68633 setpte:
68634@@ -3116,6 +3296,12 @@ setpte:
68635
68636 /* No need to invalidate - it was non-present before */
68637 update_mmu_cache(vma, address, page_table);
68638+
68639+#ifdef CONFIG_PAX_SEGMEXEC
68640+ if (page)
68641+ pax_mirror_anon_pte(vma, address, page, ptl);
68642+#endif
68643+
68644 unlock:
68645 pte_unmap_unlock(page_table, ptl);
68646 return 0;
68647@@ -3259,6 +3445,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68648 */
68649 /* Only go through if we didn't race with anybody else... */
68650 if (likely(pte_same(*page_table, orig_pte))) {
68651+
68652+#ifdef CONFIG_PAX_SEGMEXEC
68653+ if (anon && pax_find_mirror_vma(vma))
68654+ BUG_ON(!trylock_page(page));
68655+#endif
68656+
68657 flush_icache_page(vma, page);
68658 entry = mk_pte(page, vma->vm_page_prot);
68659 if (flags & FAULT_FLAG_WRITE)
68660@@ -3278,6 +3470,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68661
68662 /* no need to invalidate: a not-present page won't be cached */
68663 update_mmu_cache(vma, address, page_table);
68664+
68665+#ifdef CONFIG_PAX_SEGMEXEC
68666+ if (anon)
68667+ pax_mirror_anon_pte(vma, address, page, ptl);
68668+ else
68669+ pax_mirror_file_pte(vma, address, page, ptl);
68670+#endif
68671+
68672 } else {
68673 if (cow_page)
68674 mem_cgroup_uncharge_page(cow_page);
68675@@ -3431,6 +3631,12 @@ int handle_pte_fault(struct mm_struct *mm,
68676 if (flags & FAULT_FLAG_WRITE)
68677 flush_tlb_fix_spurious_fault(vma, address);
68678 }
68679+
68680+#ifdef CONFIG_PAX_SEGMEXEC
68681+ pax_mirror_pte(vma, address, pte, pmd, ptl);
68682+ return 0;
68683+#endif
68684+
68685 unlock:
68686 pte_unmap_unlock(pte, ptl);
68687 return 0;
68688@@ -3447,6 +3653,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68689 pmd_t *pmd;
68690 pte_t *pte;
68691
68692+#ifdef CONFIG_PAX_SEGMEXEC
68693+ struct vm_area_struct *vma_m;
68694+#endif
68695+
68696 __set_current_state(TASK_RUNNING);
68697
68698 count_vm_event(PGFAULT);
68699@@ -3458,6 +3668,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68700 if (unlikely(is_vm_hugetlb_page(vma)))
68701 return hugetlb_fault(mm, vma, address, flags);
68702
68703+#ifdef CONFIG_PAX_SEGMEXEC
68704+ vma_m = pax_find_mirror_vma(vma);
68705+ if (vma_m) {
68706+ unsigned long address_m;
68707+ pgd_t *pgd_m;
68708+ pud_t *pud_m;
68709+ pmd_t *pmd_m;
68710+
68711+ if (vma->vm_start > vma_m->vm_start) {
68712+ address_m = address;
68713+ address -= SEGMEXEC_TASK_SIZE;
68714+ vma = vma_m;
68715+ } else
68716+ address_m = address + SEGMEXEC_TASK_SIZE;
68717+
68718+ pgd_m = pgd_offset(mm, address_m);
68719+ pud_m = pud_alloc(mm, pgd_m, address_m);
68720+ if (!pud_m)
68721+ return VM_FAULT_OOM;
68722+ pmd_m = pmd_alloc(mm, pud_m, address_m);
68723+ if (!pmd_m)
68724+ return VM_FAULT_OOM;
68725+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
68726+ return VM_FAULT_OOM;
68727+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
68728+ }
68729+#endif
68730+
68731 pgd = pgd_offset(mm, address);
68732 pud = pud_alloc(mm, pgd, address);
68733 if (!pud)
68734@@ -3487,7 +3725,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68735 * run pte_offset_map on the pmd, if an huge pmd could
68736 * materialize from under us from a different thread.
68737 */
68738- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
68739+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
68740 return VM_FAULT_OOM;
68741 /* if an huge pmd materialized from under us just retry later */
68742 if (unlikely(pmd_trans_huge(*pmd)))
68743@@ -3591,7 +3829,7 @@ static int __init gate_vma_init(void)
68744 gate_vma.vm_start = FIXADDR_USER_START;
68745 gate_vma.vm_end = FIXADDR_USER_END;
68746 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
68747- gate_vma.vm_page_prot = __P101;
68748+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
68749 /*
68750 * Make sure the vDSO gets into every core dump.
68751 * Dumping its contents makes post-mortem fully interpretable later
68752diff --git a/mm/mempolicy.c b/mm/mempolicy.c
68753index c3fdbcb..2e8ef90 100644
68754--- a/mm/mempolicy.c
68755+++ b/mm/mempolicy.c
68756@@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
68757 unsigned long vmstart;
68758 unsigned long vmend;
68759
68760+#ifdef CONFIG_PAX_SEGMEXEC
68761+ struct vm_area_struct *vma_m;
68762+#endif
68763+
68764 vma = find_vma_prev(mm, start, &prev);
68765 if (!vma || vma->vm_start > start)
68766 return -EFAULT;
68767@@ -678,6 +682,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
68768 err = policy_vma(vma, new_pol);
68769 if (err)
68770 goto out;
68771+
68772+#ifdef CONFIG_PAX_SEGMEXEC
68773+ vma_m = pax_find_mirror_vma(vma);
68774+ if (vma_m) {
68775+ err = policy_vma(vma_m, new_pol);
68776+ if (err)
68777+ goto out;
68778+ }
68779+#endif
68780+
68781 }
68782
68783 out:
68784@@ -1111,6 +1125,17 @@ static long do_mbind(unsigned long start, unsigned long len,
68785
68786 if (end < start)
68787 return -EINVAL;
68788+
68789+#ifdef CONFIG_PAX_SEGMEXEC
68790+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68791+ if (end > SEGMEXEC_TASK_SIZE)
68792+ return -EINVAL;
68793+ } else
68794+#endif
68795+
68796+ if (end > TASK_SIZE)
68797+ return -EINVAL;
68798+
68799 if (end == start)
68800 return 0;
68801
68802@@ -1329,6 +1354,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
68803 if (!mm)
68804 goto out;
68805
68806+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68807+ if (mm != current->mm &&
68808+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68809+ err = -EPERM;
68810+ goto out;
68811+ }
68812+#endif
68813+
68814 /*
68815 * Check if this process has the right to modify the specified
68816 * process. The right exists if the process has administrative
68817@@ -1338,8 +1371,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
68818 rcu_read_lock();
68819 tcred = __task_cred(task);
68820 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68821- cred->uid != tcred->suid && cred->uid != tcred->uid &&
68822- !capable(CAP_SYS_NICE)) {
68823+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68824 rcu_read_unlock();
68825 err = -EPERM;
68826 goto out;
68827diff --git a/mm/migrate.c b/mm/migrate.c
68828index 177aca4..ab3a744 100644
68829--- a/mm/migrate.c
68830+++ b/mm/migrate.c
68831@@ -1313,6 +1313,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
68832 if (!mm)
68833 return -EINVAL;
68834
68835+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68836+ if (mm != current->mm &&
68837+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68838+ err = -EPERM;
68839+ goto out;
68840+ }
68841+#endif
68842+
68843 /*
68844 * Check if this process has the right to modify the specified
68845 * process. The right exists if the process has administrative
68846@@ -1322,8 +1330,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
68847 rcu_read_lock();
68848 tcred = __task_cred(task);
68849 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68850- cred->uid != tcred->suid && cred->uid != tcred->uid &&
68851- !capable(CAP_SYS_NICE)) {
68852+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68853 rcu_read_unlock();
68854 err = -EPERM;
68855 goto out;
68856diff --git a/mm/mlock.c b/mm/mlock.c
68857index 4f4f53b..9511904 100644
68858--- a/mm/mlock.c
68859+++ b/mm/mlock.c
68860@@ -13,6 +13,7 @@
68861 #include <linux/pagemap.h>
68862 #include <linux/mempolicy.h>
68863 #include <linux/syscalls.h>
68864+#include <linux/security.h>
68865 #include <linux/sched.h>
68866 #include <linux/export.h>
68867 #include <linux/rmap.h>
68868@@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
68869 return -EINVAL;
68870 if (end == start)
68871 return 0;
68872+ if (end > TASK_SIZE)
68873+ return -EINVAL;
68874+
68875 vma = find_vma_prev(current->mm, start, &prev);
68876 if (!vma || vma->vm_start > start)
68877 return -ENOMEM;
68878@@ -395,6 +399,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
68879 for (nstart = start ; ; ) {
68880 vm_flags_t newflags;
68881
68882+#ifdef CONFIG_PAX_SEGMEXEC
68883+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68884+ break;
68885+#endif
68886+
68887 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
68888
68889 newflags = vma->vm_flags | VM_LOCKED;
68890@@ -500,6 +509,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
68891 lock_limit >>= PAGE_SHIFT;
68892
68893 /* check against resource limits */
68894+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
68895 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
68896 error = do_mlock(start, len, 1);
68897 up_write(&current->mm->mmap_sem);
68898@@ -523,17 +533,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
68899 static int do_mlockall(int flags)
68900 {
68901 struct vm_area_struct * vma, * prev = NULL;
68902- unsigned int def_flags = 0;
68903
68904 if (flags & MCL_FUTURE)
68905- def_flags = VM_LOCKED;
68906- current->mm->def_flags = def_flags;
68907+ current->mm->def_flags |= VM_LOCKED;
68908+ else
68909+ current->mm->def_flags &= ~VM_LOCKED;
68910 if (flags == MCL_FUTURE)
68911 goto out;
68912
68913 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
68914 vm_flags_t newflags;
68915
68916+#ifdef CONFIG_PAX_SEGMEXEC
68917+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68918+ break;
68919+#endif
68920+
68921+ BUG_ON(vma->vm_end > TASK_SIZE);
68922 newflags = vma->vm_flags | VM_LOCKED;
68923 if (!(flags & MCL_CURRENT))
68924 newflags &= ~VM_LOCKED;
68925@@ -566,6 +582,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
68926 lock_limit >>= PAGE_SHIFT;
68927
68928 ret = -ENOMEM;
68929+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
68930 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
68931 capable(CAP_IPC_LOCK))
68932 ret = do_mlockall(flags);
68933diff --git a/mm/mmap.c b/mm/mmap.c
68934index eae90af..44552cf 100644
68935--- a/mm/mmap.c
68936+++ b/mm/mmap.c
68937@@ -46,6 +46,16 @@
68938 #define arch_rebalance_pgtables(addr, len) (addr)
68939 #endif
68940
68941+static inline void verify_mm_writelocked(struct mm_struct *mm)
68942+{
68943+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
68944+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68945+ up_read(&mm->mmap_sem);
68946+ BUG();
68947+ }
68948+#endif
68949+}
68950+
68951 static void unmap_region(struct mm_struct *mm,
68952 struct vm_area_struct *vma, struct vm_area_struct *prev,
68953 unsigned long start, unsigned long end);
68954@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
68955 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
68956 *
68957 */
68958-pgprot_t protection_map[16] = {
68959+pgprot_t protection_map[16] __read_only = {
68960 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
68961 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
68962 };
68963
68964-pgprot_t vm_get_page_prot(unsigned long vm_flags)
68965+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
68966 {
68967- return __pgprot(pgprot_val(protection_map[vm_flags &
68968+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
68969 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
68970 pgprot_val(arch_vm_get_page_prot(vm_flags)));
68971+
68972+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68973+ if (!(__supported_pte_mask & _PAGE_NX) &&
68974+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
68975+ (vm_flags & (VM_READ | VM_WRITE)))
68976+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
68977+#endif
68978+
68979+ return prot;
68980 }
68981 EXPORT_SYMBOL(vm_get_page_prot);
68982
68983 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
68984 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
68985 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
68986+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
68987 /*
68988 * Make sure vm_committed_as in one cacheline and not cacheline shared with
68989 * other variables. It can be updated by several CPUs frequently.
68990@@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
68991 struct vm_area_struct *next = vma->vm_next;
68992
68993 might_sleep();
68994+ BUG_ON(vma->vm_mirror);
68995 if (vma->vm_ops && vma->vm_ops->close)
68996 vma->vm_ops->close(vma);
68997 if (vma->vm_file) {
68998@@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
68999 * not page aligned -Ram Gupta
69000 */
69001 rlim = rlimit(RLIMIT_DATA);
69002+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
69003 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
69004 (mm->end_data - mm->start_data) > rlim)
69005 goto out;
69006@@ -689,6 +711,12 @@ static int
69007 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
69008 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
69009 {
69010+
69011+#ifdef CONFIG_PAX_SEGMEXEC
69012+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
69013+ return 0;
69014+#endif
69015+
69016 if (is_mergeable_vma(vma, file, vm_flags) &&
69017 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
69018 if (vma->vm_pgoff == vm_pgoff)
69019@@ -708,6 +736,12 @@ static int
69020 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
69021 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
69022 {
69023+
69024+#ifdef CONFIG_PAX_SEGMEXEC
69025+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
69026+ return 0;
69027+#endif
69028+
69029 if (is_mergeable_vma(vma, file, vm_flags) &&
69030 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
69031 pgoff_t vm_pglen;
69032@@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
69033 struct vm_area_struct *vma_merge(struct mm_struct *mm,
69034 struct vm_area_struct *prev, unsigned long addr,
69035 unsigned long end, unsigned long vm_flags,
69036- struct anon_vma *anon_vma, struct file *file,
69037+ struct anon_vma *anon_vma, struct file *file,
69038 pgoff_t pgoff, struct mempolicy *policy)
69039 {
69040 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
69041 struct vm_area_struct *area, *next;
69042 int err;
69043
69044+#ifdef CONFIG_PAX_SEGMEXEC
69045+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
69046+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
69047+
69048+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
69049+#endif
69050+
69051 /*
69052 * We later require that vma->vm_flags == vm_flags,
69053 * so this tests vma->vm_flags & VM_SPECIAL, too.
69054@@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
69055 if (next && next->vm_end == end) /* cases 6, 7, 8 */
69056 next = next->vm_next;
69057
69058+#ifdef CONFIG_PAX_SEGMEXEC
69059+ if (prev)
69060+ prev_m = pax_find_mirror_vma(prev);
69061+ if (area)
69062+ area_m = pax_find_mirror_vma(area);
69063+ if (next)
69064+ next_m = pax_find_mirror_vma(next);
69065+#endif
69066+
69067 /*
69068 * Can it merge with the predecessor?
69069 */
69070@@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
69071 /* cases 1, 6 */
69072 err = vma_adjust(prev, prev->vm_start,
69073 next->vm_end, prev->vm_pgoff, NULL);
69074- } else /* cases 2, 5, 7 */
69075+
69076+#ifdef CONFIG_PAX_SEGMEXEC
69077+ if (!err && prev_m)
69078+ err = vma_adjust(prev_m, prev_m->vm_start,
69079+ next_m->vm_end, prev_m->vm_pgoff, NULL);
69080+#endif
69081+
69082+ } else { /* cases 2, 5, 7 */
69083 err = vma_adjust(prev, prev->vm_start,
69084 end, prev->vm_pgoff, NULL);
69085+
69086+#ifdef CONFIG_PAX_SEGMEXEC
69087+ if (!err && prev_m)
69088+ err = vma_adjust(prev_m, prev_m->vm_start,
69089+ end_m, prev_m->vm_pgoff, NULL);
69090+#endif
69091+
69092+ }
69093 if (err)
69094 return NULL;
69095 khugepaged_enter_vma_merge(prev);
69096@@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
69097 mpol_equal(policy, vma_policy(next)) &&
69098 can_vma_merge_before(next, vm_flags,
69099 anon_vma, file, pgoff+pglen)) {
69100- if (prev && addr < prev->vm_end) /* case 4 */
69101+ if (prev && addr < prev->vm_end) { /* case 4 */
69102 err = vma_adjust(prev, prev->vm_start,
69103 addr, prev->vm_pgoff, NULL);
69104- else /* cases 3, 8 */
69105+
69106+#ifdef CONFIG_PAX_SEGMEXEC
69107+ if (!err && prev_m)
69108+ err = vma_adjust(prev_m, prev_m->vm_start,
69109+ addr_m, prev_m->vm_pgoff, NULL);
69110+#endif
69111+
69112+ } else { /* cases 3, 8 */
69113 err = vma_adjust(area, addr, next->vm_end,
69114 next->vm_pgoff - pglen, NULL);
69115+
69116+#ifdef CONFIG_PAX_SEGMEXEC
69117+ if (!err && area_m)
69118+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
69119+ next_m->vm_pgoff - pglen, NULL);
69120+#endif
69121+
69122+ }
69123 if (err)
69124 return NULL;
69125 khugepaged_enter_vma_merge(area);
69126@@ -921,14 +1001,11 @@ none:
69127 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
69128 struct file *file, long pages)
69129 {
69130- const unsigned long stack_flags
69131- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
69132-
69133 if (file) {
69134 mm->shared_vm += pages;
69135 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
69136 mm->exec_vm += pages;
69137- } else if (flags & stack_flags)
69138+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
69139 mm->stack_vm += pages;
69140 if (flags & (VM_RESERVED|VM_IO))
69141 mm->reserved_vm += pages;
69142@@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
69143 * (the exception is when the underlying filesystem is noexec
69144 * mounted, in which case we dont add PROT_EXEC.)
69145 */
69146- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
69147+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
69148 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
69149 prot |= PROT_EXEC;
69150
69151@@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
69152 /* Obtain the address to map to. we verify (or select) it and ensure
69153 * that it represents a valid section of the address space.
69154 */
69155- addr = get_unmapped_area(file, addr, len, pgoff, flags);
69156+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
69157 if (addr & ~PAGE_MASK)
69158 return addr;
69159
69160@@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
69161 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
69162 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
69163
69164+#ifdef CONFIG_PAX_MPROTECT
69165+ if (mm->pax_flags & MF_PAX_MPROTECT) {
69166+#ifndef CONFIG_PAX_MPROTECT_COMPAT
69167+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
69168+ gr_log_rwxmmap(file);
69169+
69170+#ifdef CONFIG_PAX_EMUPLT
69171+ vm_flags &= ~VM_EXEC;
69172+#else
69173+ return -EPERM;
69174+#endif
69175+
69176+ }
69177+
69178+ if (!(vm_flags & VM_EXEC))
69179+ vm_flags &= ~VM_MAYEXEC;
69180+#else
69181+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69182+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69183+#endif
69184+ else
69185+ vm_flags &= ~VM_MAYWRITE;
69186+ }
69187+#endif
69188+
69189+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
69190+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
69191+ vm_flags &= ~VM_PAGEEXEC;
69192+#endif
69193+
69194 if (flags & MAP_LOCKED)
69195 if (!can_do_mlock())
69196 return -EPERM;
69197@@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
69198 locked += mm->locked_vm;
69199 lock_limit = rlimit(RLIMIT_MEMLOCK);
69200 lock_limit >>= PAGE_SHIFT;
69201+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
69202 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
69203 return -EAGAIN;
69204 }
69205@@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
69206 if (error)
69207 return error;
69208
69209+ if (!gr_acl_handle_mmap(file, prot))
69210+ return -EACCES;
69211+
69212 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
69213 }
69214 EXPORT_SYMBOL(do_mmap_pgoff);
69215@@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
69216 vm_flags_t vm_flags = vma->vm_flags;
69217
69218 /* If it was private or non-writable, the write bit is already clear */
69219- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
69220+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
69221 return 0;
69222
69223 /* The backer wishes to know when pages are first written to? */
69224@@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
69225 unsigned long charged = 0;
69226 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
69227
69228+#ifdef CONFIG_PAX_SEGMEXEC
69229+ struct vm_area_struct *vma_m = NULL;
69230+#endif
69231+
69232+ /*
69233+ * mm->mmap_sem is required to protect against another thread
69234+ * changing the mappings in case we sleep.
69235+ */
69236+ verify_mm_writelocked(mm);
69237+
69238 /* Clear old maps */
69239 error = -ENOMEM;
69240-munmap_back:
69241 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69242 if (vma && vma->vm_start < addr + len) {
69243 if (do_munmap(mm, addr, len))
69244 return -ENOMEM;
69245- goto munmap_back;
69246+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69247+ BUG_ON(vma && vma->vm_start < addr + len);
69248 }
69249
69250 /* Check against address space limit. */
69251@@ -1258,6 +1379,16 @@ munmap_back:
69252 goto unacct_error;
69253 }
69254
69255+#ifdef CONFIG_PAX_SEGMEXEC
69256+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
69257+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69258+ if (!vma_m) {
69259+ error = -ENOMEM;
69260+ goto free_vma;
69261+ }
69262+ }
69263+#endif
69264+
69265 vma->vm_mm = mm;
69266 vma->vm_start = addr;
69267 vma->vm_end = addr + len;
69268@@ -1281,6 +1412,19 @@ munmap_back:
69269 error = file->f_op->mmap(file, vma);
69270 if (error)
69271 goto unmap_and_free_vma;
69272+
69273+#ifdef CONFIG_PAX_SEGMEXEC
69274+ if (vma_m && (vm_flags & VM_EXECUTABLE))
69275+ added_exe_file_vma(mm);
69276+#endif
69277+
69278+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
69279+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
69280+ vma->vm_flags |= VM_PAGEEXEC;
69281+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69282+ }
69283+#endif
69284+
69285 if (vm_flags & VM_EXECUTABLE)
69286 added_exe_file_vma(mm);
69287
69288@@ -1316,6 +1460,11 @@ munmap_back:
69289 vma_link(mm, vma, prev, rb_link, rb_parent);
69290 file = vma->vm_file;
69291
69292+#ifdef CONFIG_PAX_SEGMEXEC
69293+ if (vma_m)
69294+ BUG_ON(pax_mirror_vma(vma_m, vma));
69295+#endif
69296+
69297 /* Once vma denies write, undo our temporary denial count */
69298 if (correct_wcount)
69299 atomic_inc(&inode->i_writecount);
69300@@ -1324,6 +1473,7 @@ out:
69301
69302 mm->total_vm += len >> PAGE_SHIFT;
69303 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
69304+ track_exec_limit(mm, addr, addr + len, vm_flags);
69305 if (vm_flags & VM_LOCKED) {
69306 if (!mlock_vma_pages_range(vma, addr, addr + len))
69307 mm->locked_vm += (len >> PAGE_SHIFT);
69308@@ -1341,6 +1491,12 @@ unmap_and_free_vma:
69309 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
69310 charged = 0;
69311 free_vma:
69312+
69313+#ifdef CONFIG_PAX_SEGMEXEC
69314+ if (vma_m)
69315+ kmem_cache_free(vm_area_cachep, vma_m);
69316+#endif
69317+
69318 kmem_cache_free(vm_area_cachep, vma);
69319 unacct_error:
69320 if (charged)
69321@@ -1348,6 +1504,44 @@ unacct_error:
69322 return error;
69323 }
69324
69325+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
69326+{
69327+ if (!vma) {
69328+#ifdef CONFIG_STACK_GROWSUP
69329+ if (addr > sysctl_heap_stack_gap)
69330+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
69331+ else
69332+ vma = find_vma(current->mm, 0);
69333+ if (vma && (vma->vm_flags & VM_GROWSUP))
69334+ return false;
69335+#endif
69336+ return true;
69337+ }
69338+
69339+ if (addr + len > vma->vm_start)
69340+ return false;
69341+
69342+ if (vma->vm_flags & VM_GROWSDOWN)
69343+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
69344+#ifdef CONFIG_STACK_GROWSUP
69345+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
69346+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
69347+#endif
69348+
69349+ return true;
69350+}
69351+
69352+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
69353+{
69354+ if (vma->vm_start < len)
69355+ return -ENOMEM;
69356+ if (!(vma->vm_flags & VM_GROWSDOWN))
69357+ return vma->vm_start - len;
69358+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
69359+ return vma->vm_start - len - sysctl_heap_stack_gap;
69360+ return -ENOMEM;
69361+}
69362+
69363 /* Get an address range which is currently unmapped.
69364 * For shmat() with addr=0.
69365 *
69366@@ -1374,18 +1568,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
69367 if (flags & MAP_FIXED)
69368 return addr;
69369
69370+#ifdef CONFIG_PAX_RANDMMAP
69371+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69372+#endif
69373+
69374 if (addr) {
69375 addr = PAGE_ALIGN(addr);
69376- vma = find_vma(mm, addr);
69377- if (TASK_SIZE - len >= addr &&
69378- (!vma || addr + len <= vma->vm_start))
69379- return addr;
69380+ if (TASK_SIZE - len >= addr) {
69381+ vma = find_vma(mm, addr);
69382+ if (check_heap_stack_gap(vma, addr, len))
69383+ return addr;
69384+ }
69385 }
69386 if (len > mm->cached_hole_size) {
69387- start_addr = addr = mm->free_area_cache;
69388+ start_addr = addr = mm->free_area_cache;
69389 } else {
69390- start_addr = addr = TASK_UNMAPPED_BASE;
69391- mm->cached_hole_size = 0;
69392+ start_addr = addr = mm->mmap_base;
69393+ mm->cached_hole_size = 0;
69394 }
69395
69396 full_search:
69397@@ -1396,34 +1595,40 @@ full_search:
69398 * Start a new search - just in case we missed
69399 * some holes.
69400 */
69401- if (start_addr != TASK_UNMAPPED_BASE) {
69402- addr = TASK_UNMAPPED_BASE;
69403- start_addr = addr;
69404+ if (start_addr != mm->mmap_base) {
69405+ start_addr = addr = mm->mmap_base;
69406 mm->cached_hole_size = 0;
69407 goto full_search;
69408 }
69409 return -ENOMEM;
69410 }
69411- if (!vma || addr + len <= vma->vm_start) {
69412- /*
69413- * Remember the place where we stopped the search:
69414- */
69415- mm->free_area_cache = addr + len;
69416- return addr;
69417- }
69418+ if (check_heap_stack_gap(vma, addr, len))
69419+ break;
69420 if (addr + mm->cached_hole_size < vma->vm_start)
69421 mm->cached_hole_size = vma->vm_start - addr;
69422 addr = vma->vm_end;
69423 }
69424+
69425+ /*
69426+ * Remember the place where we stopped the search:
69427+ */
69428+ mm->free_area_cache = addr + len;
69429+ return addr;
69430 }
69431 #endif
69432
69433 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
69434 {
69435+
69436+#ifdef CONFIG_PAX_SEGMEXEC
69437+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69438+ return;
69439+#endif
69440+
69441 /*
69442 * Is this a new hole at the lowest possible address?
69443 */
69444- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
69445+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
69446 mm->free_area_cache = addr;
69447 mm->cached_hole_size = ~0UL;
69448 }
69449@@ -1441,7 +1646,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69450 {
69451 struct vm_area_struct *vma;
69452 struct mm_struct *mm = current->mm;
69453- unsigned long addr = addr0;
69454+ unsigned long base = mm->mmap_base, addr = addr0;
69455
69456 /* requested length too big for entire address space */
69457 if (len > TASK_SIZE)
69458@@ -1450,13 +1655,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69459 if (flags & MAP_FIXED)
69460 return addr;
69461
69462+#ifdef CONFIG_PAX_RANDMMAP
69463+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69464+#endif
69465+
69466 /* requesting a specific address */
69467 if (addr) {
69468 addr = PAGE_ALIGN(addr);
69469- vma = find_vma(mm, addr);
69470- if (TASK_SIZE - len >= addr &&
69471- (!vma || addr + len <= vma->vm_start))
69472- return addr;
69473+ if (TASK_SIZE - len >= addr) {
69474+ vma = find_vma(mm, addr);
69475+ if (check_heap_stack_gap(vma, addr, len))
69476+ return addr;
69477+ }
69478 }
69479
69480 /* check if free_area_cache is useful for us */
69481@@ -1471,7 +1681,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69482 /* make sure it can fit in the remaining address space */
69483 if (addr > len) {
69484 vma = find_vma(mm, addr-len);
69485- if (!vma || addr <= vma->vm_start)
69486+ if (check_heap_stack_gap(vma, addr - len, len))
69487 /* remember the address as a hint for next time */
69488 return (mm->free_area_cache = addr-len);
69489 }
69490@@ -1488,7 +1698,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69491 * return with success:
69492 */
69493 vma = find_vma(mm, addr);
69494- if (!vma || addr+len <= vma->vm_start)
69495+ if (check_heap_stack_gap(vma, addr, len))
69496 /* remember the address as a hint for next time */
69497 return (mm->free_area_cache = addr);
69498
69499@@ -1497,8 +1707,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69500 mm->cached_hole_size = vma->vm_start - addr;
69501
69502 /* try just below the current vma->vm_start */
69503- addr = vma->vm_start-len;
69504- } while (len < vma->vm_start);
69505+ addr = skip_heap_stack_gap(vma, len);
69506+ } while (!IS_ERR_VALUE(addr));
69507
69508 bottomup:
69509 /*
69510@@ -1507,13 +1717,21 @@ bottomup:
69511 * can happen with large stack limits and large mmap()
69512 * allocations.
69513 */
69514+ mm->mmap_base = TASK_UNMAPPED_BASE;
69515+
69516+#ifdef CONFIG_PAX_RANDMMAP
69517+ if (mm->pax_flags & MF_PAX_RANDMMAP)
69518+ mm->mmap_base += mm->delta_mmap;
69519+#endif
69520+
69521+ mm->free_area_cache = mm->mmap_base;
69522 mm->cached_hole_size = ~0UL;
69523- mm->free_area_cache = TASK_UNMAPPED_BASE;
69524 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
69525 /*
69526 * Restore the topdown base:
69527 */
69528- mm->free_area_cache = mm->mmap_base;
69529+ mm->mmap_base = base;
69530+ mm->free_area_cache = base;
69531 mm->cached_hole_size = ~0UL;
69532
69533 return addr;
69534@@ -1522,6 +1740,12 @@ bottomup:
69535
69536 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
69537 {
69538+
69539+#ifdef CONFIG_PAX_SEGMEXEC
69540+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69541+ return;
69542+#endif
69543+
69544 /*
69545 * Is this a new hole at the highest possible address?
69546 */
69547@@ -1529,8 +1753,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
69548 mm->free_area_cache = addr;
69549
69550 /* dont allow allocations above current base */
69551- if (mm->free_area_cache > mm->mmap_base)
69552+ if (mm->free_area_cache > mm->mmap_base) {
69553 mm->free_area_cache = mm->mmap_base;
69554+ mm->cached_hole_size = ~0UL;
69555+ }
69556 }
69557
69558 unsigned long
69559@@ -1603,40 +1829,42 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
69560
69561 EXPORT_SYMBOL(find_vma);
69562
69563-/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
69564+/*
69565+ * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
69566+ * Note: pprev is set to NULL when return value is NULL.
69567+ */
69568 struct vm_area_struct *
69569 find_vma_prev(struct mm_struct *mm, unsigned long addr,
69570 struct vm_area_struct **pprev)
69571 {
69572- struct vm_area_struct *vma = NULL, *prev = NULL;
69573- struct rb_node *rb_node;
69574- if (!mm)
69575- goto out;
69576+ struct vm_area_struct *vma;
69577
69578- /* Guard against addr being lower than the first VMA */
69579- vma = mm->mmap;
69580+ vma = find_vma(mm, addr);
69581+ *pprev = vma ? vma->vm_prev : NULL;
69582+ return vma;
69583+}
69584
69585- /* Go through the RB tree quickly. */
69586- rb_node = mm->mm_rb.rb_node;
69587+#ifdef CONFIG_PAX_SEGMEXEC
69588+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
69589+{
69590+ struct vm_area_struct *vma_m;
69591
69592- while (rb_node) {
69593- struct vm_area_struct *vma_tmp;
69594- vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
69595-
69596- if (addr < vma_tmp->vm_end) {
69597- rb_node = rb_node->rb_left;
69598- } else {
69599- prev = vma_tmp;
69600- if (!prev->vm_next || (addr < prev->vm_next->vm_end))
69601- break;
69602- rb_node = rb_node->rb_right;
69603- }
69604+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
69605+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
69606+ BUG_ON(vma->vm_mirror);
69607+ return NULL;
69608 }
69609-
69610-out:
69611- *pprev = prev;
69612- return prev ? prev->vm_next : vma;
69613+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
69614+ vma_m = vma->vm_mirror;
69615+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
69616+ BUG_ON(vma->vm_file != vma_m->vm_file);
69617+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
69618+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
69619+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
69620+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
69621+ return vma_m;
69622 }
69623+#endif
69624
69625 /*
69626 * Verify that the stack growth is acceptable and
69627@@ -1654,6 +1882,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69628 return -ENOMEM;
69629
69630 /* Stack limit test */
69631+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
69632 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
69633 return -ENOMEM;
69634
69635@@ -1664,6 +1893,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69636 locked = mm->locked_vm + grow;
69637 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
69638 limit >>= PAGE_SHIFT;
69639+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
69640 if (locked > limit && !capable(CAP_IPC_LOCK))
69641 return -ENOMEM;
69642 }
69643@@ -1694,37 +1924,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69644 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
69645 * vma is the last one with address > vma->vm_end. Have to extend vma.
69646 */
69647+#ifndef CONFIG_IA64
69648+static
69649+#endif
69650 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69651 {
69652 int error;
69653+ bool locknext;
69654
69655 if (!(vma->vm_flags & VM_GROWSUP))
69656 return -EFAULT;
69657
69658+ /* Also guard against wrapping around to address 0. */
69659+ if (address < PAGE_ALIGN(address+1))
69660+ address = PAGE_ALIGN(address+1);
69661+ else
69662+ return -ENOMEM;
69663+
69664 /*
69665 * We must make sure the anon_vma is allocated
69666 * so that the anon_vma locking is not a noop.
69667 */
69668 if (unlikely(anon_vma_prepare(vma)))
69669 return -ENOMEM;
69670+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
69671+ if (locknext && anon_vma_prepare(vma->vm_next))
69672+ return -ENOMEM;
69673 vma_lock_anon_vma(vma);
69674+ if (locknext)
69675+ vma_lock_anon_vma(vma->vm_next);
69676
69677 /*
69678 * vma->vm_start/vm_end cannot change under us because the caller
69679 * is required to hold the mmap_sem in read mode. We need the
69680- * anon_vma lock to serialize against concurrent expand_stacks.
69681- * Also guard against wrapping around to address 0.
69682+ * anon_vma locks to serialize against concurrent expand_stacks
69683+ * and expand_upwards.
69684 */
69685- if (address < PAGE_ALIGN(address+4))
69686- address = PAGE_ALIGN(address+4);
69687- else {
69688- vma_unlock_anon_vma(vma);
69689- return -ENOMEM;
69690- }
69691 error = 0;
69692
69693 /* Somebody else might have raced and expanded it already */
69694- if (address > vma->vm_end) {
69695+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
69696+ error = -ENOMEM;
69697+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
69698 unsigned long size, grow;
69699
69700 size = address - vma->vm_start;
69701@@ -1739,6 +1980,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69702 }
69703 }
69704 }
69705+ if (locknext)
69706+ vma_unlock_anon_vma(vma->vm_next);
69707 vma_unlock_anon_vma(vma);
69708 khugepaged_enter_vma_merge(vma);
69709 return error;
69710@@ -1752,6 +1995,8 @@ int expand_downwards(struct vm_area_struct *vma,
69711 unsigned long address)
69712 {
69713 int error;
69714+ bool lockprev = false;
69715+ struct vm_area_struct *prev;
69716
69717 /*
69718 * We must make sure the anon_vma is allocated
69719@@ -1765,6 +2010,15 @@ int expand_downwards(struct vm_area_struct *vma,
69720 if (error)
69721 return error;
69722
69723+ prev = vma->vm_prev;
69724+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
69725+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
69726+#endif
69727+ if (lockprev && anon_vma_prepare(prev))
69728+ return -ENOMEM;
69729+ if (lockprev)
69730+ vma_lock_anon_vma(prev);
69731+
69732 vma_lock_anon_vma(vma);
69733
69734 /*
69735@@ -1774,9 +2028,17 @@ int expand_downwards(struct vm_area_struct *vma,
69736 */
69737
69738 /* Somebody else might have raced and expanded it already */
69739- if (address < vma->vm_start) {
69740+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
69741+ error = -ENOMEM;
69742+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
69743 unsigned long size, grow;
69744
69745+#ifdef CONFIG_PAX_SEGMEXEC
69746+ struct vm_area_struct *vma_m;
69747+
69748+ vma_m = pax_find_mirror_vma(vma);
69749+#endif
69750+
69751 size = vma->vm_end - address;
69752 grow = (vma->vm_start - address) >> PAGE_SHIFT;
69753
69754@@ -1786,11 +2048,22 @@ int expand_downwards(struct vm_area_struct *vma,
69755 if (!error) {
69756 vma->vm_start = address;
69757 vma->vm_pgoff -= grow;
69758+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
69759+
69760+#ifdef CONFIG_PAX_SEGMEXEC
69761+ if (vma_m) {
69762+ vma_m->vm_start -= grow << PAGE_SHIFT;
69763+ vma_m->vm_pgoff -= grow;
69764+ }
69765+#endif
69766+
69767 perf_event_mmap(vma);
69768 }
69769 }
69770 }
69771 vma_unlock_anon_vma(vma);
69772+ if (lockprev)
69773+ vma_unlock_anon_vma(prev);
69774 khugepaged_enter_vma_merge(vma);
69775 return error;
69776 }
69777@@ -1860,6 +2133,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
69778 do {
69779 long nrpages = vma_pages(vma);
69780
69781+#ifdef CONFIG_PAX_SEGMEXEC
69782+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
69783+ vma = remove_vma(vma);
69784+ continue;
69785+ }
69786+#endif
69787+
69788 mm->total_vm -= nrpages;
69789 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
69790 vma = remove_vma(vma);
69791@@ -1905,6 +2185,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
69792 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
69793 vma->vm_prev = NULL;
69794 do {
69795+
69796+#ifdef CONFIG_PAX_SEGMEXEC
69797+ if (vma->vm_mirror) {
69798+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
69799+ vma->vm_mirror->vm_mirror = NULL;
69800+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
69801+ vma->vm_mirror = NULL;
69802+ }
69803+#endif
69804+
69805 rb_erase(&vma->vm_rb, &mm->mm_rb);
69806 mm->map_count--;
69807 tail_vma = vma;
69808@@ -1933,14 +2223,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69809 struct vm_area_struct *new;
69810 int err = -ENOMEM;
69811
69812+#ifdef CONFIG_PAX_SEGMEXEC
69813+ struct vm_area_struct *vma_m, *new_m = NULL;
69814+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
69815+#endif
69816+
69817 if (is_vm_hugetlb_page(vma) && (addr &
69818 ~(huge_page_mask(hstate_vma(vma)))))
69819 return -EINVAL;
69820
69821+#ifdef CONFIG_PAX_SEGMEXEC
69822+ vma_m = pax_find_mirror_vma(vma);
69823+#endif
69824+
69825 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69826 if (!new)
69827 goto out_err;
69828
69829+#ifdef CONFIG_PAX_SEGMEXEC
69830+ if (vma_m) {
69831+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69832+ if (!new_m) {
69833+ kmem_cache_free(vm_area_cachep, new);
69834+ goto out_err;
69835+ }
69836+ }
69837+#endif
69838+
69839 /* most fields are the same, copy all, and then fixup */
69840 *new = *vma;
69841
69842@@ -1953,6 +2262,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69843 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
69844 }
69845
69846+#ifdef CONFIG_PAX_SEGMEXEC
69847+ if (vma_m) {
69848+ *new_m = *vma_m;
69849+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
69850+ new_m->vm_mirror = new;
69851+ new->vm_mirror = new_m;
69852+
69853+ if (new_below)
69854+ new_m->vm_end = addr_m;
69855+ else {
69856+ new_m->vm_start = addr_m;
69857+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
69858+ }
69859+ }
69860+#endif
69861+
69862 pol = mpol_dup(vma_policy(vma));
69863 if (IS_ERR(pol)) {
69864 err = PTR_ERR(pol);
69865@@ -1978,6 +2303,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69866 else
69867 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
69868
69869+#ifdef CONFIG_PAX_SEGMEXEC
69870+ if (!err && vma_m) {
69871+ if (anon_vma_clone(new_m, vma_m))
69872+ goto out_free_mpol;
69873+
69874+ mpol_get(pol);
69875+ vma_set_policy(new_m, pol);
69876+
69877+ if (new_m->vm_file) {
69878+ get_file(new_m->vm_file);
69879+ if (vma_m->vm_flags & VM_EXECUTABLE)
69880+ added_exe_file_vma(mm);
69881+ }
69882+
69883+ if (new_m->vm_ops && new_m->vm_ops->open)
69884+ new_m->vm_ops->open(new_m);
69885+
69886+ if (new_below)
69887+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
69888+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
69889+ else
69890+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
69891+
69892+ if (err) {
69893+ if (new_m->vm_ops && new_m->vm_ops->close)
69894+ new_m->vm_ops->close(new_m);
69895+ if (new_m->vm_file) {
69896+ if (vma_m->vm_flags & VM_EXECUTABLE)
69897+ removed_exe_file_vma(mm);
69898+ fput(new_m->vm_file);
69899+ }
69900+ mpol_put(pol);
69901+ }
69902+ }
69903+#endif
69904+
69905 /* Success. */
69906 if (!err)
69907 return 0;
69908@@ -1990,10 +2351,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69909 removed_exe_file_vma(mm);
69910 fput(new->vm_file);
69911 }
69912- unlink_anon_vmas(new);
69913 out_free_mpol:
69914 mpol_put(pol);
69915 out_free_vma:
69916+
69917+#ifdef CONFIG_PAX_SEGMEXEC
69918+ if (new_m) {
69919+ unlink_anon_vmas(new_m);
69920+ kmem_cache_free(vm_area_cachep, new_m);
69921+ }
69922+#endif
69923+
69924+ unlink_anon_vmas(new);
69925 kmem_cache_free(vm_area_cachep, new);
69926 out_err:
69927 return err;
69928@@ -2006,6 +2375,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69929 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69930 unsigned long addr, int new_below)
69931 {
69932+
69933+#ifdef CONFIG_PAX_SEGMEXEC
69934+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
69935+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
69936+ if (mm->map_count >= sysctl_max_map_count-1)
69937+ return -ENOMEM;
69938+ } else
69939+#endif
69940+
69941 if (mm->map_count >= sysctl_max_map_count)
69942 return -ENOMEM;
69943
69944@@ -2017,11 +2395,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69945 * work. This now handles partial unmappings.
69946 * Jeremy Fitzhardinge <jeremy@goop.org>
69947 */
69948+#ifdef CONFIG_PAX_SEGMEXEC
69949 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69950 {
69951+ int ret = __do_munmap(mm, start, len);
69952+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
69953+ return ret;
69954+
69955+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
69956+}
69957+
69958+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69959+#else
69960+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69961+#endif
69962+{
69963 unsigned long end;
69964 struct vm_area_struct *vma, *prev, *last;
69965
69966+ /*
69967+ * mm->mmap_sem is required to protect against another thread
69968+ * changing the mappings in case we sleep.
69969+ */
69970+ verify_mm_writelocked(mm);
69971+
69972 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
69973 return -EINVAL;
69974
69975@@ -2096,6 +2493,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69976 /* Fix up all other VM information */
69977 remove_vma_list(mm, vma);
69978
69979+ track_exec_limit(mm, start, end, 0UL);
69980+
69981 return 0;
69982 }
69983
69984@@ -2108,22 +2507,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
69985
69986 profile_munmap(addr);
69987
69988+#ifdef CONFIG_PAX_SEGMEXEC
69989+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
69990+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
69991+ return -EINVAL;
69992+#endif
69993+
69994 down_write(&mm->mmap_sem);
69995 ret = do_munmap(mm, addr, len);
69996 up_write(&mm->mmap_sem);
69997 return ret;
69998 }
69999
70000-static inline void verify_mm_writelocked(struct mm_struct *mm)
70001-{
70002-#ifdef CONFIG_DEBUG_VM
70003- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
70004- WARN_ON(1);
70005- up_read(&mm->mmap_sem);
70006- }
70007-#endif
70008-}
70009-
70010 /*
70011 * this is really a simplified "do_mmap". it only handles
70012 * anonymous maps. eventually we may be able to do some
70013@@ -2137,6 +2532,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
70014 struct rb_node ** rb_link, * rb_parent;
70015 pgoff_t pgoff = addr >> PAGE_SHIFT;
70016 int error;
70017+ unsigned long charged;
70018
70019 len = PAGE_ALIGN(len);
70020 if (!len)
70021@@ -2148,16 +2544,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
70022
70023 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
70024
70025+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
70026+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
70027+ flags &= ~VM_EXEC;
70028+
70029+#ifdef CONFIG_PAX_MPROTECT
70030+ if (mm->pax_flags & MF_PAX_MPROTECT)
70031+ flags &= ~VM_MAYEXEC;
70032+#endif
70033+
70034+ }
70035+#endif
70036+
70037 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
70038 if (error & ~PAGE_MASK)
70039 return error;
70040
70041+ charged = len >> PAGE_SHIFT;
70042+
70043 /*
70044 * mlock MCL_FUTURE?
70045 */
70046 if (mm->def_flags & VM_LOCKED) {
70047 unsigned long locked, lock_limit;
70048- locked = len >> PAGE_SHIFT;
70049+ locked = charged;
70050 locked += mm->locked_vm;
70051 lock_limit = rlimit(RLIMIT_MEMLOCK);
70052 lock_limit >>= PAGE_SHIFT;
70053@@ -2174,22 +2584,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
70054 /*
70055 * Clear old maps. this also does some error checking for us
70056 */
70057- munmap_back:
70058 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70059 if (vma && vma->vm_start < addr + len) {
70060 if (do_munmap(mm, addr, len))
70061 return -ENOMEM;
70062- goto munmap_back;
70063+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70064+ BUG_ON(vma && vma->vm_start < addr + len);
70065 }
70066
70067 /* Check against address space limits *after* clearing old maps... */
70068- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
70069+ if (!may_expand_vm(mm, charged))
70070 return -ENOMEM;
70071
70072 if (mm->map_count > sysctl_max_map_count)
70073 return -ENOMEM;
70074
70075- if (security_vm_enough_memory(len >> PAGE_SHIFT))
70076+ if (security_vm_enough_memory(charged))
70077 return -ENOMEM;
70078
70079 /* Can we just expand an old private anonymous mapping? */
70080@@ -2203,7 +2613,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
70081 */
70082 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70083 if (!vma) {
70084- vm_unacct_memory(len >> PAGE_SHIFT);
70085+ vm_unacct_memory(charged);
70086 return -ENOMEM;
70087 }
70088
70089@@ -2217,11 +2627,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
70090 vma_link(mm, vma, prev, rb_link, rb_parent);
70091 out:
70092 perf_event_mmap(vma);
70093- mm->total_vm += len >> PAGE_SHIFT;
70094+ mm->total_vm += charged;
70095 if (flags & VM_LOCKED) {
70096 if (!mlock_vma_pages_range(vma, addr, addr + len))
70097- mm->locked_vm += (len >> PAGE_SHIFT);
70098+ mm->locked_vm += charged;
70099 }
70100+ track_exec_limit(mm, addr, addr + len, flags);
70101 return addr;
70102 }
70103
70104@@ -2268,8 +2679,10 @@ void exit_mmap(struct mm_struct *mm)
70105 * Walk the list again, actually closing and freeing it,
70106 * with preemption enabled, without holding any MM locks.
70107 */
70108- while (vma)
70109+ while (vma) {
70110+ vma->vm_mirror = NULL;
70111 vma = remove_vma(vma);
70112+ }
70113
70114 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
70115 }
70116@@ -2283,6 +2696,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
70117 struct vm_area_struct * __vma, * prev;
70118 struct rb_node ** rb_link, * rb_parent;
70119
70120+#ifdef CONFIG_PAX_SEGMEXEC
70121+ struct vm_area_struct *vma_m = NULL;
70122+#endif
70123+
70124+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
70125+ return -EPERM;
70126+
70127 /*
70128 * The vm_pgoff of a purely anonymous vma should be irrelevant
70129 * until its first write fault, when page's anon_vma and index
70130@@ -2305,7 +2725,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
70131 if ((vma->vm_flags & VM_ACCOUNT) &&
70132 security_vm_enough_memory_mm(mm, vma_pages(vma)))
70133 return -ENOMEM;
70134+
70135+#ifdef CONFIG_PAX_SEGMEXEC
70136+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
70137+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70138+ if (!vma_m)
70139+ return -ENOMEM;
70140+ }
70141+#endif
70142+
70143 vma_link(mm, vma, prev, rb_link, rb_parent);
70144+
70145+#ifdef CONFIG_PAX_SEGMEXEC
70146+ if (vma_m)
70147+ BUG_ON(pax_mirror_vma(vma_m, vma));
70148+#endif
70149+
70150 return 0;
70151 }
70152
70153@@ -2323,6 +2758,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
70154 struct rb_node **rb_link, *rb_parent;
70155 struct mempolicy *pol;
70156
70157+ BUG_ON(vma->vm_mirror);
70158+
70159 /*
70160 * If anonymous vma has not yet been faulted, update new pgoff
70161 * to match new location, to increase its chance of merging.
70162@@ -2373,6 +2810,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
70163 return NULL;
70164 }
70165
70166+#ifdef CONFIG_PAX_SEGMEXEC
70167+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
70168+{
70169+ struct vm_area_struct *prev_m;
70170+ struct rb_node **rb_link_m, *rb_parent_m;
70171+ struct mempolicy *pol_m;
70172+
70173+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
70174+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
70175+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
70176+ *vma_m = *vma;
70177+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
70178+ if (anon_vma_clone(vma_m, vma))
70179+ return -ENOMEM;
70180+ pol_m = vma_policy(vma_m);
70181+ mpol_get(pol_m);
70182+ vma_set_policy(vma_m, pol_m);
70183+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
70184+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
70185+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
70186+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
70187+ if (vma_m->vm_file)
70188+ get_file(vma_m->vm_file);
70189+ if (vma_m->vm_ops && vma_m->vm_ops->open)
70190+ vma_m->vm_ops->open(vma_m);
70191+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
70192+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
70193+ vma_m->vm_mirror = vma;
70194+ vma->vm_mirror = vma_m;
70195+ return 0;
70196+}
70197+#endif
70198+
70199 /*
70200 * Return true if the calling process may expand its vm space by the passed
70201 * number of pages
70202@@ -2383,7 +2853,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
70203 unsigned long lim;
70204
70205 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
70206-
70207+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
70208 if (cur + npages > lim)
70209 return 0;
70210 return 1;
70211@@ -2454,6 +2924,22 @@ int install_special_mapping(struct mm_struct *mm,
70212 vma->vm_start = addr;
70213 vma->vm_end = addr + len;
70214
70215+#ifdef CONFIG_PAX_MPROTECT
70216+ if (mm->pax_flags & MF_PAX_MPROTECT) {
70217+#ifndef CONFIG_PAX_MPROTECT_COMPAT
70218+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
70219+ return -EPERM;
70220+ if (!(vm_flags & VM_EXEC))
70221+ vm_flags &= ~VM_MAYEXEC;
70222+#else
70223+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
70224+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
70225+#endif
70226+ else
70227+ vm_flags &= ~VM_MAYWRITE;
70228+ }
70229+#endif
70230+
70231 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
70232 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
70233
70234diff --git a/mm/mprotect.c b/mm/mprotect.c
70235index 5a688a2..27e031c 100644
70236--- a/mm/mprotect.c
70237+++ b/mm/mprotect.c
70238@@ -23,10 +23,16 @@
70239 #include <linux/mmu_notifier.h>
70240 #include <linux/migrate.h>
70241 #include <linux/perf_event.h>
70242+
70243+#ifdef CONFIG_PAX_MPROTECT
70244+#include <linux/elf.h>
70245+#endif
70246+
70247 #include <asm/uaccess.h>
70248 #include <asm/pgtable.h>
70249 #include <asm/cacheflush.h>
70250 #include <asm/tlbflush.h>
70251+#include <asm/mmu_context.h>
70252
70253 #ifndef pgprot_modify
70254 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
70255@@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
70256 flush_tlb_range(vma, start, end);
70257 }
70258
70259+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70260+/* called while holding the mmap semaphor for writing except stack expansion */
70261+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
70262+{
70263+ unsigned long oldlimit, newlimit = 0UL;
70264+
70265+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
70266+ return;
70267+
70268+ spin_lock(&mm->page_table_lock);
70269+ oldlimit = mm->context.user_cs_limit;
70270+ if ((prot & VM_EXEC) && oldlimit < end)
70271+ /* USER_CS limit moved up */
70272+ newlimit = end;
70273+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
70274+ /* USER_CS limit moved down */
70275+ newlimit = start;
70276+
70277+ if (newlimit) {
70278+ mm->context.user_cs_limit = newlimit;
70279+
70280+#ifdef CONFIG_SMP
70281+ wmb();
70282+ cpus_clear(mm->context.cpu_user_cs_mask);
70283+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
70284+#endif
70285+
70286+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
70287+ }
70288+ spin_unlock(&mm->page_table_lock);
70289+ if (newlimit == end) {
70290+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
70291+
70292+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
70293+ if (is_vm_hugetlb_page(vma))
70294+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
70295+ else
70296+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
70297+ }
70298+}
70299+#endif
70300+
70301 int
70302 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
70303 unsigned long start, unsigned long end, unsigned long newflags)
70304@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
70305 int error;
70306 int dirty_accountable = 0;
70307
70308+#ifdef CONFIG_PAX_SEGMEXEC
70309+ struct vm_area_struct *vma_m = NULL;
70310+ unsigned long start_m, end_m;
70311+
70312+ start_m = start + SEGMEXEC_TASK_SIZE;
70313+ end_m = end + SEGMEXEC_TASK_SIZE;
70314+#endif
70315+
70316 if (newflags == oldflags) {
70317 *pprev = vma;
70318 return 0;
70319 }
70320
70321+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
70322+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
70323+
70324+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
70325+ return -ENOMEM;
70326+
70327+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
70328+ return -ENOMEM;
70329+ }
70330+
70331 /*
70332 * If we make a private mapping writable we increase our commit;
70333 * but (without finer accounting) cannot reduce our commit if we
70334@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
70335 }
70336 }
70337
70338+#ifdef CONFIG_PAX_SEGMEXEC
70339+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
70340+ if (start != vma->vm_start) {
70341+ error = split_vma(mm, vma, start, 1);
70342+ if (error)
70343+ goto fail;
70344+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
70345+ *pprev = (*pprev)->vm_next;
70346+ }
70347+
70348+ if (end != vma->vm_end) {
70349+ error = split_vma(mm, vma, end, 0);
70350+ if (error)
70351+ goto fail;
70352+ }
70353+
70354+ if (pax_find_mirror_vma(vma)) {
70355+ error = __do_munmap(mm, start_m, end_m - start_m);
70356+ if (error)
70357+ goto fail;
70358+ } else {
70359+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70360+ if (!vma_m) {
70361+ error = -ENOMEM;
70362+ goto fail;
70363+ }
70364+ vma->vm_flags = newflags;
70365+ error = pax_mirror_vma(vma_m, vma);
70366+ if (error) {
70367+ vma->vm_flags = oldflags;
70368+ goto fail;
70369+ }
70370+ }
70371+ }
70372+#endif
70373+
70374 /*
70375 * First try to merge with previous and/or next vma.
70376 */
70377@@ -204,9 +306,21 @@ success:
70378 * vm_flags and vm_page_prot are protected by the mmap_sem
70379 * held in write mode.
70380 */
70381+
70382+#ifdef CONFIG_PAX_SEGMEXEC
70383+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
70384+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
70385+#endif
70386+
70387 vma->vm_flags = newflags;
70388+
70389+#ifdef CONFIG_PAX_MPROTECT
70390+ if (mm->binfmt && mm->binfmt->handle_mprotect)
70391+ mm->binfmt->handle_mprotect(vma, newflags);
70392+#endif
70393+
70394 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
70395- vm_get_page_prot(newflags));
70396+ vm_get_page_prot(vma->vm_flags));
70397
70398 if (vma_wants_writenotify(vma)) {
70399 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
70400@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70401 end = start + len;
70402 if (end <= start)
70403 return -ENOMEM;
70404+
70405+#ifdef CONFIG_PAX_SEGMEXEC
70406+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
70407+ if (end > SEGMEXEC_TASK_SIZE)
70408+ return -EINVAL;
70409+ } else
70410+#endif
70411+
70412+ if (end > TASK_SIZE)
70413+ return -EINVAL;
70414+
70415 if (!arch_validate_prot(prot))
70416 return -EINVAL;
70417
70418@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70419 /*
70420 * Does the application expect PROT_READ to imply PROT_EXEC:
70421 */
70422- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
70423+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
70424 prot |= PROT_EXEC;
70425
70426 vm_flags = calc_vm_prot_bits(prot);
70427@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70428 if (start > vma->vm_start)
70429 prev = vma;
70430
70431+#ifdef CONFIG_PAX_MPROTECT
70432+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
70433+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
70434+#endif
70435+
70436 for (nstart = start ; ; ) {
70437 unsigned long newflags;
70438
70439@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70440
70441 /* newflags >> 4 shift VM_MAY% in place of VM_% */
70442 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
70443+ if (prot & (PROT_WRITE | PROT_EXEC))
70444+ gr_log_rwxmprotect(vma->vm_file);
70445+
70446+ error = -EACCES;
70447+ goto out;
70448+ }
70449+
70450+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
70451 error = -EACCES;
70452 goto out;
70453 }
70454@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70455 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
70456 if (error)
70457 goto out;
70458+
70459+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
70460+
70461 nstart = tmp;
70462
70463 if (nstart < prev->vm_end)
70464diff --git a/mm/mremap.c b/mm/mremap.c
70465index d6959cb..18a402a 100644
70466--- a/mm/mremap.c
70467+++ b/mm/mremap.c
70468@@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
70469 continue;
70470 pte = ptep_get_and_clear(mm, old_addr, old_pte);
70471 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
70472+
70473+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70474+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
70475+ pte = pte_exprotect(pte);
70476+#endif
70477+
70478 set_pte_at(mm, new_addr, new_pte, pte);
70479 }
70480
70481@@ -290,6 +296,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
70482 if (is_vm_hugetlb_page(vma))
70483 goto Einval;
70484
70485+#ifdef CONFIG_PAX_SEGMEXEC
70486+ if (pax_find_mirror_vma(vma))
70487+ goto Einval;
70488+#endif
70489+
70490 /* We can't remap across vm area boundaries */
70491 if (old_len > vma->vm_end - addr)
70492 goto Efault;
70493@@ -346,20 +357,25 @@ static unsigned long mremap_to(unsigned long addr,
70494 unsigned long ret = -EINVAL;
70495 unsigned long charged = 0;
70496 unsigned long map_flags;
70497+ unsigned long pax_task_size = TASK_SIZE;
70498
70499 if (new_addr & ~PAGE_MASK)
70500 goto out;
70501
70502- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
70503+#ifdef CONFIG_PAX_SEGMEXEC
70504+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
70505+ pax_task_size = SEGMEXEC_TASK_SIZE;
70506+#endif
70507+
70508+ pax_task_size -= PAGE_SIZE;
70509+
70510+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
70511 goto out;
70512
70513 /* Check if the location we're moving into overlaps the
70514 * old location at all, and fail if it does.
70515 */
70516- if ((new_addr <= addr) && (new_addr+new_len) > addr)
70517- goto out;
70518-
70519- if ((addr <= new_addr) && (addr+old_len) > new_addr)
70520+ if (addr + old_len > new_addr && new_addr + new_len > addr)
70521 goto out;
70522
70523 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70524@@ -431,6 +447,7 @@ unsigned long do_mremap(unsigned long addr,
70525 struct vm_area_struct *vma;
70526 unsigned long ret = -EINVAL;
70527 unsigned long charged = 0;
70528+ unsigned long pax_task_size = TASK_SIZE;
70529
70530 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
70531 goto out;
70532@@ -449,6 +466,17 @@ unsigned long do_mremap(unsigned long addr,
70533 if (!new_len)
70534 goto out;
70535
70536+#ifdef CONFIG_PAX_SEGMEXEC
70537+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
70538+ pax_task_size = SEGMEXEC_TASK_SIZE;
70539+#endif
70540+
70541+ pax_task_size -= PAGE_SIZE;
70542+
70543+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
70544+ old_len > pax_task_size || addr > pax_task_size-old_len)
70545+ goto out;
70546+
70547 if (flags & MREMAP_FIXED) {
70548 if (flags & MREMAP_MAYMOVE)
70549 ret = mremap_to(addr, old_len, new_addr, new_len);
70550@@ -498,6 +526,7 @@ unsigned long do_mremap(unsigned long addr,
70551 addr + new_len);
70552 }
70553 ret = addr;
70554+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
70555 goto out;
70556 }
70557 }
70558@@ -524,7 +553,13 @@ unsigned long do_mremap(unsigned long addr,
70559 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70560 if (ret)
70561 goto out;
70562+
70563+ map_flags = vma->vm_flags;
70564 ret = move_vma(vma, addr, old_len, new_len, new_addr);
70565+ if (!(ret & ~PAGE_MASK)) {
70566+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
70567+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
70568+ }
70569 }
70570 out:
70571 if (ret & ~PAGE_MASK)
70572diff --git a/mm/nobootmem.c b/mm/nobootmem.c
70573index 7fa41b4..6087460 100644
70574--- a/mm/nobootmem.c
70575+++ b/mm/nobootmem.c
70576@@ -110,19 +110,30 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
70577 unsigned long __init free_all_memory_core_early(int nodeid)
70578 {
70579 int i;
70580- u64 start, end;
70581+ u64 start, end, startrange, endrange;
70582 unsigned long count = 0;
70583- struct range *range = NULL;
70584+ struct range *range = NULL, rangerange = { 0, 0 };
70585 int nr_range;
70586
70587 nr_range = get_free_all_memory_range(&range, nodeid);
70588+ startrange = __pa(range) >> PAGE_SHIFT;
70589+ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
70590
70591 for (i = 0; i < nr_range; i++) {
70592 start = range[i].start;
70593 end = range[i].end;
70594+ if (start <= endrange && startrange < end) {
70595+ BUG_ON(rangerange.start | rangerange.end);
70596+ rangerange = range[i];
70597+ continue;
70598+ }
70599 count += end - start;
70600 __free_pages_memory(start, end);
70601 }
70602+ start = rangerange.start;
70603+ end = rangerange.end;
70604+ count += end - start;
70605+ __free_pages_memory(start, end);
70606
70607 return count;
70608 }
70609diff --git a/mm/nommu.c b/mm/nommu.c
70610index ee7e57e..cae4e40 100644
70611--- a/mm/nommu.c
70612+++ b/mm/nommu.c
70613@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
70614 int sysctl_overcommit_ratio = 50; /* default is 50% */
70615 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
70616 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
70617-int heap_stack_gap = 0;
70618
70619 atomic_long_t mmap_pages_allocated;
70620
70621@@ -829,15 +828,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
70622 EXPORT_SYMBOL(find_vma);
70623
70624 /*
70625- * find a VMA
70626- * - we don't extend stack VMAs under NOMMU conditions
70627- */
70628-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
70629-{
70630- return find_vma(mm, addr);
70631-}
70632-
70633-/*
70634 * expand a stack to a given address
70635 * - not supported under NOMMU conditions
70636 */
70637@@ -1557,6 +1547,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
70638
70639 /* most fields are the same, copy all, and then fixup */
70640 *new = *vma;
70641+ INIT_LIST_HEAD(&new->anon_vma_chain);
70642 *region = *vma->vm_region;
70643 new->vm_region = region;
70644
70645diff --git a/mm/page_alloc.c b/mm/page_alloc.c
70646index 485be89..c059ad3 100644
70647--- a/mm/page_alloc.c
70648+++ b/mm/page_alloc.c
70649@@ -341,7 +341,7 @@ out:
70650 * This usage means that zero-order pages may not be compound.
70651 */
70652
70653-static void free_compound_page(struct page *page)
70654+void free_compound_page(struct page *page)
70655 {
70656 __free_pages_ok(page, compound_order(page));
70657 }
70658@@ -654,6 +654,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
70659 int i;
70660 int bad = 0;
70661
70662+#ifdef CONFIG_PAX_MEMORY_SANITIZE
70663+ unsigned long index = 1UL << order;
70664+#endif
70665+
70666 trace_mm_page_free_direct(page, order);
70667 kmemcheck_free_shadow(page, order);
70668
70669@@ -669,6 +673,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
70670 debug_check_no_obj_freed(page_address(page),
70671 PAGE_SIZE << order);
70672 }
70673+
70674+#ifdef CONFIG_PAX_MEMORY_SANITIZE
70675+ for (; index; --index)
70676+ sanitize_highpage(page + index - 1);
70677+#endif
70678+
70679 arch_free_page(page, order);
70680 kernel_map_pages(page, 1 << order, 0);
70681
70682@@ -784,8 +794,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
70683 arch_alloc_page(page, order);
70684 kernel_map_pages(page, 1 << order, 1);
70685
70686+#ifndef CONFIG_PAX_MEMORY_SANITIZE
70687 if (gfp_flags & __GFP_ZERO)
70688 prep_zero_page(page, order, gfp_flags);
70689+#endif
70690
70691 if (order && (gfp_flags & __GFP_COMP))
70692 prep_compound_page(page, order);
70693@@ -3357,7 +3369,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
70694 unsigned long pfn;
70695
70696 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
70697+#ifdef CONFIG_X86_32
70698+ /* boot failures in VMware 8 on 32bit vanilla since
70699+ this change */
70700+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
70701+#else
70702 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
70703+#endif
70704 return 1;
70705 }
70706 return 0;
70707diff --git a/mm/percpu.c b/mm/percpu.c
70708index 716eb4a..8d10419 100644
70709--- a/mm/percpu.c
70710+++ b/mm/percpu.c
70711@@ -121,7 +121,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
70712 static unsigned int pcpu_high_unit_cpu __read_mostly;
70713
70714 /* the address of the first chunk which starts with the kernel static area */
70715-void *pcpu_base_addr __read_mostly;
70716+void *pcpu_base_addr __read_only;
70717 EXPORT_SYMBOL_GPL(pcpu_base_addr);
70718
70719 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
70720diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
70721index e920aa3..137702a 100644
70722--- a/mm/process_vm_access.c
70723+++ b/mm/process_vm_access.c
70724@@ -13,6 +13,7 @@
70725 #include <linux/uio.h>
70726 #include <linux/sched.h>
70727 #include <linux/highmem.h>
70728+#include <linux/security.h>
70729 #include <linux/ptrace.h>
70730 #include <linux/slab.h>
70731 #include <linux/syscalls.h>
70732@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
70733 size_t iov_l_curr_offset = 0;
70734 ssize_t iov_len;
70735
70736+ return -ENOSYS; // PaX: until properly audited
70737+
70738 /*
70739 * Work out how many pages of struct pages we're going to need
70740 * when eventually calling get_user_pages
70741 */
70742 for (i = 0; i < riovcnt; i++) {
70743 iov_len = rvec[i].iov_len;
70744- if (iov_len > 0) {
70745- nr_pages_iov = ((unsigned long)rvec[i].iov_base
70746- + iov_len)
70747- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
70748- / PAGE_SIZE + 1;
70749- nr_pages = max(nr_pages, nr_pages_iov);
70750- }
70751+ if (iov_len <= 0)
70752+ continue;
70753+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
70754+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
70755+ nr_pages = max(nr_pages, nr_pages_iov);
70756 }
70757
70758 if (nr_pages == 0)
70759@@ -298,23 +299,23 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
70760 goto free_proc_pages;
70761 }
70762
70763- task_lock(task);
70764- if (__ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
70765- task_unlock(task);
70766+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
70767 rc = -EPERM;
70768 goto put_task_struct;
70769 }
70770- mm = task->mm;
70771
70772- if (!mm || (task->flags & PF_KTHREAD)) {
70773- task_unlock(task);
70774- rc = -EINVAL;
70775+ mm = mm_access(task, PTRACE_MODE_ATTACH);
70776+ if (!mm || IS_ERR(mm)) {
70777+ rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
70778+ /*
70779+ * Explicitly map EACCES to EPERM as EPERM is a more a
70780+ * appropriate error code for process_vw_readv/writev
70781+ */
70782+ if (rc == -EACCES)
70783+ rc = -EPERM;
70784 goto put_task_struct;
70785 }
70786
70787- atomic_inc(&mm->mm_users);
70788- task_unlock(task);
70789-
70790 for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
70791 rc = process_vm_rw_single_vec(
70792 (unsigned long)rvec[i].iov_base, rvec[i].iov_len,
70793diff --git a/mm/rmap.c b/mm/rmap.c
70794index a4fd368..e0ffec7 100644
70795--- a/mm/rmap.c
70796+++ b/mm/rmap.c
70797@@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70798 struct anon_vma *anon_vma = vma->anon_vma;
70799 struct anon_vma_chain *avc;
70800
70801+#ifdef CONFIG_PAX_SEGMEXEC
70802+ struct anon_vma_chain *avc_m = NULL;
70803+#endif
70804+
70805 might_sleep();
70806 if (unlikely(!anon_vma)) {
70807 struct mm_struct *mm = vma->vm_mm;
70808@@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70809 if (!avc)
70810 goto out_enomem;
70811
70812+#ifdef CONFIG_PAX_SEGMEXEC
70813+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
70814+ if (!avc_m)
70815+ goto out_enomem_free_avc;
70816+#endif
70817+
70818 anon_vma = find_mergeable_anon_vma(vma);
70819 allocated = NULL;
70820 if (!anon_vma) {
70821@@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70822 /* page_table_lock to protect against threads */
70823 spin_lock(&mm->page_table_lock);
70824 if (likely(!vma->anon_vma)) {
70825+
70826+#ifdef CONFIG_PAX_SEGMEXEC
70827+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
70828+
70829+ if (vma_m) {
70830+ BUG_ON(vma_m->anon_vma);
70831+ vma_m->anon_vma = anon_vma;
70832+ avc_m->anon_vma = anon_vma;
70833+ avc_m->vma = vma;
70834+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
70835+ list_add(&avc_m->same_anon_vma, &anon_vma->head);
70836+ avc_m = NULL;
70837+ }
70838+#endif
70839+
70840 vma->anon_vma = anon_vma;
70841 avc->anon_vma = anon_vma;
70842 avc->vma = vma;
70843@@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70844
70845 if (unlikely(allocated))
70846 put_anon_vma(allocated);
70847+
70848+#ifdef CONFIG_PAX_SEGMEXEC
70849+ if (unlikely(avc_m))
70850+ anon_vma_chain_free(avc_m);
70851+#endif
70852+
70853 if (unlikely(avc))
70854 anon_vma_chain_free(avc);
70855 }
70856 return 0;
70857
70858 out_enomem_free_avc:
70859+
70860+#ifdef CONFIG_PAX_SEGMEXEC
70861+ if (avc_m)
70862+ anon_vma_chain_free(avc_m);
70863+#endif
70864+
70865 anon_vma_chain_free(avc);
70866 out_enomem:
70867 return -ENOMEM;
70868@@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
70869 * Attach the anon_vmas from src to dst.
70870 * Returns 0 on success, -ENOMEM on failure.
70871 */
70872-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70873+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
70874 {
70875 struct anon_vma_chain *avc, *pavc;
70876 struct anon_vma *root = NULL;
70877@@ -276,7 +313,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70878 * the corresponding VMA in the parent process is attached to.
70879 * Returns 0 on success, non-zero on failure.
70880 */
70881-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
70882+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
70883 {
70884 struct anon_vma_chain *avc;
70885 struct anon_vma *anon_vma;
70886diff --git a/mm/shmem.c b/mm/shmem.c
70887index 6c253f7..367e20a 100644
70888--- a/mm/shmem.c
70889+++ b/mm/shmem.c
70890@@ -31,7 +31,7 @@
70891 #include <linux/export.h>
70892 #include <linux/swap.h>
70893
70894-static struct vfsmount *shm_mnt;
70895+struct vfsmount *shm_mnt;
70896
70897 #ifdef CONFIG_SHMEM
70898 /*
70899@@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
70900 #define BOGO_DIRENT_SIZE 20
70901
70902 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
70903-#define SHORT_SYMLINK_LEN 128
70904+#define SHORT_SYMLINK_LEN 64
70905
70906 struct shmem_xattr {
70907 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
70908@@ -2180,8 +2180,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
70909 int err = -ENOMEM;
70910
70911 /* Round up to L1_CACHE_BYTES to resist false sharing */
70912- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
70913- L1_CACHE_BYTES), GFP_KERNEL);
70914+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
70915 if (!sbinfo)
70916 return -ENOMEM;
70917
70918diff --git a/mm/slab.c b/mm/slab.c
70919index 83311c9a..fcf8f86 100644
70920--- a/mm/slab.c
70921+++ b/mm/slab.c
70922@@ -151,7 +151,7 @@
70923
70924 /* Legal flag mask for kmem_cache_create(). */
70925 #if DEBUG
70926-# define CREATE_MASK (SLAB_RED_ZONE | \
70927+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
70928 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
70929 SLAB_CACHE_DMA | \
70930 SLAB_STORE_USER | \
70931@@ -159,7 +159,7 @@
70932 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70933 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
70934 #else
70935-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
70936+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
70937 SLAB_CACHE_DMA | \
70938 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
70939 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70940@@ -288,7 +288,7 @@ struct kmem_list3 {
70941 * Need this for bootstrapping a per node allocator.
70942 */
70943 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
70944-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
70945+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
70946 #define CACHE_CACHE 0
70947 #define SIZE_AC MAX_NUMNODES
70948 #define SIZE_L3 (2 * MAX_NUMNODES)
70949@@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
70950 if ((x)->max_freeable < i) \
70951 (x)->max_freeable = i; \
70952 } while (0)
70953-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
70954-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
70955-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
70956-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
70957+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
70958+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
70959+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
70960+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
70961 #else
70962 #define STATS_INC_ACTIVE(x) do { } while (0)
70963 #define STATS_DEC_ACTIVE(x) do { } while (0)
70964@@ -538,7 +538,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
70965 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
70966 */
70967 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
70968- const struct slab *slab, void *obj)
70969+ const struct slab *slab, const void *obj)
70970 {
70971 u32 offset = (obj - slab->s_mem);
70972 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
70973@@ -564,7 +564,7 @@ struct cache_names {
70974 static struct cache_names __initdata cache_names[] = {
70975 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
70976 #include <linux/kmalloc_sizes.h>
70977- {NULL,}
70978+ {NULL}
70979 #undef CACHE
70980 };
70981
70982@@ -1572,7 +1572,7 @@ void __init kmem_cache_init(void)
70983 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
70984 sizes[INDEX_AC].cs_size,
70985 ARCH_KMALLOC_MINALIGN,
70986- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70987+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70988 NULL);
70989
70990 if (INDEX_AC != INDEX_L3) {
70991@@ -1580,7 +1580,7 @@ void __init kmem_cache_init(void)
70992 kmem_cache_create(names[INDEX_L3].name,
70993 sizes[INDEX_L3].cs_size,
70994 ARCH_KMALLOC_MINALIGN,
70995- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70996+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70997 NULL);
70998 }
70999
71000@@ -1598,7 +1598,7 @@ void __init kmem_cache_init(void)
71001 sizes->cs_cachep = kmem_cache_create(names->name,
71002 sizes->cs_size,
71003 ARCH_KMALLOC_MINALIGN,
71004- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
71005+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
71006 NULL);
71007 }
71008 #ifdef CONFIG_ZONE_DMA
71009@@ -4322,10 +4322,10 @@ static int s_show(struct seq_file *m, void *p)
71010 }
71011 /* cpu stats */
71012 {
71013- unsigned long allochit = atomic_read(&cachep->allochit);
71014- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
71015- unsigned long freehit = atomic_read(&cachep->freehit);
71016- unsigned long freemiss = atomic_read(&cachep->freemiss);
71017+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
71018+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
71019+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
71020+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
71021
71022 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
71023 allochit, allocmiss, freehit, freemiss);
71024@@ -4584,13 +4584,62 @@ static int __init slab_proc_init(void)
71025 {
71026 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
71027 #ifdef CONFIG_DEBUG_SLAB_LEAK
71028- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
71029+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
71030 #endif
71031 return 0;
71032 }
71033 module_init(slab_proc_init);
71034 #endif
71035
71036+void check_object_size(const void *ptr, unsigned long n, bool to)
71037+{
71038+
71039+#ifdef CONFIG_PAX_USERCOPY
71040+ struct page *page;
71041+ struct kmem_cache *cachep = NULL;
71042+ struct slab *slabp;
71043+ unsigned int objnr;
71044+ unsigned long offset;
71045+ const char *type;
71046+
71047+ if (!n)
71048+ return;
71049+
71050+ type = "<null>";
71051+ if (ZERO_OR_NULL_PTR(ptr))
71052+ goto report;
71053+
71054+ if (!virt_addr_valid(ptr))
71055+ return;
71056+
71057+ page = virt_to_head_page(ptr);
71058+
71059+ type = "<process stack>";
71060+ if (!PageSlab(page)) {
71061+ if (object_is_on_stack(ptr, n) == -1)
71062+ goto report;
71063+ return;
71064+ }
71065+
71066+ cachep = page_get_cache(page);
71067+ type = cachep->name;
71068+ if (!(cachep->flags & SLAB_USERCOPY))
71069+ goto report;
71070+
71071+ slabp = page_get_slab(page);
71072+ objnr = obj_to_index(cachep, slabp, ptr);
71073+ BUG_ON(objnr >= cachep->num);
71074+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
71075+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
71076+ return;
71077+
71078+report:
71079+ pax_report_usercopy(ptr, n, to, type);
71080+#endif
71081+
71082+}
71083+EXPORT_SYMBOL(check_object_size);
71084+
71085 /**
71086 * ksize - get the actual amount of memory allocated for a given object
71087 * @objp: Pointer to the object
71088diff --git a/mm/slob.c b/mm/slob.c
71089index 8105be4..e045f96 100644
71090--- a/mm/slob.c
71091+++ b/mm/slob.c
71092@@ -29,7 +29,7 @@
71093 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
71094 * alloc_pages() directly, allocating compound pages so the page order
71095 * does not have to be separately tracked, and also stores the exact
71096- * allocation size in page->private so that it can be used to accurately
71097+ * allocation size in slob_page->size so that it can be used to accurately
71098 * provide ksize(). These objects are detected in kfree() because slob_page()
71099 * is false for them.
71100 *
71101@@ -58,6 +58,7 @@
71102 */
71103
71104 #include <linux/kernel.h>
71105+#include <linux/sched.h>
71106 #include <linux/slab.h>
71107 #include <linux/mm.h>
71108 #include <linux/swap.h> /* struct reclaim_state */
71109@@ -102,7 +103,8 @@ struct slob_page {
71110 unsigned long flags; /* mandatory */
71111 atomic_t _count; /* mandatory */
71112 slobidx_t units; /* free units left in page */
71113- unsigned long pad[2];
71114+ unsigned long pad[1];
71115+ unsigned long size; /* size when >=PAGE_SIZE */
71116 slob_t *free; /* first free slob_t in page */
71117 struct list_head list; /* linked list of free pages */
71118 };
71119@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
71120 */
71121 static inline int is_slob_page(struct slob_page *sp)
71122 {
71123- return PageSlab((struct page *)sp);
71124+ return PageSlab((struct page *)sp) && !sp->size;
71125 }
71126
71127 static inline void set_slob_page(struct slob_page *sp)
71128@@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
71129
71130 static inline struct slob_page *slob_page(const void *addr)
71131 {
71132- return (struct slob_page *)virt_to_page(addr);
71133+ return (struct slob_page *)virt_to_head_page(addr);
71134 }
71135
71136 /*
71137@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
71138 /*
71139 * Return the size of a slob block.
71140 */
71141-static slobidx_t slob_units(slob_t *s)
71142+static slobidx_t slob_units(const slob_t *s)
71143 {
71144 if (s->units > 0)
71145 return s->units;
71146@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
71147 /*
71148 * Return the next free slob block pointer after this one.
71149 */
71150-static slob_t *slob_next(slob_t *s)
71151+static slob_t *slob_next(const slob_t *s)
71152 {
71153 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
71154 slobidx_t next;
71155@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
71156 /*
71157 * Returns true if s is the last free block in its page.
71158 */
71159-static int slob_last(slob_t *s)
71160+static int slob_last(const slob_t *s)
71161 {
71162 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
71163 }
71164@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
71165 if (!page)
71166 return NULL;
71167
71168+ set_slob_page(page);
71169 return page_address(page);
71170 }
71171
71172@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
71173 if (!b)
71174 return NULL;
71175 sp = slob_page(b);
71176- set_slob_page(sp);
71177
71178 spin_lock_irqsave(&slob_lock, flags);
71179 sp->units = SLOB_UNITS(PAGE_SIZE);
71180 sp->free = b;
71181+ sp->size = 0;
71182 INIT_LIST_HEAD(&sp->list);
71183 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
71184 set_slob_page_free(sp, slob_list);
71185@@ -476,10 +479,9 @@ out:
71186 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
71187 */
71188
71189-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
71190+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
71191 {
71192- unsigned int *m;
71193- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
71194+ slob_t *m;
71195 void *ret;
71196
71197 gfp &= gfp_allowed_mask;
71198@@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
71199
71200 if (!m)
71201 return NULL;
71202- *m = size;
71203+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
71204+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
71205+ m[0].units = size;
71206+ m[1].units = align;
71207 ret = (void *)m + align;
71208
71209 trace_kmalloc_node(_RET_IP_, ret,
71210@@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
71211 gfp |= __GFP_COMP;
71212 ret = slob_new_pages(gfp, order, node);
71213 if (ret) {
71214- struct page *page;
71215- page = virt_to_page(ret);
71216- page->private = size;
71217+ struct slob_page *sp;
71218+ sp = slob_page(ret);
71219+ sp->size = size;
71220 }
71221
71222 trace_kmalloc_node(_RET_IP_, ret,
71223 size, PAGE_SIZE << order, gfp, node);
71224 }
71225
71226- kmemleak_alloc(ret, size, 1, gfp);
71227+ return ret;
71228+}
71229+
71230+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
71231+{
71232+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
71233+ void *ret = __kmalloc_node_align(size, gfp, node, align);
71234+
71235+ if (!ZERO_OR_NULL_PTR(ret))
71236+ kmemleak_alloc(ret, size, 1, gfp);
71237 return ret;
71238 }
71239 EXPORT_SYMBOL(__kmalloc_node);
71240@@ -533,13 +547,92 @@ void kfree(const void *block)
71241 sp = slob_page(block);
71242 if (is_slob_page(sp)) {
71243 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
71244- unsigned int *m = (unsigned int *)(block - align);
71245- slob_free(m, *m + align);
71246- } else
71247+ slob_t *m = (slob_t *)(block - align);
71248+ slob_free(m, m[0].units + align);
71249+ } else {
71250+ clear_slob_page(sp);
71251+ free_slob_page(sp);
71252+ sp->size = 0;
71253 put_page(&sp->page);
71254+ }
71255 }
71256 EXPORT_SYMBOL(kfree);
71257
71258+void check_object_size(const void *ptr, unsigned long n, bool to)
71259+{
71260+
71261+#ifdef CONFIG_PAX_USERCOPY
71262+ struct slob_page *sp;
71263+ const slob_t *free;
71264+ const void *base;
71265+ unsigned long flags;
71266+ const char *type;
71267+
71268+ if (!n)
71269+ return;
71270+
71271+ type = "<null>";
71272+ if (ZERO_OR_NULL_PTR(ptr))
71273+ goto report;
71274+
71275+ if (!virt_addr_valid(ptr))
71276+ return;
71277+
71278+ type = "<process stack>";
71279+ sp = slob_page(ptr);
71280+ if (!PageSlab((struct page *)sp)) {
71281+ if (object_is_on_stack(ptr, n) == -1)
71282+ goto report;
71283+ return;
71284+ }
71285+
71286+ type = "<slob>";
71287+ if (sp->size) {
71288+ base = page_address(&sp->page);
71289+ if (base <= ptr && n <= sp->size - (ptr - base))
71290+ return;
71291+ goto report;
71292+ }
71293+
71294+ /* some tricky double walking to find the chunk */
71295+ spin_lock_irqsave(&slob_lock, flags);
71296+ base = (void *)((unsigned long)ptr & PAGE_MASK);
71297+ free = sp->free;
71298+
71299+ while (!slob_last(free) && (void *)free <= ptr) {
71300+ base = free + slob_units(free);
71301+ free = slob_next(free);
71302+ }
71303+
71304+ while (base < (void *)free) {
71305+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
71306+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
71307+ int offset;
71308+
71309+ if (ptr < base + align)
71310+ break;
71311+
71312+ offset = ptr - base - align;
71313+ if (offset >= m) {
71314+ base += size;
71315+ continue;
71316+ }
71317+
71318+ if (n > m - offset)
71319+ break;
71320+
71321+ spin_unlock_irqrestore(&slob_lock, flags);
71322+ return;
71323+ }
71324+
71325+ spin_unlock_irqrestore(&slob_lock, flags);
71326+report:
71327+ pax_report_usercopy(ptr, n, to, type);
71328+#endif
71329+
71330+}
71331+EXPORT_SYMBOL(check_object_size);
71332+
71333 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
71334 size_t ksize(const void *block)
71335 {
71336@@ -552,10 +645,10 @@ size_t ksize(const void *block)
71337 sp = slob_page(block);
71338 if (is_slob_page(sp)) {
71339 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
71340- unsigned int *m = (unsigned int *)(block - align);
71341- return SLOB_UNITS(*m) * SLOB_UNIT;
71342+ slob_t *m = (slob_t *)(block - align);
71343+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
71344 } else
71345- return sp->page.private;
71346+ return sp->size;
71347 }
71348 EXPORT_SYMBOL(ksize);
71349
71350@@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71351 {
71352 struct kmem_cache *c;
71353
71354+#ifdef CONFIG_PAX_USERCOPY
71355+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
71356+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
71357+#else
71358 c = slob_alloc(sizeof(struct kmem_cache),
71359 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
71360+#endif
71361
71362 if (c) {
71363 c->name = name;
71364@@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
71365
71366 lockdep_trace_alloc(flags);
71367
71368+#ifdef CONFIG_PAX_USERCOPY
71369+ b = __kmalloc_node_align(c->size, flags, node, c->align);
71370+#else
71371 if (c->size < PAGE_SIZE) {
71372 b = slob_alloc(c->size, flags, c->align, node);
71373 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
71374 SLOB_UNITS(c->size) * SLOB_UNIT,
71375 flags, node);
71376 } else {
71377+ struct slob_page *sp;
71378+
71379 b = slob_new_pages(flags, get_order(c->size), node);
71380+ sp = slob_page(b);
71381+ sp->size = c->size;
71382 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
71383 PAGE_SIZE << get_order(c->size),
71384 flags, node);
71385 }
71386+#endif
71387
71388 if (c->ctor)
71389 c->ctor(b);
71390@@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
71391
71392 static void __kmem_cache_free(void *b, int size)
71393 {
71394- if (size < PAGE_SIZE)
71395+ struct slob_page *sp = slob_page(b);
71396+
71397+ if (is_slob_page(sp))
71398 slob_free(b, size);
71399- else
71400+ else {
71401+ clear_slob_page(sp);
71402+ free_slob_page(sp);
71403+ sp->size = 0;
71404 slob_free_pages(b, get_order(size));
71405+ }
71406 }
71407
71408 static void kmem_rcu_free(struct rcu_head *head)
71409@@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
71410
71411 void kmem_cache_free(struct kmem_cache *c, void *b)
71412 {
71413+ int size = c->size;
71414+
71415+#ifdef CONFIG_PAX_USERCOPY
71416+ if (size + c->align < PAGE_SIZE) {
71417+ size += c->align;
71418+ b -= c->align;
71419+ }
71420+#endif
71421+
71422 kmemleak_free_recursive(b, c->flags);
71423 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
71424 struct slob_rcu *slob_rcu;
71425- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
71426- slob_rcu->size = c->size;
71427+ slob_rcu = b + (size - sizeof(struct slob_rcu));
71428+ slob_rcu->size = size;
71429 call_rcu(&slob_rcu->head, kmem_rcu_free);
71430 } else {
71431- __kmem_cache_free(b, c->size);
71432+ __kmem_cache_free(b, size);
71433 }
71434
71435+#ifdef CONFIG_PAX_USERCOPY
71436+ trace_kfree(_RET_IP_, b);
71437+#else
71438 trace_kmem_cache_free(_RET_IP_, b);
71439+#endif
71440+
71441 }
71442 EXPORT_SYMBOL(kmem_cache_free);
71443
71444diff --git a/mm/slub.c b/mm/slub.c
71445index 1a919f0..1739c9b 100644
71446--- a/mm/slub.c
71447+++ b/mm/slub.c
71448@@ -208,7 +208,7 @@ struct track {
71449
71450 enum track_item { TRACK_ALLOC, TRACK_FREE };
71451
71452-#ifdef CONFIG_SYSFS
71453+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71454 static int sysfs_slab_add(struct kmem_cache *);
71455 static int sysfs_slab_alias(struct kmem_cache *, const char *);
71456 static void sysfs_slab_remove(struct kmem_cache *);
71457@@ -530,7 +530,7 @@ static void print_track(const char *s, struct track *t)
71458 if (!t->addr)
71459 return;
71460
71461- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
71462+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
71463 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
71464 #ifdef CONFIG_STACKTRACE
71465 {
71466@@ -2559,6 +2559,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
71467
71468 page = virt_to_head_page(x);
71469
71470+ BUG_ON(!PageSlab(page));
71471+
71472 slab_free(s, page, x, _RET_IP_);
71473
71474 trace_kmem_cache_free(_RET_IP_, x);
71475@@ -2592,7 +2594,7 @@ static int slub_min_objects;
71476 * Merge control. If this is set then no merging of slab caches will occur.
71477 * (Could be removed. This was introduced to pacify the merge skeptics.)
71478 */
71479-static int slub_nomerge;
71480+static int slub_nomerge = 1;
71481
71482 /*
71483 * Calculate the order of allocation given an slab object size.
71484@@ -3042,7 +3044,7 @@ static int kmem_cache_open(struct kmem_cache *s,
71485 else
71486 s->cpu_partial = 30;
71487
71488- s->refcount = 1;
71489+ atomic_set(&s->refcount, 1);
71490 #ifdef CONFIG_NUMA
71491 s->remote_node_defrag_ratio = 1000;
71492 #endif
71493@@ -3146,8 +3148,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
71494 void kmem_cache_destroy(struct kmem_cache *s)
71495 {
71496 down_write(&slub_lock);
71497- s->refcount--;
71498- if (!s->refcount) {
71499+ if (atomic_dec_and_test(&s->refcount)) {
71500 list_del(&s->list);
71501 up_write(&slub_lock);
71502 if (kmem_cache_close(s)) {
71503@@ -3358,6 +3359,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
71504 EXPORT_SYMBOL(__kmalloc_node);
71505 #endif
71506
71507+void check_object_size(const void *ptr, unsigned long n, bool to)
71508+{
71509+
71510+#ifdef CONFIG_PAX_USERCOPY
71511+ struct page *page;
71512+ struct kmem_cache *s = NULL;
71513+ unsigned long offset;
71514+ const char *type;
71515+
71516+ if (!n)
71517+ return;
71518+
71519+ type = "<null>";
71520+ if (ZERO_OR_NULL_PTR(ptr))
71521+ goto report;
71522+
71523+ if (!virt_addr_valid(ptr))
71524+ return;
71525+
71526+ page = virt_to_head_page(ptr);
71527+
71528+ type = "<process stack>";
71529+ if (!PageSlab(page)) {
71530+ if (object_is_on_stack(ptr, n) == -1)
71531+ goto report;
71532+ return;
71533+ }
71534+
71535+ s = page->slab;
71536+ type = s->name;
71537+ if (!(s->flags & SLAB_USERCOPY))
71538+ goto report;
71539+
71540+ offset = (ptr - page_address(page)) % s->size;
71541+ if (offset <= s->objsize && n <= s->objsize - offset)
71542+ return;
71543+
71544+report:
71545+ pax_report_usercopy(ptr, n, to, type);
71546+#endif
71547+
71548+}
71549+EXPORT_SYMBOL(check_object_size);
71550+
71551 size_t ksize(const void *object)
71552 {
71553 struct page *page;
71554@@ -3632,7 +3677,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
71555 int node;
71556
71557 list_add(&s->list, &slab_caches);
71558- s->refcount = -1;
71559+ atomic_set(&s->refcount, -1);
71560
71561 for_each_node_state(node, N_NORMAL_MEMORY) {
71562 struct kmem_cache_node *n = get_node(s, node);
71563@@ -3749,17 +3794,17 @@ void __init kmem_cache_init(void)
71564
71565 /* Caches that are not of the two-to-the-power-of size */
71566 if (KMALLOC_MIN_SIZE <= 32) {
71567- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
71568+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
71569 caches++;
71570 }
71571
71572 if (KMALLOC_MIN_SIZE <= 64) {
71573- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
71574+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
71575 caches++;
71576 }
71577
71578 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
71579- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
71580+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
71581 caches++;
71582 }
71583
71584@@ -3827,7 +3872,7 @@ static int slab_unmergeable(struct kmem_cache *s)
71585 /*
71586 * We may have set a slab to be unmergeable during bootstrap.
71587 */
71588- if (s->refcount < 0)
71589+ if (atomic_read(&s->refcount) < 0)
71590 return 1;
71591
71592 return 0;
71593@@ -3886,7 +3931,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71594 down_write(&slub_lock);
71595 s = find_mergeable(size, align, flags, name, ctor);
71596 if (s) {
71597- s->refcount++;
71598+ atomic_inc(&s->refcount);
71599 /*
71600 * Adjust the object sizes so that we clear
71601 * the complete object on kzalloc.
71602@@ -3895,7 +3940,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71603 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
71604
71605 if (sysfs_slab_alias(s, name)) {
71606- s->refcount--;
71607+ atomic_dec(&s->refcount);
71608 goto err;
71609 }
71610 up_write(&slub_lock);
71611@@ -4023,7 +4068,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
71612 }
71613 #endif
71614
71615-#ifdef CONFIG_SYSFS
71616+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71617 static int count_inuse(struct page *page)
71618 {
71619 return page->inuse;
71620@@ -4410,12 +4455,12 @@ static void resiliency_test(void)
71621 validate_slab_cache(kmalloc_caches[9]);
71622 }
71623 #else
71624-#ifdef CONFIG_SYSFS
71625+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71626 static void resiliency_test(void) {};
71627 #endif
71628 #endif
71629
71630-#ifdef CONFIG_SYSFS
71631+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71632 enum slab_stat_type {
71633 SL_ALL, /* All slabs */
71634 SL_PARTIAL, /* Only partially allocated slabs */
71635@@ -4656,7 +4701,7 @@ SLAB_ATTR_RO(ctor);
71636
71637 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
71638 {
71639- return sprintf(buf, "%d\n", s->refcount - 1);
71640+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
71641 }
71642 SLAB_ATTR_RO(aliases);
71643
71644@@ -5223,6 +5268,7 @@ static char *create_unique_id(struct kmem_cache *s)
71645 return name;
71646 }
71647
71648+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71649 static int sysfs_slab_add(struct kmem_cache *s)
71650 {
71651 int err;
71652@@ -5285,6 +5331,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
71653 kobject_del(&s->kobj);
71654 kobject_put(&s->kobj);
71655 }
71656+#endif
71657
71658 /*
71659 * Need to buffer aliases during bootup until sysfs becomes
71660@@ -5298,6 +5345,7 @@ struct saved_alias {
71661
71662 static struct saved_alias *alias_list;
71663
71664+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71665 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
71666 {
71667 struct saved_alias *al;
71668@@ -5320,6 +5368,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
71669 alias_list = al;
71670 return 0;
71671 }
71672+#endif
71673
71674 static int __init slab_sysfs_init(void)
71675 {
71676diff --git a/mm/swap.c b/mm/swap.c
71677index 55b266d..a532537 100644
71678--- a/mm/swap.c
71679+++ b/mm/swap.c
71680@@ -31,6 +31,7 @@
71681 #include <linux/backing-dev.h>
71682 #include <linux/memcontrol.h>
71683 #include <linux/gfp.h>
71684+#include <linux/hugetlb.h>
71685
71686 #include "internal.h"
71687
71688@@ -71,6 +72,8 @@ static void __put_compound_page(struct page *page)
71689
71690 __page_cache_release(page);
71691 dtor = get_compound_page_dtor(page);
71692+ if (!PageHuge(page))
71693+ BUG_ON(dtor != free_compound_page);
71694 (*dtor)(page);
71695 }
71696
71697diff --git a/mm/swapfile.c b/mm/swapfile.c
71698index b1cd120..aaae885 100644
71699--- a/mm/swapfile.c
71700+++ b/mm/swapfile.c
71701@@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
71702
71703 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
71704 /* Activity counter to indicate that a swapon or swapoff has occurred */
71705-static atomic_t proc_poll_event = ATOMIC_INIT(0);
71706+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
71707
71708 static inline unsigned char swap_count(unsigned char ent)
71709 {
71710@@ -1670,7 +1670,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
71711 }
71712 filp_close(swap_file, NULL);
71713 err = 0;
71714- atomic_inc(&proc_poll_event);
71715+ atomic_inc_unchecked(&proc_poll_event);
71716 wake_up_interruptible(&proc_poll_wait);
71717
71718 out_dput:
71719@@ -1686,8 +1686,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
71720
71721 poll_wait(file, &proc_poll_wait, wait);
71722
71723- if (seq->poll_event != atomic_read(&proc_poll_event)) {
71724- seq->poll_event = atomic_read(&proc_poll_event);
71725+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
71726+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71727 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
71728 }
71729
71730@@ -1785,7 +1785,7 @@ static int swaps_open(struct inode *inode, struct file *file)
71731 return ret;
71732
71733 seq = file->private_data;
71734- seq->poll_event = atomic_read(&proc_poll_event);
71735+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71736 return 0;
71737 }
71738
71739@@ -2123,7 +2123,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
71740 (p->flags & SWP_DISCARDABLE) ? "D" : "");
71741
71742 mutex_unlock(&swapon_mutex);
71743- atomic_inc(&proc_poll_event);
71744+ atomic_inc_unchecked(&proc_poll_event);
71745 wake_up_interruptible(&proc_poll_wait);
71746
71747 if (S_ISREG(inode->i_mode))
71748diff --git a/mm/util.c b/mm/util.c
71749index 136ac4f..5117eef 100644
71750--- a/mm/util.c
71751+++ b/mm/util.c
71752@@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
71753 * allocated buffer. Use this if you don't want to free the buffer immediately
71754 * like, for example, with RCU.
71755 */
71756+#undef __krealloc
71757 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
71758 {
71759 void *ret;
71760@@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
71761 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
71762 * %NULL pointer, the object pointed to is freed.
71763 */
71764+#undef krealloc
71765 void *krealloc(const void *p, size_t new_size, gfp_t flags)
71766 {
71767 void *ret;
71768@@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
71769 void arch_pick_mmap_layout(struct mm_struct *mm)
71770 {
71771 mm->mmap_base = TASK_UNMAPPED_BASE;
71772+
71773+#ifdef CONFIG_PAX_RANDMMAP
71774+ if (mm->pax_flags & MF_PAX_RANDMMAP)
71775+ mm->mmap_base += mm->delta_mmap;
71776+#endif
71777+
71778 mm->get_unmapped_area = arch_get_unmapped_area;
71779 mm->unmap_area = arch_unmap_area;
71780 }
71781diff --git a/mm/vmalloc.c b/mm/vmalloc.c
71782index 27be2f0..0aef2c2 100644
71783--- a/mm/vmalloc.c
71784+++ b/mm/vmalloc.c
71785@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
71786
71787 pte = pte_offset_kernel(pmd, addr);
71788 do {
71789- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71790- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71791+
71792+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71793+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
71794+ BUG_ON(!pte_exec(*pte));
71795+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
71796+ continue;
71797+ }
71798+#endif
71799+
71800+ {
71801+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71802+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71803+ }
71804 } while (pte++, addr += PAGE_SIZE, addr != end);
71805 }
71806
71807@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
71808 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
71809 {
71810 pte_t *pte;
71811+ int ret = -ENOMEM;
71812
71813 /*
71814 * nr is a running index into the array which helps higher level
71815@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
71816 pte = pte_alloc_kernel(pmd, addr);
71817 if (!pte)
71818 return -ENOMEM;
71819+
71820+ pax_open_kernel();
71821 do {
71822 struct page *page = pages[*nr];
71823
71824- if (WARN_ON(!pte_none(*pte)))
71825- return -EBUSY;
71826- if (WARN_ON(!page))
71827- return -ENOMEM;
71828+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71829+ if (pgprot_val(prot) & _PAGE_NX)
71830+#endif
71831+
71832+ if (WARN_ON(!pte_none(*pte))) {
71833+ ret = -EBUSY;
71834+ goto out;
71835+ }
71836+ if (WARN_ON(!page)) {
71837+ ret = -ENOMEM;
71838+ goto out;
71839+ }
71840 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
71841 (*nr)++;
71842 } while (pte++, addr += PAGE_SIZE, addr != end);
71843- return 0;
71844+ ret = 0;
71845+out:
71846+ pax_close_kernel();
71847+ return ret;
71848 }
71849
71850 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
71851@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
71852 * and fall back on vmalloc() if that fails. Others
71853 * just put it in the vmalloc space.
71854 */
71855-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
71856+#ifdef CONFIG_MODULES
71857+#ifdef MODULES_VADDR
71858 unsigned long addr = (unsigned long)x;
71859 if (addr >= MODULES_VADDR && addr < MODULES_END)
71860 return 1;
71861 #endif
71862+
71863+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71864+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
71865+ return 1;
71866+#endif
71867+
71868+#endif
71869+
71870 return is_vmalloc_addr(x);
71871 }
71872
71873@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
71874
71875 if (!pgd_none(*pgd)) {
71876 pud_t *pud = pud_offset(pgd, addr);
71877+#ifdef CONFIG_X86
71878+ if (!pud_large(*pud))
71879+#endif
71880 if (!pud_none(*pud)) {
71881 pmd_t *pmd = pmd_offset(pud, addr);
71882+#ifdef CONFIG_X86
71883+ if (!pmd_large(*pmd))
71884+#endif
71885 if (!pmd_none(*pmd)) {
71886 pte_t *ptep, pte;
71887
71888@@ -1294,6 +1334,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
71889 struct vm_struct *area;
71890
71891 BUG_ON(in_interrupt());
71892+
71893+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71894+ if (flags & VM_KERNEXEC) {
71895+ if (start != VMALLOC_START || end != VMALLOC_END)
71896+ return NULL;
71897+ start = (unsigned long)MODULES_EXEC_VADDR;
71898+ end = (unsigned long)MODULES_EXEC_END;
71899+ }
71900+#endif
71901+
71902 if (flags & VM_IOREMAP) {
71903 int bit = fls(size);
71904
71905@@ -1526,6 +1576,11 @@ void *vmap(struct page **pages, unsigned int count,
71906 if (count > totalram_pages)
71907 return NULL;
71908
71909+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71910+ if (!(pgprot_val(prot) & _PAGE_NX))
71911+ flags |= VM_KERNEXEC;
71912+#endif
71913+
71914 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
71915 __builtin_return_address(0));
71916 if (!area)
71917@@ -1627,6 +1682,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
71918 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
71919 goto fail;
71920
71921+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71922+ if (!(pgprot_val(prot) & _PAGE_NX))
71923+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
71924+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
71925+ else
71926+#endif
71927+
71928 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
71929 start, end, node, gfp_mask, caller);
71930 if (!area)
71931@@ -1679,6 +1741,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
71932 gfp_mask, prot, node, caller);
71933 }
71934
71935+#undef __vmalloc
71936 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
71937 {
71938 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
71939@@ -1702,6 +1765,7 @@ static inline void *__vmalloc_node_flags(unsigned long size,
71940 * For tight control over page level allocator and protection flags
71941 * use __vmalloc() instead.
71942 */
71943+#undef vmalloc
71944 void *vmalloc(unsigned long size)
71945 {
71946 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
71947@@ -1718,6 +1782,7 @@ EXPORT_SYMBOL(vmalloc);
71948 * For tight control over page level allocator and protection flags
71949 * use __vmalloc() instead.
71950 */
71951+#undef vzalloc
71952 void *vzalloc(unsigned long size)
71953 {
71954 return __vmalloc_node_flags(size, -1,
71955@@ -1732,6 +1797,7 @@ EXPORT_SYMBOL(vzalloc);
71956 * The resulting memory area is zeroed so it can be mapped to userspace
71957 * without leaking data.
71958 */
71959+#undef vmalloc_user
71960 void *vmalloc_user(unsigned long size)
71961 {
71962 struct vm_struct *area;
71963@@ -1759,6 +1825,7 @@ EXPORT_SYMBOL(vmalloc_user);
71964 * For tight control over page level allocator and protection flags
71965 * use __vmalloc() instead.
71966 */
71967+#undef vmalloc_node
71968 void *vmalloc_node(unsigned long size, int node)
71969 {
71970 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
71971@@ -1778,6 +1845,7 @@ EXPORT_SYMBOL(vmalloc_node);
71972 * For tight control over page level allocator and protection flags
71973 * use __vmalloc_node() instead.
71974 */
71975+#undef vzalloc_node
71976 void *vzalloc_node(unsigned long size, int node)
71977 {
71978 return __vmalloc_node_flags(size, node,
71979@@ -1800,10 +1868,10 @@ EXPORT_SYMBOL(vzalloc_node);
71980 * For tight control over page level allocator and protection flags
71981 * use __vmalloc() instead.
71982 */
71983-
71984+#undef vmalloc_exec
71985 void *vmalloc_exec(unsigned long size)
71986 {
71987- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
71988+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
71989 -1, __builtin_return_address(0));
71990 }
71991
71992@@ -1822,6 +1890,7 @@ void *vmalloc_exec(unsigned long size)
71993 * Allocate enough 32bit PA addressable pages to cover @size from the
71994 * page level allocator and map them into contiguous kernel virtual space.
71995 */
71996+#undef vmalloc_32
71997 void *vmalloc_32(unsigned long size)
71998 {
71999 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
72000@@ -1836,6 +1905,7 @@ EXPORT_SYMBOL(vmalloc_32);
72001 * The resulting memory area is 32bit addressable and zeroed so it can be
72002 * mapped to userspace without leaking data.
72003 */
72004+#undef vmalloc_32_user
72005 void *vmalloc_32_user(unsigned long size)
72006 {
72007 struct vm_struct *area;
72008@@ -2098,6 +2168,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
72009 unsigned long uaddr = vma->vm_start;
72010 unsigned long usize = vma->vm_end - vma->vm_start;
72011
72012+ BUG_ON(vma->vm_mirror);
72013+
72014 if ((PAGE_SIZE-1) & (unsigned long)addr)
72015 return -EINVAL;
72016
72017diff --git a/mm/vmstat.c b/mm/vmstat.c
72018index 8fd603b..cf0d930 100644
72019--- a/mm/vmstat.c
72020+++ b/mm/vmstat.c
72021@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
72022 *
72023 * vm_stat contains the global counters
72024 */
72025-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
72026+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
72027 EXPORT_SYMBOL(vm_stat);
72028
72029 #ifdef CONFIG_SMP
72030@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
72031 v = p->vm_stat_diff[i];
72032 p->vm_stat_diff[i] = 0;
72033 local_irq_restore(flags);
72034- atomic_long_add(v, &zone->vm_stat[i]);
72035+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
72036 global_diff[i] += v;
72037 #ifdef CONFIG_NUMA
72038 /* 3 seconds idle till flush */
72039@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
72040
72041 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
72042 if (global_diff[i])
72043- atomic_long_add(global_diff[i], &vm_stat[i]);
72044+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
72045 }
72046
72047 #endif
72048@@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
72049 start_cpu_timer(cpu);
72050 #endif
72051 #ifdef CONFIG_PROC_FS
72052- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
72053- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
72054- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
72055- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
72056+ {
72057+ mode_t gr_mode = S_IRUGO;
72058+#ifdef CONFIG_GRKERNSEC_PROC_ADD
72059+ gr_mode = S_IRUSR;
72060+#endif
72061+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
72062+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
72063+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
72064+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
72065+#else
72066+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
72067+#endif
72068+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
72069+ }
72070 #endif
72071 return 0;
72072 }
72073diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
72074index 5471628..cef8398 100644
72075--- a/net/8021q/vlan.c
72076+++ b/net/8021q/vlan.c
72077@@ -588,8 +588,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
72078 err = -EPERM;
72079 if (!capable(CAP_NET_ADMIN))
72080 break;
72081- if ((args.u.name_type >= 0) &&
72082- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
72083+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
72084 struct vlan_net *vn;
72085
72086 vn = net_generic(net, vlan_net_id);
72087diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
72088index fdfdb57..38d368c 100644
72089--- a/net/9p/trans_fd.c
72090+++ b/net/9p/trans_fd.c
72091@@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
72092 oldfs = get_fs();
72093 set_fs(get_ds());
72094 /* The cast to a user pointer is valid due to the set_fs() */
72095- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
72096+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
72097 set_fs(oldfs);
72098
72099 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
72100diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
72101index f41f026..fe76ea8 100644
72102--- a/net/atm/atm_misc.c
72103+++ b/net/atm/atm_misc.c
72104@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
72105 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
72106 return 1;
72107 atm_return(vcc, truesize);
72108- atomic_inc(&vcc->stats->rx_drop);
72109+ atomic_inc_unchecked(&vcc->stats->rx_drop);
72110 return 0;
72111 }
72112 EXPORT_SYMBOL(atm_charge);
72113@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
72114 }
72115 }
72116 atm_return(vcc, guess);
72117- atomic_inc(&vcc->stats->rx_drop);
72118+ atomic_inc_unchecked(&vcc->stats->rx_drop);
72119 return NULL;
72120 }
72121 EXPORT_SYMBOL(atm_alloc_charge);
72122@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
72123
72124 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
72125 {
72126-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
72127+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
72128 __SONET_ITEMS
72129 #undef __HANDLE_ITEM
72130 }
72131@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
72132
72133 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
72134 {
72135-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
72136+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
72137 __SONET_ITEMS
72138 #undef __HANDLE_ITEM
72139 }
72140diff --git a/net/atm/lec.h b/net/atm/lec.h
72141index dfc0719..47c5322 100644
72142--- a/net/atm/lec.h
72143+++ b/net/atm/lec.h
72144@@ -48,7 +48,7 @@ struct lane2_ops {
72145 const u8 *tlvs, u32 sizeoftlvs);
72146 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
72147 const u8 *tlvs, u32 sizeoftlvs);
72148-};
72149+} __no_const;
72150
72151 /*
72152 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
72153diff --git a/net/atm/mpc.h b/net/atm/mpc.h
72154index 0919a88..a23d54e 100644
72155--- a/net/atm/mpc.h
72156+++ b/net/atm/mpc.h
72157@@ -33,7 +33,7 @@ struct mpoa_client {
72158 struct mpc_parameters parameters; /* parameters for this client */
72159
72160 const struct net_device_ops *old_ops;
72161- struct net_device_ops new_ops;
72162+ net_device_ops_no_const new_ops;
72163 };
72164
72165
72166diff --git a/net/atm/proc.c b/net/atm/proc.c
72167index 0d020de..011c7bb 100644
72168--- a/net/atm/proc.c
72169+++ b/net/atm/proc.c
72170@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
72171 const struct k_atm_aal_stats *stats)
72172 {
72173 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
72174- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
72175- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
72176- atomic_read(&stats->rx_drop));
72177+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
72178+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
72179+ atomic_read_unchecked(&stats->rx_drop));
72180 }
72181
72182 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
72183diff --git a/net/atm/resources.c b/net/atm/resources.c
72184index 23f45ce..c748f1a 100644
72185--- a/net/atm/resources.c
72186+++ b/net/atm/resources.c
72187@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
72188 static void copy_aal_stats(struct k_atm_aal_stats *from,
72189 struct atm_aal_stats *to)
72190 {
72191-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
72192+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
72193 __AAL_STAT_ITEMS
72194 #undef __HANDLE_ITEM
72195 }
72196@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
72197 static void subtract_aal_stats(struct k_atm_aal_stats *from,
72198 struct atm_aal_stats *to)
72199 {
72200-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
72201+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
72202 __AAL_STAT_ITEMS
72203 #undef __HANDLE_ITEM
72204 }
72205diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
72206index 3512e25..2b33401 100644
72207--- a/net/batman-adv/bat_iv_ogm.c
72208+++ b/net/batman-adv/bat_iv_ogm.c
72209@@ -541,7 +541,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
72210
72211 /* change sequence number to network order */
72212 batman_ogm_packet->seqno =
72213- htonl((uint32_t)atomic_read(&hard_iface->seqno));
72214+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
72215
72216 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
72217 batman_ogm_packet->tt_crc = htons((uint16_t)
72218@@ -561,7 +561,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
72219 else
72220 batman_ogm_packet->gw_flags = NO_FLAGS;
72221
72222- atomic_inc(&hard_iface->seqno);
72223+ atomic_inc_unchecked(&hard_iface->seqno);
72224
72225 slide_own_bcast_window(hard_iface);
72226 bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
72227@@ -922,7 +922,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
72228 return;
72229
72230 /* could be changed by schedule_own_packet() */
72231- if_incoming_seqno = atomic_read(&if_incoming->seqno);
72232+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
72233
72234 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
72235
72236diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
72237index 7704df4..beb4e16 100644
72238--- a/net/batman-adv/hard-interface.c
72239+++ b/net/batman-adv/hard-interface.c
72240@@ -326,8 +326,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
72241 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
72242 dev_add_pack(&hard_iface->batman_adv_ptype);
72243
72244- atomic_set(&hard_iface->seqno, 1);
72245- atomic_set(&hard_iface->frag_seqno, 1);
72246+ atomic_set_unchecked(&hard_iface->seqno, 1);
72247+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
72248 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
72249 hard_iface->net_dev->name);
72250
72251diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
72252index f9cc957..efd9dae 100644
72253--- a/net/batman-adv/soft-interface.c
72254+++ b/net/batman-adv/soft-interface.c
72255@@ -634,7 +634,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
72256
72257 /* set broadcast sequence number */
72258 bcast_packet->seqno =
72259- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
72260+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
72261
72262 add_bcast_packet_to_list(bat_priv, skb, 1);
72263
72264@@ -828,7 +828,7 @@ struct net_device *softif_create(const char *name)
72265 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
72266
72267 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
72268- atomic_set(&bat_priv->bcast_seqno, 1);
72269+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
72270 atomic_set(&bat_priv->ttvn, 0);
72271 atomic_set(&bat_priv->tt_local_changes, 0);
72272 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
72273diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
72274index ab8d0fe..ceba3fd 100644
72275--- a/net/batman-adv/types.h
72276+++ b/net/batman-adv/types.h
72277@@ -38,8 +38,8 @@ struct hard_iface {
72278 int16_t if_num;
72279 char if_status;
72280 struct net_device *net_dev;
72281- atomic_t seqno;
72282- atomic_t frag_seqno;
72283+ atomic_unchecked_t seqno;
72284+ atomic_unchecked_t frag_seqno;
72285 unsigned char *packet_buff;
72286 int packet_len;
72287 struct kobject *hardif_obj;
72288@@ -154,7 +154,7 @@ struct bat_priv {
72289 atomic_t orig_interval; /* uint */
72290 atomic_t hop_penalty; /* uint */
72291 atomic_t log_level; /* uint */
72292- atomic_t bcast_seqno;
72293+ atomic_unchecked_t bcast_seqno;
72294 atomic_t bcast_queue_left;
72295 atomic_t batman_queue_left;
72296 atomic_t ttvn; /* translation table version number */
72297diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
72298index 07d1c1d..7e9bea9 100644
72299--- a/net/batman-adv/unicast.c
72300+++ b/net/batman-adv/unicast.c
72301@@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
72302 frag1->flags = UNI_FRAG_HEAD | large_tail;
72303 frag2->flags = large_tail;
72304
72305- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
72306+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
72307 frag1->seqno = htons(seqno - 1);
72308 frag2->seqno = htons(seqno);
72309
72310diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
72311index c1c597e..05ebb40 100644
72312--- a/net/bluetooth/hci_conn.c
72313+++ b/net/bluetooth/hci_conn.c
72314@@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
72315 memset(&cp, 0, sizeof(cp));
72316
72317 cp.handle = cpu_to_le16(conn->handle);
72318- memcpy(cp.ltk, ltk, sizeof(ltk));
72319+ memcpy(cp.ltk, ltk, sizeof(cp.ltk));
72320
72321 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
72322 }
72323diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
72324index 17b5b1c..826d872 100644
72325--- a/net/bluetooth/l2cap_core.c
72326+++ b/net/bluetooth/l2cap_core.c
72327@@ -2176,8 +2176,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
72328 break;
72329
72330 case L2CAP_CONF_RFC:
72331- if (olen == sizeof(rfc))
72332- memcpy(&rfc, (void *)val, olen);
72333+ if (olen != sizeof(rfc))
72334+ break;
72335+
72336+ memcpy(&rfc, (void *)val, olen);
72337
72338 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
72339 rfc.mode != chan->mode)
72340@@ -2265,8 +2267,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
72341
72342 switch (type) {
72343 case L2CAP_CONF_RFC:
72344- if (olen == sizeof(rfc))
72345- memcpy(&rfc, (void *)val, olen);
72346+ if (olen != sizeof(rfc))
72347+ break;
72348+
72349+ memcpy(&rfc, (void *)val, olen);
72350 goto done;
72351 }
72352 }
72353diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
72354index a5f4e57..910ee6d 100644
72355--- a/net/bridge/br_multicast.c
72356+++ b/net/bridge/br_multicast.c
72357@@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
72358 nexthdr = ip6h->nexthdr;
72359 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
72360
72361- if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
72362+ if (nexthdr != IPPROTO_ICMPV6)
72363 return 0;
72364
72365 /* Okay, we found ICMPv6 header */
72366diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
72367index 5864cc4..121f3a3 100644
72368--- a/net/bridge/netfilter/ebtables.c
72369+++ b/net/bridge/netfilter/ebtables.c
72370@@ -1513,7 +1513,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
72371 tmp.valid_hooks = t->table->valid_hooks;
72372 }
72373 mutex_unlock(&ebt_mutex);
72374- if (copy_to_user(user, &tmp, *len) != 0){
72375+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
72376 BUGPRINT("c2u Didn't work\n");
72377 ret = -EFAULT;
72378 break;
72379diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
72380index a986280..13444a1 100644
72381--- a/net/caif/caif_socket.c
72382+++ b/net/caif/caif_socket.c
72383@@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
72384 #ifdef CONFIG_DEBUG_FS
72385 struct debug_fs_counter {
72386 atomic_t caif_nr_socks;
72387- atomic_t caif_sock_create;
72388- atomic_t num_connect_req;
72389- atomic_t num_connect_resp;
72390- atomic_t num_connect_fail_resp;
72391- atomic_t num_disconnect;
72392- atomic_t num_remote_shutdown_ind;
72393- atomic_t num_tx_flow_off_ind;
72394- atomic_t num_tx_flow_on_ind;
72395- atomic_t num_rx_flow_off;
72396- atomic_t num_rx_flow_on;
72397+ atomic_unchecked_t caif_sock_create;
72398+ atomic_unchecked_t num_connect_req;
72399+ atomic_unchecked_t num_connect_resp;
72400+ atomic_unchecked_t num_connect_fail_resp;
72401+ atomic_unchecked_t num_disconnect;
72402+ atomic_unchecked_t num_remote_shutdown_ind;
72403+ atomic_unchecked_t num_tx_flow_off_ind;
72404+ atomic_unchecked_t num_tx_flow_on_ind;
72405+ atomic_unchecked_t num_rx_flow_off;
72406+ atomic_unchecked_t num_rx_flow_on;
72407 };
72408 static struct debug_fs_counter cnt;
72409 #define dbfs_atomic_inc(v) atomic_inc_return(v)
72410+#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
72411 #define dbfs_atomic_dec(v) atomic_dec_return(v)
72412 #else
72413 #define dbfs_atomic_inc(v) 0
72414@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72415 atomic_read(&cf_sk->sk.sk_rmem_alloc),
72416 sk_rcvbuf_lowwater(cf_sk));
72417 set_rx_flow_off(cf_sk);
72418- dbfs_atomic_inc(&cnt.num_rx_flow_off);
72419+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
72420 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
72421 }
72422
72423@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72424 set_rx_flow_off(cf_sk);
72425 if (net_ratelimit())
72426 pr_debug("sending flow OFF due to rmem_schedule\n");
72427- dbfs_atomic_inc(&cnt.num_rx_flow_off);
72428+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
72429 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
72430 }
72431 skb->dev = NULL;
72432@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
72433 switch (flow) {
72434 case CAIF_CTRLCMD_FLOW_ON_IND:
72435 /* OK from modem to start sending again */
72436- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
72437+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
72438 set_tx_flow_on(cf_sk);
72439 cf_sk->sk.sk_state_change(&cf_sk->sk);
72440 break;
72441
72442 case CAIF_CTRLCMD_FLOW_OFF_IND:
72443 /* Modem asks us to shut up */
72444- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
72445+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
72446 set_tx_flow_off(cf_sk);
72447 cf_sk->sk.sk_state_change(&cf_sk->sk);
72448 break;
72449@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72450 /* We're now connected */
72451 caif_client_register_refcnt(&cf_sk->layer,
72452 cfsk_hold, cfsk_put);
72453- dbfs_atomic_inc(&cnt.num_connect_resp);
72454+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
72455 cf_sk->sk.sk_state = CAIF_CONNECTED;
72456 set_tx_flow_on(cf_sk);
72457 cf_sk->sk.sk_state_change(&cf_sk->sk);
72458@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72459
72460 case CAIF_CTRLCMD_INIT_FAIL_RSP:
72461 /* Connect request failed */
72462- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
72463+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
72464 cf_sk->sk.sk_err = ECONNREFUSED;
72465 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
72466 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
72467@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72468
72469 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
72470 /* Modem has closed this connection, or device is down. */
72471- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
72472+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
72473 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
72474 cf_sk->sk.sk_err = ECONNRESET;
72475 set_rx_flow_on(cf_sk);
72476@@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
72477 return;
72478
72479 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
72480- dbfs_atomic_inc(&cnt.num_rx_flow_on);
72481+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
72482 set_rx_flow_on(cf_sk);
72483 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
72484 }
72485@@ -854,7 +855,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
72486 /*ifindex = id of the interface.*/
72487 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
72488
72489- dbfs_atomic_inc(&cnt.num_connect_req);
72490+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
72491 cf_sk->layer.receive = caif_sktrecv_cb;
72492
72493 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
72494@@ -943,7 +944,7 @@ static int caif_release(struct socket *sock)
72495 spin_unlock_bh(&sk->sk_receive_queue.lock);
72496 sock->sk = NULL;
72497
72498- dbfs_atomic_inc(&cnt.num_disconnect);
72499+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
72500
72501 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
72502 if (cf_sk->debugfs_socket_dir != NULL)
72503@@ -1122,7 +1123,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
72504 cf_sk->conn_req.protocol = protocol;
72505 /* Increase the number of sockets created. */
72506 dbfs_atomic_inc(&cnt.caif_nr_socks);
72507- num = dbfs_atomic_inc(&cnt.caif_sock_create);
72508+ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
72509 #ifdef CONFIG_DEBUG_FS
72510 if (!IS_ERR(debugfsdir)) {
72511
72512diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
72513index 5cf5222..6f704ad 100644
72514--- a/net/caif/cfctrl.c
72515+++ b/net/caif/cfctrl.c
72516@@ -9,6 +9,7 @@
72517 #include <linux/stddef.h>
72518 #include <linux/spinlock.h>
72519 #include <linux/slab.h>
72520+#include <linux/sched.h>
72521 #include <net/caif/caif_layer.h>
72522 #include <net/caif/cfpkt.h>
72523 #include <net/caif/cfctrl.h>
72524@@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
72525 memset(&dev_info, 0, sizeof(dev_info));
72526 dev_info.id = 0xff;
72527 cfsrvl_init(&this->serv, 0, &dev_info, false);
72528- atomic_set(&this->req_seq_no, 1);
72529- atomic_set(&this->rsp_seq_no, 1);
72530+ atomic_set_unchecked(&this->req_seq_no, 1);
72531+ atomic_set_unchecked(&this->rsp_seq_no, 1);
72532 this->serv.layer.receive = cfctrl_recv;
72533 sprintf(this->serv.layer.name, "ctrl");
72534 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
72535@@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
72536 struct cfctrl_request_info *req)
72537 {
72538 spin_lock_bh(&ctrl->info_list_lock);
72539- atomic_inc(&ctrl->req_seq_no);
72540- req->sequence_no = atomic_read(&ctrl->req_seq_no);
72541+ atomic_inc_unchecked(&ctrl->req_seq_no);
72542+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
72543 list_add_tail(&req->list, &ctrl->list);
72544 spin_unlock_bh(&ctrl->info_list_lock);
72545 }
72546@@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
72547 if (p != first)
72548 pr_warn("Requests are not received in order\n");
72549
72550- atomic_set(&ctrl->rsp_seq_no,
72551+ atomic_set_unchecked(&ctrl->rsp_seq_no,
72552 p->sequence_no);
72553 list_del(&p->list);
72554 goto out;
72555diff --git a/net/can/gw.c b/net/can/gw.c
72556index 3d79b12..8de85fa 100644
72557--- a/net/can/gw.c
72558+++ b/net/can/gw.c
72559@@ -96,7 +96,7 @@ struct cf_mod {
72560 struct {
72561 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
72562 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
72563- } csumfunc;
72564+ } __no_const csumfunc;
72565 };
72566
72567
72568diff --git a/net/compat.c b/net/compat.c
72569index 6def90e..c6992fa 100644
72570--- a/net/compat.c
72571+++ b/net/compat.c
72572@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
72573 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
72574 __get_user(kmsg->msg_flags, &umsg->msg_flags))
72575 return -EFAULT;
72576- kmsg->msg_name = compat_ptr(tmp1);
72577- kmsg->msg_iov = compat_ptr(tmp2);
72578- kmsg->msg_control = compat_ptr(tmp3);
72579+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
72580+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
72581+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
72582 return 0;
72583 }
72584
72585@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72586
72587 if (kern_msg->msg_namelen) {
72588 if (mode == VERIFY_READ) {
72589- int err = move_addr_to_kernel(kern_msg->msg_name,
72590+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
72591 kern_msg->msg_namelen,
72592 kern_address);
72593 if (err < 0)
72594@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72595 kern_msg->msg_name = NULL;
72596
72597 tot_len = iov_from_user_compat_to_kern(kern_iov,
72598- (struct compat_iovec __user *)kern_msg->msg_iov,
72599+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
72600 kern_msg->msg_iovlen);
72601 if (tot_len >= 0)
72602 kern_msg->msg_iov = kern_iov;
72603@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72604
72605 #define CMSG_COMPAT_FIRSTHDR(msg) \
72606 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
72607- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
72608+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
72609 (struct compat_cmsghdr __user *)NULL)
72610
72611 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
72612 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
72613 (ucmlen) <= (unsigned long) \
72614 ((mhdr)->msg_controllen - \
72615- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
72616+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
72617
72618 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
72619 struct compat_cmsghdr __user *cmsg, int cmsg_len)
72620 {
72621 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
72622- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
72623+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
72624 msg->msg_controllen)
72625 return NULL;
72626 return (struct compat_cmsghdr __user *)ptr;
72627@@ -221,7 +221,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
72628 {
72629 struct compat_timeval ctv;
72630 struct compat_timespec cts[3];
72631- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72632+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72633 struct compat_cmsghdr cmhdr;
72634 int cmlen;
72635
72636@@ -273,7 +273,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
72637
72638 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
72639 {
72640- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72641+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72642 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
72643 int fdnum = scm->fp->count;
72644 struct file **fp = scm->fp->fp;
72645@@ -370,7 +370,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
72646 return -EFAULT;
72647 old_fs = get_fs();
72648 set_fs(KERNEL_DS);
72649- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
72650+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
72651 set_fs(old_fs);
72652
72653 return err;
72654@@ -431,7 +431,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
72655 len = sizeof(ktime);
72656 old_fs = get_fs();
72657 set_fs(KERNEL_DS);
72658- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
72659+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
72660 set_fs(old_fs);
72661
72662 if (!err) {
72663@@ -566,7 +566,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72664 case MCAST_JOIN_GROUP:
72665 case MCAST_LEAVE_GROUP:
72666 {
72667- struct compat_group_req __user *gr32 = (void *)optval;
72668+ struct compat_group_req __user *gr32 = (void __user *)optval;
72669 struct group_req __user *kgr =
72670 compat_alloc_user_space(sizeof(struct group_req));
72671 u32 interface;
72672@@ -587,7 +587,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72673 case MCAST_BLOCK_SOURCE:
72674 case MCAST_UNBLOCK_SOURCE:
72675 {
72676- struct compat_group_source_req __user *gsr32 = (void *)optval;
72677+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
72678 struct group_source_req __user *kgsr = compat_alloc_user_space(
72679 sizeof(struct group_source_req));
72680 u32 interface;
72681@@ -608,7 +608,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72682 }
72683 case MCAST_MSFILTER:
72684 {
72685- struct compat_group_filter __user *gf32 = (void *)optval;
72686+ struct compat_group_filter __user *gf32 = (void __user *)optval;
72687 struct group_filter __user *kgf;
72688 u32 interface, fmode, numsrc;
72689
72690@@ -646,7 +646,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
72691 char __user *optval, int __user *optlen,
72692 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
72693 {
72694- struct compat_group_filter __user *gf32 = (void *)optval;
72695+ struct compat_group_filter __user *gf32 = (void __user *)optval;
72696 struct group_filter __user *kgf;
72697 int __user *koptlen;
72698 u32 interface, fmode, numsrc;
72699diff --git a/net/core/datagram.c b/net/core/datagram.c
72700index 68bbf9f..5ef0d12 100644
72701--- a/net/core/datagram.c
72702+++ b/net/core/datagram.c
72703@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
72704 }
72705
72706 kfree_skb(skb);
72707- atomic_inc(&sk->sk_drops);
72708+ atomic_inc_unchecked(&sk->sk_drops);
72709 sk_mem_reclaim_partial(sk);
72710
72711 return err;
72712diff --git a/net/core/dev.c b/net/core/dev.c
72713index c56cacf..b28e35f 100644
72714--- a/net/core/dev.c
72715+++ b/net/core/dev.c
72716@@ -1139,10 +1139,14 @@ void dev_load(struct net *net, const char *name)
72717 if (no_module && capable(CAP_NET_ADMIN))
72718 no_module = request_module("netdev-%s", name);
72719 if (no_module && capable(CAP_SYS_MODULE)) {
72720+#ifdef CONFIG_GRKERNSEC_MODHARDEN
72721+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
72722+#else
72723 if (!request_module("%s", name))
72724 pr_err("Loading kernel module for a network device "
72725 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
72726 "instead\n", name);
72727+#endif
72728 }
72729 }
72730 EXPORT_SYMBOL(dev_load);
72731@@ -1573,7 +1577,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
72732 {
72733 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
72734 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
72735- atomic_long_inc(&dev->rx_dropped);
72736+ atomic_long_inc_unchecked(&dev->rx_dropped);
72737 kfree_skb(skb);
72738 return NET_RX_DROP;
72739 }
72740@@ -1583,7 +1587,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
72741 nf_reset(skb);
72742
72743 if (unlikely(!is_skb_forwardable(dev, skb))) {
72744- atomic_long_inc(&dev->rx_dropped);
72745+ atomic_long_inc_unchecked(&dev->rx_dropped);
72746 kfree_skb(skb);
72747 return NET_RX_DROP;
72748 }
72749@@ -2036,7 +2040,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
72750
72751 struct dev_gso_cb {
72752 void (*destructor)(struct sk_buff *skb);
72753-};
72754+} __no_const;
72755
72756 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
72757
72758@@ -2970,7 +2974,7 @@ enqueue:
72759
72760 local_irq_restore(flags);
72761
72762- atomic_long_inc(&skb->dev->rx_dropped);
72763+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
72764 kfree_skb(skb);
72765 return NET_RX_DROP;
72766 }
72767@@ -3044,7 +3048,7 @@ int netif_rx_ni(struct sk_buff *skb)
72768 }
72769 EXPORT_SYMBOL(netif_rx_ni);
72770
72771-static void net_tx_action(struct softirq_action *h)
72772+static void net_tx_action(void)
72773 {
72774 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72775
72776@@ -3333,7 +3337,7 @@ ncls:
72777 if (pt_prev) {
72778 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
72779 } else {
72780- atomic_long_inc(&skb->dev->rx_dropped);
72781+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
72782 kfree_skb(skb);
72783 /* Jamal, now you will not able to escape explaining
72784 * me how you were going to use this. :-)
72785@@ -3897,7 +3901,7 @@ void netif_napi_del(struct napi_struct *napi)
72786 }
72787 EXPORT_SYMBOL(netif_napi_del);
72788
72789-static void net_rx_action(struct softirq_action *h)
72790+static void net_rx_action(void)
72791 {
72792 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72793 unsigned long time_limit = jiffies + 2;
72794@@ -5955,7 +5959,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
72795 } else {
72796 netdev_stats_to_stats64(storage, &dev->stats);
72797 }
72798- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
72799+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
72800 return storage;
72801 }
72802 EXPORT_SYMBOL(dev_get_stats);
72803diff --git a/net/core/flow.c b/net/core/flow.c
72804index e318c7e..168b1d0 100644
72805--- a/net/core/flow.c
72806+++ b/net/core/flow.c
72807@@ -61,7 +61,7 @@ struct flow_cache {
72808 struct timer_list rnd_timer;
72809 };
72810
72811-atomic_t flow_cache_genid = ATOMIC_INIT(0);
72812+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
72813 EXPORT_SYMBOL(flow_cache_genid);
72814 static struct flow_cache flow_cache_global;
72815 static struct kmem_cache *flow_cachep __read_mostly;
72816@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
72817
72818 static int flow_entry_valid(struct flow_cache_entry *fle)
72819 {
72820- if (atomic_read(&flow_cache_genid) != fle->genid)
72821+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
72822 return 0;
72823 if (fle->object && !fle->object->ops->check(fle->object))
72824 return 0;
72825@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
72826 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
72827 fcp->hash_count++;
72828 }
72829- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
72830+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
72831 flo = fle->object;
72832 if (!flo)
72833 goto ret_object;
72834@@ -280,7 +280,7 @@ nocache:
72835 }
72836 flo = resolver(net, key, family, dir, flo, ctx);
72837 if (fle) {
72838- fle->genid = atomic_read(&flow_cache_genid);
72839+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
72840 if (!IS_ERR(flo))
72841 fle->object = flo;
72842 else
72843diff --git a/net/core/iovec.c b/net/core/iovec.c
72844index c40f27e..7f49254 100644
72845--- a/net/core/iovec.c
72846+++ b/net/core/iovec.c
72847@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
72848 if (m->msg_namelen) {
72849 if (mode == VERIFY_READ) {
72850 void __user *namep;
72851- namep = (void __user __force *) m->msg_name;
72852+ namep = (void __force_user *) m->msg_name;
72853 err = move_addr_to_kernel(namep, m->msg_namelen,
72854 address);
72855 if (err < 0)
72856@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
72857 }
72858
72859 size = m->msg_iovlen * sizeof(struct iovec);
72860- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
72861+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
72862 return -EFAULT;
72863
72864 m->msg_iov = iov;
72865diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
72866index 9083e82..1673203 100644
72867--- a/net/core/rtnetlink.c
72868+++ b/net/core/rtnetlink.c
72869@@ -57,7 +57,7 @@ struct rtnl_link {
72870 rtnl_doit_func doit;
72871 rtnl_dumpit_func dumpit;
72872 rtnl_calcit_func calcit;
72873-};
72874+} __no_const;
72875
72876 static DEFINE_MUTEX(rtnl_mutex);
72877 static u16 min_ifinfo_dump_size;
72878diff --git a/net/core/scm.c b/net/core/scm.c
72879index ff52ad0..aff1c0f 100644
72880--- a/net/core/scm.c
72881+++ b/net/core/scm.c
72882@@ -220,7 +220,7 @@ EXPORT_SYMBOL(__scm_send);
72883 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72884 {
72885 struct cmsghdr __user *cm
72886- = (__force struct cmsghdr __user *)msg->msg_control;
72887+ = (struct cmsghdr __force_user *)msg->msg_control;
72888 struct cmsghdr cmhdr;
72889 int cmlen = CMSG_LEN(len);
72890 int err;
72891@@ -243,7 +243,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72892 err = -EFAULT;
72893 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
72894 goto out;
72895- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
72896+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
72897 goto out;
72898 cmlen = CMSG_SPACE(len);
72899 if (msg->msg_controllen < cmlen)
72900@@ -259,7 +259,7 @@ EXPORT_SYMBOL(put_cmsg);
72901 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72902 {
72903 struct cmsghdr __user *cm
72904- = (__force struct cmsghdr __user*)msg->msg_control;
72905+ = (struct cmsghdr __force_user *)msg->msg_control;
72906
72907 int fdmax = 0;
72908 int fdnum = scm->fp->count;
72909@@ -279,7 +279,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72910 if (fdnum < fdmax)
72911 fdmax = fdnum;
72912
72913- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
72914+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
72915 i++, cmfptr++)
72916 {
72917 int new_fd;
72918diff --git a/net/core/sock.c b/net/core/sock.c
72919index b23f174..b9a0d26 100644
72920--- a/net/core/sock.c
72921+++ b/net/core/sock.c
72922@@ -289,7 +289,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72923 struct sk_buff_head *list = &sk->sk_receive_queue;
72924
72925 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
72926- atomic_inc(&sk->sk_drops);
72927+ atomic_inc_unchecked(&sk->sk_drops);
72928 trace_sock_rcvqueue_full(sk, skb);
72929 return -ENOMEM;
72930 }
72931@@ -299,7 +299,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72932 return err;
72933
72934 if (!sk_rmem_schedule(sk, skb->truesize)) {
72935- atomic_inc(&sk->sk_drops);
72936+ atomic_inc_unchecked(&sk->sk_drops);
72937 return -ENOBUFS;
72938 }
72939
72940@@ -319,7 +319,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72941 skb_dst_force(skb);
72942
72943 spin_lock_irqsave(&list->lock, flags);
72944- skb->dropcount = atomic_read(&sk->sk_drops);
72945+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
72946 __skb_queue_tail(list, skb);
72947 spin_unlock_irqrestore(&list->lock, flags);
72948
72949@@ -339,7 +339,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
72950 skb->dev = NULL;
72951
72952 if (sk_rcvqueues_full(sk, skb)) {
72953- atomic_inc(&sk->sk_drops);
72954+ atomic_inc_unchecked(&sk->sk_drops);
72955 goto discard_and_relse;
72956 }
72957 if (nested)
72958@@ -357,7 +357,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
72959 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
72960 } else if (sk_add_backlog(sk, skb)) {
72961 bh_unlock_sock(sk);
72962- atomic_inc(&sk->sk_drops);
72963+ atomic_inc_unchecked(&sk->sk_drops);
72964 goto discard_and_relse;
72965 }
72966
72967@@ -917,7 +917,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72968 if (len > sizeof(peercred))
72969 len = sizeof(peercred);
72970 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
72971- if (copy_to_user(optval, &peercred, len))
72972+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
72973 return -EFAULT;
72974 goto lenout;
72975 }
72976@@ -930,7 +930,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72977 return -ENOTCONN;
72978 if (lv < len)
72979 return -EINVAL;
72980- if (copy_to_user(optval, address, len))
72981+ if (len > sizeof(address) || copy_to_user(optval, address, len))
72982 return -EFAULT;
72983 goto lenout;
72984 }
72985@@ -963,7 +963,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72986
72987 if (len > lv)
72988 len = lv;
72989- if (copy_to_user(optval, &v, len))
72990+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
72991 return -EFAULT;
72992 lenout:
72993 if (put_user(len, optlen))
72994@@ -2020,7 +2020,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
72995 */
72996 smp_wmb();
72997 atomic_set(&sk->sk_refcnt, 1);
72998- atomic_set(&sk->sk_drops, 0);
72999+ atomic_set_unchecked(&sk->sk_drops, 0);
73000 }
73001 EXPORT_SYMBOL(sock_init_data);
73002
73003diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
73004index 02e75d1..9a57a7c 100644
73005--- a/net/decnet/sysctl_net_decnet.c
73006+++ b/net/decnet/sysctl_net_decnet.c
73007@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
73008
73009 if (len > *lenp) len = *lenp;
73010
73011- if (copy_to_user(buffer, addr, len))
73012+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
73013 return -EFAULT;
73014
73015 *lenp = len;
73016@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
73017
73018 if (len > *lenp) len = *lenp;
73019
73020- if (copy_to_user(buffer, devname, len))
73021+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
73022 return -EFAULT;
73023
73024 *lenp = len;
73025diff --git a/net/econet/Kconfig b/net/econet/Kconfig
73026index 39a2d29..f39c0fe 100644
73027--- a/net/econet/Kconfig
73028+++ b/net/econet/Kconfig
73029@@ -4,7 +4,7 @@
73030
73031 config ECONET
73032 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
73033- depends on EXPERIMENTAL && INET
73034+ depends on EXPERIMENTAL && INET && BROKEN
73035 ---help---
73036 Econet is a fairly old and slow networking protocol mainly used by
73037 Acorn computers to access file and print servers. It uses native
73038diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
73039index 92fc5f6..b790d91 100644
73040--- a/net/ipv4/fib_frontend.c
73041+++ b/net/ipv4/fib_frontend.c
73042@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
73043 #ifdef CONFIG_IP_ROUTE_MULTIPATH
73044 fib_sync_up(dev);
73045 #endif
73046- atomic_inc(&net->ipv4.dev_addr_genid);
73047+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
73048 rt_cache_flush(dev_net(dev), -1);
73049 break;
73050 case NETDEV_DOWN:
73051 fib_del_ifaddr(ifa, NULL);
73052- atomic_inc(&net->ipv4.dev_addr_genid);
73053+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
73054 if (ifa->ifa_dev->ifa_list == NULL) {
73055 /* Last address was deleted from this interface.
73056 * Disable IP.
73057@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
73058 #ifdef CONFIG_IP_ROUTE_MULTIPATH
73059 fib_sync_up(dev);
73060 #endif
73061- atomic_inc(&net->ipv4.dev_addr_genid);
73062+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
73063 rt_cache_flush(dev_net(dev), -1);
73064 break;
73065 case NETDEV_DOWN:
73066diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
73067index 80106d8..232e898 100644
73068--- a/net/ipv4/fib_semantics.c
73069+++ b/net/ipv4/fib_semantics.c
73070@@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
73071 nh->nh_saddr = inet_select_addr(nh->nh_dev,
73072 nh->nh_gw,
73073 nh->nh_parent->fib_scope);
73074- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
73075+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
73076
73077 return nh->nh_saddr;
73078 }
73079diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
73080index ccee270..db23c3c 100644
73081--- a/net/ipv4/inet_diag.c
73082+++ b/net/ipv4/inet_diag.c
73083@@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct sock *sk,
73084 r->idiag_retrans = 0;
73085
73086 r->id.idiag_if = sk->sk_bound_dev_if;
73087+
73088+#ifdef CONFIG_GRKERNSEC_HIDESYM
73089+ r->id.idiag_cookie[0] = 0;
73090+ r->id.idiag_cookie[1] = 0;
73091+#else
73092 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
73093 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
73094+#endif
73095
73096 r->id.idiag_sport = inet->inet_sport;
73097 r->id.idiag_dport = inet->inet_dport;
73098@@ -210,8 +216,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
73099 r->idiag_family = tw->tw_family;
73100 r->idiag_retrans = 0;
73101 r->id.idiag_if = tw->tw_bound_dev_if;
73102+
73103+#ifdef CONFIG_GRKERNSEC_HIDESYM
73104+ r->id.idiag_cookie[0] = 0;
73105+ r->id.idiag_cookie[1] = 0;
73106+#else
73107 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
73108 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
73109+#endif
73110+
73111 r->id.idiag_sport = tw->tw_sport;
73112 r->id.idiag_dport = tw->tw_dport;
73113 r->id.idiag_src[0] = tw->tw_rcv_saddr;
73114@@ -294,12 +307,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
73115 if (sk == NULL)
73116 goto unlock;
73117
73118+#ifndef CONFIG_GRKERNSEC_HIDESYM
73119 err = -ESTALE;
73120 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
73121 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
73122 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
73123 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
73124 goto out;
73125+#endif
73126
73127 err = -ENOMEM;
73128 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
73129@@ -589,8 +604,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
73130 r->idiag_retrans = req->retrans;
73131
73132 r->id.idiag_if = sk->sk_bound_dev_if;
73133+
73134+#ifdef CONFIG_GRKERNSEC_HIDESYM
73135+ r->id.idiag_cookie[0] = 0;
73136+ r->id.idiag_cookie[1] = 0;
73137+#else
73138 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
73139 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
73140+#endif
73141
73142 tmo = req->expires - jiffies;
73143 if (tmo < 0)
73144diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
73145index 984ec65..97ac518 100644
73146--- a/net/ipv4/inet_hashtables.c
73147+++ b/net/ipv4/inet_hashtables.c
73148@@ -18,12 +18,15 @@
73149 #include <linux/sched.h>
73150 #include <linux/slab.h>
73151 #include <linux/wait.h>
73152+#include <linux/security.h>
73153
73154 #include <net/inet_connection_sock.h>
73155 #include <net/inet_hashtables.h>
73156 #include <net/secure_seq.h>
73157 #include <net/ip.h>
73158
73159+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
73160+
73161 /*
73162 * Allocate and initialize a new local port bind bucket.
73163 * The bindhash mutex for snum's hash chain must be held here.
73164@@ -530,6 +533,8 @@ ok:
73165 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
73166 spin_unlock(&head->lock);
73167
73168+ gr_update_task_in_ip_table(current, inet_sk(sk));
73169+
73170 if (tw) {
73171 inet_twsk_deschedule(tw, death_row);
73172 while (twrefcnt) {
73173diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
73174index 86f13c67..59a35b5 100644
73175--- a/net/ipv4/inetpeer.c
73176+++ b/net/ipv4/inetpeer.c
73177@@ -436,8 +436,8 @@ relookup:
73178 if (p) {
73179 p->daddr = *daddr;
73180 atomic_set(&p->refcnt, 1);
73181- atomic_set(&p->rid, 0);
73182- atomic_set(&p->ip_id_count,
73183+ atomic_set_unchecked(&p->rid, 0);
73184+ atomic_set_unchecked(&p->ip_id_count,
73185 (daddr->family == AF_INET) ?
73186 secure_ip_id(daddr->addr.a4) :
73187 secure_ipv6_id(daddr->addr.a6));
73188diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
73189index fdaabf2..0ec3205 100644
73190--- a/net/ipv4/ip_fragment.c
73191+++ b/net/ipv4/ip_fragment.c
73192@@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
73193 return 0;
73194
73195 start = qp->rid;
73196- end = atomic_inc_return(&peer->rid);
73197+ end = atomic_inc_return_unchecked(&peer->rid);
73198 qp->rid = end;
73199
73200 rc = qp->q.fragments && (end - start) > max;
73201diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
73202index 09ff51b..d3968eb 100644
73203--- a/net/ipv4/ip_sockglue.c
73204+++ b/net/ipv4/ip_sockglue.c
73205@@ -1111,7 +1111,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
73206 len = min_t(unsigned int, len, opt->optlen);
73207 if (put_user(len, optlen))
73208 return -EFAULT;
73209- if (copy_to_user(optval, opt->__data, len))
73210+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
73211+ copy_to_user(optval, opt->__data, len))
73212 return -EFAULT;
73213 return 0;
73214 }
73215@@ -1239,7 +1240,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
73216 if (sk->sk_type != SOCK_STREAM)
73217 return -ENOPROTOOPT;
73218
73219- msg.msg_control = optval;
73220+ msg.msg_control = (void __force_kernel *)optval;
73221 msg.msg_controllen = len;
73222 msg.msg_flags = flags;
73223
73224diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
73225index 99ec116..c5628fe 100644
73226--- a/net/ipv4/ipconfig.c
73227+++ b/net/ipv4/ipconfig.c
73228@@ -318,7 +318,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
73229
73230 mm_segment_t oldfs = get_fs();
73231 set_fs(get_ds());
73232- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
73233+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
73234 set_fs(oldfs);
73235 return res;
73236 }
73237@@ -329,7 +329,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
73238
73239 mm_segment_t oldfs = get_fs();
73240 set_fs(get_ds());
73241- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
73242+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
73243 set_fs(oldfs);
73244 return res;
73245 }
73246@@ -340,7 +340,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
73247
73248 mm_segment_t oldfs = get_fs();
73249 set_fs(get_ds());
73250- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
73251+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
73252 set_fs(oldfs);
73253 return res;
73254 }
73255diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
73256index 2133c30..5c4b40b 100644
73257--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
73258+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
73259@@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
73260
73261 *len = 0;
73262
73263- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
73264+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
73265 if (*octets == NULL)
73266 return 0;
73267
73268diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
73269index 43d4c3b..1914409 100644
73270--- a/net/ipv4/ping.c
73271+++ b/net/ipv4/ping.c
73272@@ -836,7 +836,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
73273 sk_rmem_alloc_get(sp),
73274 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73275 atomic_read(&sp->sk_refcnt), sp,
73276- atomic_read(&sp->sk_drops), len);
73277+ atomic_read_unchecked(&sp->sk_drops), len);
73278 }
73279
73280 static int ping_seq_show(struct seq_file *seq, void *v)
73281diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
73282index 007e2eb..85a18a0 100644
73283--- a/net/ipv4/raw.c
73284+++ b/net/ipv4/raw.c
73285@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
73286 int raw_rcv(struct sock *sk, struct sk_buff *skb)
73287 {
73288 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
73289- atomic_inc(&sk->sk_drops);
73290+ atomic_inc_unchecked(&sk->sk_drops);
73291 kfree_skb(skb);
73292 return NET_RX_DROP;
73293 }
73294@@ -738,16 +738,20 @@ static int raw_init(struct sock *sk)
73295
73296 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
73297 {
73298+ struct icmp_filter filter;
73299+
73300 if (optlen > sizeof(struct icmp_filter))
73301 optlen = sizeof(struct icmp_filter);
73302- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
73303+ if (copy_from_user(&filter, optval, optlen))
73304 return -EFAULT;
73305+ raw_sk(sk)->filter = filter;
73306 return 0;
73307 }
73308
73309 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
73310 {
73311 int len, ret = -EFAULT;
73312+ struct icmp_filter filter;
73313
73314 if (get_user(len, optlen))
73315 goto out;
73316@@ -757,8 +761,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
73317 if (len > sizeof(struct icmp_filter))
73318 len = sizeof(struct icmp_filter);
73319 ret = -EFAULT;
73320- if (put_user(len, optlen) ||
73321- copy_to_user(optval, &raw_sk(sk)->filter, len))
73322+ filter = raw_sk(sk)->filter;
73323+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
73324 goto out;
73325 ret = 0;
73326 out: return ret;
73327@@ -986,7 +990,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
73328 sk_wmem_alloc_get(sp),
73329 sk_rmem_alloc_get(sp),
73330 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73331- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
73332+ atomic_read(&sp->sk_refcnt),
73333+#ifdef CONFIG_GRKERNSEC_HIDESYM
73334+ NULL,
73335+#else
73336+ sp,
73337+#endif
73338+ atomic_read_unchecked(&sp->sk_drops));
73339 }
73340
73341 static int raw_seq_show(struct seq_file *seq, void *v)
73342diff --git a/net/ipv4/route.c b/net/ipv4/route.c
73343index 94cdbc5..0cb0063 100644
73344--- a/net/ipv4/route.c
73345+++ b/net/ipv4/route.c
73346@@ -313,7 +313,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
73347
73348 static inline int rt_genid(struct net *net)
73349 {
73350- return atomic_read(&net->ipv4.rt_genid);
73351+ return atomic_read_unchecked(&net->ipv4.rt_genid);
73352 }
73353
73354 #ifdef CONFIG_PROC_FS
73355@@ -937,7 +937,7 @@ static void rt_cache_invalidate(struct net *net)
73356 unsigned char shuffle;
73357
73358 get_random_bytes(&shuffle, sizeof(shuffle));
73359- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
73360+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
73361 redirect_genid++;
73362 }
73363
73364@@ -3022,7 +3022,7 @@ static int rt_fill_info(struct net *net,
73365 error = rt->dst.error;
73366 if (peer) {
73367 inet_peer_refcheck(rt->peer);
73368- id = atomic_read(&peer->ip_id_count) & 0xffff;
73369+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
73370 if (peer->tcp_ts_stamp) {
73371 ts = peer->tcp_ts;
73372 tsage = get_seconds() - peer->tcp_ts_stamp;
73373diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
73374index eb90aa8..22bf114 100644
73375--- a/net/ipv4/tcp_ipv4.c
73376+++ b/net/ipv4/tcp_ipv4.c
73377@@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
73378 int sysctl_tcp_low_latency __read_mostly;
73379 EXPORT_SYMBOL(sysctl_tcp_low_latency);
73380
73381+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73382+extern int grsec_enable_blackhole;
73383+#endif
73384
73385 #ifdef CONFIG_TCP_MD5SIG
73386 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
73387@@ -1632,6 +1635,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
73388 return 0;
73389
73390 reset:
73391+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73392+ if (!grsec_enable_blackhole)
73393+#endif
73394 tcp_v4_send_reset(rsk, skb);
73395 discard:
73396 kfree_skb(skb);
73397@@ -1694,12 +1700,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
73398 TCP_SKB_CB(skb)->sacked = 0;
73399
73400 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73401- if (!sk)
73402+ if (!sk) {
73403+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73404+ ret = 1;
73405+#endif
73406 goto no_tcp_socket;
73407-
73408+ }
73409 process:
73410- if (sk->sk_state == TCP_TIME_WAIT)
73411+ if (sk->sk_state == TCP_TIME_WAIT) {
73412+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73413+ ret = 2;
73414+#endif
73415 goto do_time_wait;
73416+ }
73417
73418 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
73419 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73420@@ -1749,6 +1762,10 @@ no_tcp_socket:
73421 bad_packet:
73422 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73423 } else {
73424+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73425+ if (!grsec_enable_blackhole || (ret == 1 &&
73426+ (skb->dev->flags & IFF_LOOPBACK)))
73427+#endif
73428 tcp_v4_send_reset(NULL, skb);
73429 }
73430
73431@@ -2409,7 +2426,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
73432 0, /* non standard timer */
73433 0, /* open_requests have no inode */
73434 atomic_read(&sk->sk_refcnt),
73435+#ifdef CONFIG_GRKERNSEC_HIDESYM
73436+ NULL,
73437+#else
73438 req,
73439+#endif
73440 len);
73441 }
73442
73443@@ -2459,7 +2480,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
73444 sock_i_uid(sk),
73445 icsk->icsk_probes_out,
73446 sock_i_ino(sk),
73447- atomic_read(&sk->sk_refcnt), sk,
73448+ atomic_read(&sk->sk_refcnt),
73449+#ifdef CONFIG_GRKERNSEC_HIDESYM
73450+ NULL,
73451+#else
73452+ sk,
73453+#endif
73454 jiffies_to_clock_t(icsk->icsk_rto),
73455 jiffies_to_clock_t(icsk->icsk_ack.ato),
73456 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
73457@@ -2487,7 +2513,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
73458 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
73459 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
73460 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73461- atomic_read(&tw->tw_refcnt), tw, len);
73462+ atomic_read(&tw->tw_refcnt),
73463+#ifdef CONFIG_GRKERNSEC_HIDESYM
73464+ NULL,
73465+#else
73466+ tw,
73467+#endif
73468+ len);
73469 }
73470
73471 #define TMPSZ 150
73472diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
73473index 66363b6..b0654a3 100644
73474--- a/net/ipv4/tcp_minisocks.c
73475+++ b/net/ipv4/tcp_minisocks.c
73476@@ -27,6 +27,10 @@
73477 #include <net/inet_common.h>
73478 #include <net/xfrm.h>
73479
73480+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73481+extern int grsec_enable_blackhole;
73482+#endif
73483+
73484 int sysctl_tcp_syncookies __read_mostly = 1;
73485 EXPORT_SYMBOL(sysctl_tcp_syncookies);
73486
73487@@ -751,6 +755,10 @@ listen_overflow:
73488
73489 embryonic_reset:
73490 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
73491+
73492+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73493+ if (!grsec_enable_blackhole)
73494+#endif
73495 if (!(flg & TCP_FLAG_RST))
73496 req->rsk_ops->send_reset(sk, skb);
73497
73498diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
73499index 85ee7eb..53277ab 100644
73500--- a/net/ipv4/tcp_probe.c
73501+++ b/net/ipv4/tcp_probe.c
73502@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
73503 if (cnt + width >= len)
73504 break;
73505
73506- if (copy_to_user(buf + cnt, tbuf, width))
73507+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
73508 return -EFAULT;
73509 cnt += width;
73510 }
73511diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
73512index 2e0f0af..e2948bf 100644
73513--- a/net/ipv4/tcp_timer.c
73514+++ b/net/ipv4/tcp_timer.c
73515@@ -22,6 +22,10 @@
73516 #include <linux/gfp.h>
73517 #include <net/tcp.h>
73518
73519+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73520+extern int grsec_lastack_retries;
73521+#endif
73522+
73523 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
73524 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
73525 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
73526@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock *sk)
73527 }
73528 }
73529
73530+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73531+ if ((sk->sk_state == TCP_LAST_ACK) &&
73532+ (grsec_lastack_retries > 0) &&
73533+ (grsec_lastack_retries < retry_until))
73534+ retry_until = grsec_lastack_retries;
73535+#endif
73536+
73537 if (retransmits_timed_out(sk, retry_until,
73538 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
73539 /* Has it gone just too far? */
73540diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
73541index 5a65eea..bd913a1 100644
73542--- a/net/ipv4/udp.c
73543+++ b/net/ipv4/udp.c
73544@@ -86,6 +86,7 @@
73545 #include <linux/types.h>
73546 #include <linux/fcntl.h>
73547 #include <linux/module.h>
73548+#include <linux/security.h>
73549 #include <linux/socket.h>
73550 #include <linux/sockios.h>
73551 #include <linux/igmp.h>
73552@@ -108,6 +109,10 @@
73553 #include <trace/events/udp.h>
73554 #include "udp_impl.h"
73555
73556+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73557+extern int grsec_enable_blackhole;
73558+#endif
73559+
73560 struct udp_table udp_table __read_mostly;
73561 EXPORT_SYMBOL(udp_table);
73562
73563@@ -565,6 +570,9 @@ found:
73564 return s;
73565 }
73566
73567+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
73568+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
73569+
73570 /*
73571 * This routine is called by the ICMP module when it gets some
73572 * sort of error condition. If err < 0 then the socket should
73573@@ -856,9 +864,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
73574 dport = usin->sin_port;
73575 if (dport == 0)
73576 return -EINVAL;
73577+
73578+ err = gr_search_udp_sendmsg(sk, usin);
73579+ if (err)
73580+ return err;
73581 } else {
73582 if (sk->sk_state != TCP_ESTABLISHED)
73583 return -EDESTADDRREQ;
73584+
73585+ err = gr_search_udp_sendmsg(sk, NULL);
73586+ if (err)
73587+ return err;
73588+
73589 daddr = inet->inet_daddr;
73590 dport = inet->inet_dport;
73591 /* Open fast path for connected socket.
73592@@ -1099,7 +1116,7 @@ static unsigned int first_packet_length(struct sock *sk)
73593 udp_lib_checksum_complete(skb)) {
73594 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
73595 IS_UDPLITE(sk));
73596- atomic_inc(&sk->sk_drops);
73597+ atomic_inc_unchecked(&sk->sk_drops);
73598 __skb_unlink(skb, rcvq);
73599 __skb_queue_tail(&list_kill, skb);
73600 }
73601@@ -1185,6 +1202,10 @@ try_again:
73602 if (!skb)
73603 goto out;
73604
73605+ err = gr_search_udp_recvmsg(sk, skb);
73606+ if (err)
73607+ goto out_free;
73608+
73609 ulen = skb->len - sizeof(struct udphdr);
73610 copied = len;
73611 if (copied > ulen)
73612@@ -1487,7 +1508,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
73613
73614 drop:
73615 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73616- atomic_inc(&sk->sk_drops);
73617+ atomic_inc_unchecked(&sk->sk_drops);
73618 kfree_skb(skb);
73619 return -1;
73620 }
73621@@ -1506,7 +1527,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
73622 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
73623
73624 if (!skb1) {
73625- atomic_inc(&sk->sk_drops);
73626+ atomic_inc_unchecked(&sk->sk_drops);
73627 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
73628 IS_UDPLITE(sk));
73629 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
73630@@ -1675,6 +1696,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73631 goto csum_error;
73632
73633 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
73634+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73635+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73636+#endif
73637 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
73638
73639 /*
73640@@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
73641 sk_wmem_alloc_get(sp),
73642 sk_rmem_alloc_get(sp),
73643 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73644- atomic_read(&sp->sk_refcnt), sp,
73645- atomic_read(&sp->sk_drops), len);
73646+ atomic_read(&sp->sk_refcnt),
73647+#ifdef CONFIG_GRKERNSEC_HIDESYM
73648+ NULL,
73649+#else
73650+ sp,
73651+#endif
73652+ atomic_read_unchecked(&sp->sk_drops), len);
73653 }
73654
73655 int udp4_seq_show(struct seq_file *seq, void *v)
73656diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
73657index 836c4ea..cbb74dc 100644
73658--- a/net/ipv6/addrconf.c
73659+++ b/net/ipv6/addrconf.c
73660@@ -2149,7 +2149,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
73661 p.iph.ihl = 5;
73662 p.iph.protocol = IPPROTO_IPV6;
73663 p.iph.ttl = 64;
73664- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
73665+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
73666
73667 if (ops->ndo_do_ioctl) {
73668 mm_segment_t oldfs = get_fs();
73669diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
73670index 1567fb1..29af910 100644
73671--- a/net/ipv6/inet6_connection_sock.c
73672+++ b/net/ipv6/inet6_connection_sock.c
73673@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
73674 #ifdef CONFIG_XFRM
73675 {
73676 struct rt6_info *rt = (struct rt6_info *)dst;
73677- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
73678+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
73679 }
73680 #endif
73681 }
73682@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
73683 #ifdef CONFIG_XFRM
73684 if (dst) {
73685 struct rt6_info *rt = (struct rt6_info *)dst;
73686- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
73687+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
73688 __sk_dst_reset(sk);
73689 dst = NULL;
73690 }
73691diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
73692index 26cb08c..8af9877 100644
73693--- a/net/ipv6/ipv6_sockglue.c
73694+++ b/net/ipv6/ipv6_sockglue.c
73695@@ -960,7 +960,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
73696 if (sk->sk_type != SOCK_STREAM)
73697 return -ENOPROTOOPT;
73698
73699- msg.msg_control = optval;
73700+ msg.msg_control = (void __force_kernel *)optval;
73701 msg.msg_controllen = len;
73702 msg.msg_flags = flags;
73703
73704diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
73705index 361ebf3..d5628fb 100644
73706--- a/net/ipv6/raw.c
73707+++ b/net/ipv6/raw.c
73708@@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
73709 {
73710 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
73711 skb_checksum_complete(skb)) {
73712- atomic_inc(&sk->sk_drops);
73713+ atomic_inc_unchecked(&sk->sk_drops);
73714 kfree_skb(skb);
73715 return NET_RX_DROP;
73716 }
73717@@ -404,7 +404,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
73718 struct raw6_sock *rp = raw6_sk(sk);
73719
73720 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
73721- atomic_inc(&sk->sk_drops);
73722+ atomic_inc_unchecked(&sk->sk_drops);
73723 kfree_skb(skb);
73724 return NET_RX_DROP;
73725 }
73726@@ -428,7 +428,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
73727
73728 if (inet->hdrincl) {
73729 if (skb_checksum_complete(skb)) {
73730- atomic_inc(&sk->sk_drops);
73731+ atomic_inc_unchecked(&sk->sk_drops);
73732 kfree_skb(skb);
73733 return NET_RX_DROP;
73734 }
73735@@ -601,7 +601,7 @@ out:
73736 return err;
73737 }
73738
73739-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
73740+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
73741 struct flowi6 *fl6, struct dst_entry **dstp,
73742 unsigned int flags)
73743 {
73744@@ -909,12 +909,15 @@ do_confirm:
73745 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
73746 char __user *optval, int optlen)
73747 {
73748+ struct icmp6_filter filter;
73749+
73750 switch (optname) {
73751 case ICMPV6_FILTER:
73752 if (optlen > sizeof(struct icmp6_filter))
73753 optlen = sizeof(struct icmp6_filter);
73754- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
73755+ if (copy_from_user(&filter, optval, optlen))
73756 return -EFAULT;
73757+ raw6_sk(sk)->filter = filter;
73758 return 0;
73759 default:
73760 return -ENOPROTOOPT;
73761@@ -927,6 +930,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
73762 char __user *optval, int __user *optlen)
73763 {
73764 int len;
73765+ struct icmp6_filter filter;
73766
73767 switch (optname) {
73768 case ICMPV6_FILTER:
73769@@ -938,7 +942,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
73770 len = sizeof(struct icmp6_filter);
73771 if (put_user(len, optlen))
73772 return -EFAULT;
73773- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
73774+ filter = raw6_sk(sk)->filter;
73775+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
73776 return -EFAULT;
73777 return 0;
73778 default:
73779@@ -1245,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
73780 0, 0L, 0,
73781 sock_i_uid(sp), 0,
73782 sock_i_ino(sp),
73783- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
73784+ atomic_read(&sp->sk_refcnt),
73785+#ifdef CONFIG_GRKERNSEC_HIDESYM
73786+ NULL,
73787+#else
73788+ sp,
73789+#endif
73790+ atomic_read_unchecked(&sp->sk_drops));
73791 }
73792
73793 static int raw6_seq_show(struct seq_file *seq, void *v)
73794diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
73795index b859e4a..f9d1589 100644
73796--- a/net/ipv6/tcp_ipv6.c
73797+++ b/net/ipv6/tcp_ipv6.c
73798@@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
73799 }
73800 #endif
73801
73802+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73803+extern int grsec_enable_blackhole;
73804+#endif
73805+
73806 static void tcp_v6_hash(struct sock *sk)
73807 {
73808 if (sk->sk_state != TCP_CLOSE) {
73809@@ -1651,6 +1655,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
73810 return 0;
73811
73812 reset:
73813+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73814+ if (!grsec_enable_blackhole)
73815+#endif
73816 tcp_v6_send_reset(sk, skb);
73817 discard:
73818 if (opt_skb)
73819@@ -1730,12 +1737,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
73820 TCP_SKB_CB(skb)->sacked = 0;
73821
73822 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73823- if (!sk)
73824+ if (!sk) {
73825+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73826+ ret = 1;
73827+#endif
73828 goto no_tcp_socket;
73829+ }
73830
73831 process:
73832- if (sk->sk_state == TCP_TIME_WAIT)
73833+ if (sk->sk_state == TCP_TIME_WAIT) {
73834+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73835+ ret = 2;
73836+#endif
73837 goto do_time_wait;
73838+ }
73839
73840 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
73841 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73842@@ -1783,6 +1798,10 @@ no_tcp_socket:
73843 bad_packet:
73844 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73845 } else {
73846+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73847+ if (!grsec_enable_blackhole || (ret == 1 &&
73848+ (skb->dev->flags & IFF_LOOPBACK)))
73849+#endif
73850 tcp_v6_send_reset(NULL, skb);
73851 }
73852
73853@@ -2043,7 +2062,13 @@ static void get_openreq6(struct seq_file *seq,
73854 uid,
73855 0, /* non standard timer */
73856 0, /* open_requests have no inode */
73857- 0, req);
73858+ 0,
73859+#ifdef CONFIG_GRKERNSEC_HIDESYM
73860+ NULL
73861+#else
73862+ req
73863+#endif
73864+ );
73865 }
73866
73867 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73868@@ -2093,7 +2118,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73869 sock_i_uid(sp),
73870 icsk->icsk_probes_out,
73871 sock_i_ino(sp),
73872- atomic_read(&sp->sk_refcnt), sp,
73873+ atomic_read(&sp->sk_refcnt),
73874+#ifdef CONFIG_GRKERNSEC_HIDESYM
73875+ NULL,
73876+#else
73877+ sp,
73878+#endif
73879 jiffies_to_clock_t(icsk->icsk_rto),
73880 jiffies_to_clock_t(icsk->icsk_ack.ato),
73881 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
73882@@ -2128,7 +2158,13 @@ static void get_timewait6_sock(struct seq_file *seq,
73883 dest->s6_addr32[2], dest->s6_addr32[3], destp,
73884 tw->tw_substate, 0, 0,
73885 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73886- atomic_read(&tw->tw_refcnt), tw);
73887+ atomic_read(&tw->tw_refcnt),
73888+#ifdef CONFIG_GRKERNSEC_HIDESYM
73889+ NULL
73890+#else
73891+ tw
73892+#endif
73893+ );
73894 }
73895
73896 static int tcp6_seq_show(struct seq_file *seq, void *v)
73897diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
73898index 8c25419..47a51ae 100644
73899--- a/net/ipv6/udp.c
73900+++ b/net/ipv6/udp.c
73901@@ -50,6 +50,10 @@
73902 #include <linux/seq_file.h>
73903 #include "udp_impl.h"
73904
73905+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73906+extern int grsec_enable_blackhole;
73907+#endif
73908+
73909 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
73910 {
73911 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
73912@@ -549,7 +553,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
73913
73914 return 0;
73915 drop:
73916- atomic_inc(&sk->sk_drops);
73917+ atomic_inc_unchecked(&sk->sk_drops);
73918 drop_no_sk_drops_inc:
73919 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73920 kfree_skb(skb);
73921@@ -625,7 +629,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
73922 continue;
73923 }
73924 drop:
73925- atomic_inc(&sk->sk_drops);
73926+ atomic_inc_unchecked(&sk->sk_drops);
73927 UDP6_INC_STATS_BH(sock_net(sk),
73928 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
73929 UDP6_INC_STATS_BH(sock_net(sk),
73930@@ -780,6 +784,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73931 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
73932 proto == IPPROTO_UDPLITE);
73933
73934+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73935+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73936+#endif
73937 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
73938
73939 kfree_skb(skb);
73940@@ -796,7 +803,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73941 if (!sock_owned_by_user(sk))
73942 udpv6_queue_rcv_skb(sk, skb);
73943 else if (sk_add_backlog(sk, skb)) {
73944- atomic_inc(&sk->sk_drops);
73945+ atomic_inc_unchecked(&sk->sk_drops);
73946 bh_unlock_sock(sk);
73947 sock_put(sk);
73948 goto discard;
73949@@ -1407,8 +1414,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
73950 0, 0L, 0,
73951 sock_i_uid(sp), 0,
73952 sock_i_ino(sp),
73953- atomic_read(&sp->sk_refcnt), sp,
73954- atomic_read(&sp->sk_drops));
73955+ atomic_read(&sp->sk_refcnt),
73956+#ifdef CONFIG_GRKERNSEC_HIDESYM
73957+ NULL,
73958+#else
73959+ sp,
73960+#endif
73961+ atomic_read_unchecked(&sp->sk_drops));
73962 }
73963
73964 int udp6_seq_show(struct seq_file *seq, void *v)
73965diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
73966index 253695d..9481ce8 100644
73967--- a/net/irda/ircomm/ircomm_tty.c
73968+++ b/net/irda/ircomm/ircomm_tty.c
73969@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73970 add_wait_queue(&self->open_wait, &wait);
73971
73972 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
73973- __FILE__,__LINE__, tty->driver->name, self->open_count );
73974+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73975
73976 /* As far as I can see, we protect open_count - Jean II */
73977 spin_lock_irqsave(&self->spinlock, flags);
73978 if (!tty_hung_up_p(filp)) {
73979 extra_count = 1;
73980- self->open_count--;
73981+ local_dec(&self->open_count);
73982 }
73983 spin_unlock_irqrestore(&self->spinlock, flags);
73984- self->blocked_open++;
73985+ local_inc(&self->blocked_open);
73986
73987 while (1) {
73988 if (tty->termios->c_cflag & CBAUD) {
73989@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73990 }
73991
73992 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
73993- __FILE__,__LINE__, tty->driver->name, self->open_count );
73994+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73995
73996 schedule();
73997 }
73998@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73999 if (extra_count) {
74000 /* ++ is not atomic, so this should be protected - Jean II */
74001 spin_lock_irqsave(&self->spinlock, flags);
74002- self->open_count++;
74003+ local_inc(&self->open_count);
74004 spin_unlock_irqrestore(&self->spinlock, flags);
74005 }
74006- self->blocked_open--;
74007+ local_dec(&self->blocked_open);
74008
74009 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
74010- __FILE__,__LINE__, tty->driver->name, self->open_count);
74011+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
74012
74013 if (!retval)
74014 self->flags |= ASYNC_NORMAL_ACTIVE;
74015@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
74016 }
74017 /* ++ is not atomic, so this should be protected - Jean II */
74018 spin_lock_irqsave(&self->spinlock, flags);
74019- self->open_count++;
74020+ local_inc(&self->open_count);
74021
74022 tty->driver_data = self;
74023 self->tty = tty;
74024 spin_unlock_irqrestore(&self->spinlock, flags);
74025
74026 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
74027- self->line, self->open_count);
74028+ self->line, local_read(&self->open_count));
74029
74030 /* Not really used by us, but lets do it anyway */
74031 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
74032@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
74033 return;
74034 }
74035
74036- if ((tty->count == 1) && (self->open_count != 1)) {
74037+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
74038 /*
74039 * Uh, oh. tty->count is 1, which means that the tty
74040 * structure will be freed. state->count should always
74041@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
74042 */
74043 IRDA_DEBUG(0, "%s(), bad serial port count; "
74044 "tty->count is 1, state->count is %d\n", __func__ ,
74045- self->open_count);
74046- self->open_count = 1;
74047+ local_read(&self->open_count));
74048+ local_set(&self->open_count, 1);
74049 }
74050
74051- if (--self->open_count < 0) {
74052+ if (local_dec_return(&self->open_count) < 0) {
74053 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
74054- __func__, self->line, self->open_count);
74055- self->open_count = 0;
74056+ __func__, self->line, local_read(&self->open_count));
74057+ local_set(&self->open_count, 0);
74058 }
74059- if (self->open_count) {
74060+ if (local_read(&self->open_count)) {
74061 spin_unlock_irqrestore(&self->spinlock, flags);
74062
74063 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
74064@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
74065 tty->closing = 0;
74066 self->tty = NULL;
74067
74068- if (self->blocked_open) {
74069+ if (local_read(&self->blocked_open)) {
74070 if (self->close_delay)
74071 schedule_timeout_interruptible(self->close_delay);
74072 wake_up_interruptible(&self->open_wait);
74073@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
74074 spin_lock_irqsave(&self->spinlock, flags);
74075 self->flags &= ~ASYNC_NORMAL_ACTIVE;
74076 self->tty = NULL;
74077- self->open_count = 0;
74078+ local_set(&self->open_count, 0);
74079 spin_unlock_irqrestore(&self->spinlock, flags);
74080
74081 wake_up_interruptible(&self->open_wait);
74082@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
74083 seq_putc(m, '\n');
74084
74085 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
74086- seq_printf(m, "Open count: %d\n", self->open_count);
74087+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
74088 seq_printf(m, "Max data size: %d\n", self->max_data_size);
74089 seq_printf(m, "Max header size: %d\n", self->max_header_size);
74090
74091diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
74092index 274d150..656a144 100644
74093--- a/net/iucv/af_iucv.c
74094+++ b/net/iucv/af_iucv.c
74095@@ -787,10 +787,10 @@ static int iucv_sock_autobind(struct sock *sk)
74096
74097 write_lock_bh(&iucv_sk_list.lock);
74098
74099- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
74100+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
74101 while (__iucv_get_sock_by_name(name)) {
74102 sprintf(name, "%08x",
74103- atomic_inc_return(&iucv_sk_list.autobind_name));
74104+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
74105 }
74106
74107 write_unlock_bh(&iucv_sk_list.lock);
74108diff --git a/net/key/af_key.c b/net/key/af_key.c
74109index 1e733e9..3d73c9f 100644
74110--- a/net/key/af_key.c
74111+++ b/net/key/af_key.c
74112@@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
74113 static u32 get_acqseq(void)
74114 {
74115 u32 res;
74116- static atomic_t acqseq;
74117+ static atomic_unchecked_t acqseq;
74118
74119 do {
74120- res = atomic_inc_return(&acqseq);
74121+ res = atomic_inc_return_unchecked(&acqseq);
74122 } while (!res);
74123 return res;
74124 }
74125diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
74126index 73495f1..ad51356 100644
74127--- a/net/mac80211/ieee80211_i.h
74128+++ b/net/mac80211/ieee80211_i.h
74129@@ -27,6 +27,7 @@
74130 #include <net/ieee80211_radiotap.h>
74131 #include <net/cfg80211.h>
74132 #include <net/mac80211.h>
74133+#include <asm/local.h>
74134 #include "key.h"
74135 #include "sta_info.h"
74136
74137@@ -764,7 +765,7 @@ struct ieee80211_local {
74138 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
74139 spinlock_t queue_stop_reason_lock;
74140
74141- int open_count;
74142+ local_t open_count;
74143 int monitors, cooked_mntrs;
74144 /* number of interfaces with corresponding FIF_ flags */
74145 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
74146diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
74147index 30d7355..e260095 100644
74148--- a/net/mac80211/iface.c
74149+++ b/net/mac80211/iface.c
74150@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
74151 break;
74152 }
74153
74154- if (local->open_count == 0) {
74155+ if (local_read(&local->open_count) == 0) {
74156 res = drv_start(local);
74157 if (res)
74158 goto err_del_bss;
74159@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
74160 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
74161
74162 if (!is_valid_ether_addr(dev->dev_addr)) {
74163- if (!local->open_count)
74164+ if (!local_read(&local->open_count))
74165 drv_stop(local);
74166 return -EADDRNOTAVAIL;
74167 }
74168@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
74169 mutex_unlock(&local->mtx);
74170
74171 if (coming_up)
74172- local->open_count++;
74173+ local_inc(&local->open_count);
74174
74175 if (hw_reconf_flags) {
74176 ieee80211_hw_config(local, hw_reconf_flags);
74177@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
74178 err_del_interface:
74179 drv_remove_interface(local, &sdata->vif);
74180 err_stop:
74181- if (!local->open_count)
74182+ if (!local_read(&local->open_count))
74183 drv_stop(local);
74184 err_del_bss:
74185 sdata->bss = NULL;
74186@@ -472,7 +472,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
74187 }
74188
74189 if (going_down)
74190- local->open_count--;
74191+ local_dec(&local->open_count);
74192
74193 switch (sdata->vif.type) {
74194 case NL80211_IFTYPE_AP_VLAN:
74195@@ -531,7 +531,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
74196
74197 ieee80211_recalc_ps(local, -1);
74198
74199- if (local->open_count == 0) {
74200+ if (local_read(&local->open_count) == 0) {
74201 if (local->ops->napi_poll)
74202 napi_disable(&local->napi);
74203 ieee80211_clear_tx_pending(local);
74204diff --git a/net/mac80211/main.c b/net/mac80211/main.c
74205index 7d9b21d..0687004 100644
74206--- a/net/mac80211/main.c
74207+++ b/net/mac80211/main.c
74208@@ -163,7 +163,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
74209 local->hw.conf.power_level = power;
74210 }
74211
74212- if (changed && local->open_count) {
74213+ if (changed && local_read(&local->open_count)) {
74214 ret = drv_config(local, changed);
74215 /*
74216 * Goal:
74217diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
74218index 9ee7164..56c5061 100644
74219--- a/net/mac80211/pm.c
74220+++ b/net/mac80211/pm.c
74221@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
74222 struct ieee80211_sub_if_data *sdata;
74223 struct sta_info *sta;
74224
74225- if (!local->open_count)
74226+ if (!local_read(&local->open_count))
74227 goto suspend;
74228
74229 ieee80211_scan_cancel(local);
74230@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
74231 cancel_work_sync(&local->dynamic_ps_enable_work);
74232 del_timer_sync(&local->dynamic_ps_timer);
74233
74234- local->wowlan = wowlan && local->open_count;
74235+ local->wowlan = wowlan && local_read(&local->open_count);
74236 if (local->wowlan) {
74237 int err = drv_suspend(local, wowlan);
74238 if (err < 0) {
74239@@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
74240 }
74241
74242 /* stop hardware - this must stop RX */
74243- if (local->open_count)
74244+ if (local_read(&local->open_count))
74245 ieee80211_stop_device(local);
74246
74247 suspend:
74248diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
74249index 5a5a776..9600b11 100644
74250--- a/net/mac80211/rate.c
74251+++ b/net/mac80211/rate.c
74252@@ -401,7 +401,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
74253
74254 ASSERT_RTNL();
74255
74256- if (local->open_count)
74257+ if (local_read(&local->open_count))
74258 return -EBUSY;
74259
74260 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
74261diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
74262index c97a065..ff61928 100644
74263--- a/net/mac80211/rc80211_pid_debugfs.c
74264+++ b/net/mac80211/rc80211_pid_debugfs.c
74265@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
74266
74267 spin_unlock_irqrestore(&events->lock, status);
74268
74269- if (copy_to_user(buf, pb, p))
74270+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
74271 return -EFAULT;
74272
74273 return p;
74274diff --git a/net/mac80211/util.c b/net/mac80211/util.c
74275index d5230ec..c604b21 100644
74276--- a/net/mac80211/util.c
74277+++ b/net/mac80211/util.c
74278@@ -1000,7 +1000,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
74279 drv_set_coverage_class(local, hw->wiphy->coverage_class);
74280
74281 /* everything else happens only if HW was up & running */
74282- if (!local->open_count)
74283+ if (!local_read(&local->open_count))
74284 goto wake_up;
74285
74286 /*
74287diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
74288index d5597b7..ab6d39c 100644
74289--- a/net/netfilter/Kconfig
74290+++ b/net/netfilter/Kconfig
74291@@ -779,6 +779,16 @@ config NETFILTER_XT_MATCH_ESP
74292
74293 To compile it as a module, choose M here. If unsure, say N.
74294
74295+config NETFILTER_XT_MATCH_GRADM
74296+ tristate '"gradm" match support'
74297+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
74298+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
74299+ ---help---
74300+ The gradm match allows to match on grsecurity RBAC being enabled.
74301+ It is useful when iptables rules are applied early on bootup to
74302+ prevent connections to the machine (except from a trusted host)
74303+ while the RBAC system is disabled.
74304+
74305 config NETFILTER_XT_MATCH_HASHLIMIT
74306 tristate '"hashlimit" match support'
74307 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
74308diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
74309index 1a02853..5d8c22e 100644
74310--- a/net/netfilter/Makefile
74311+++ b/net/netfilter/Makefile
74312@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
74313 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
74314 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
74315 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
74316+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
74317 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
74318 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
74319 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
74320diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
74321index 29fa5ba..8debc79 100644
74322--- a/net/netfilter/ipvs/ip_vs_conn.c
74323+++ b/net/netfilter/ipvs/ip_vs_conn.c
74324@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
74325 /* Increase the refcnt counter of the dest */
74326 atomic_inc(&dest->refcnt);
74327
74328- conn_flags = atomic_read(&dest->conn_flags);
74329+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
74330 if (cp->protocol != IPPROTO_UDP)
74331 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
74332 /* Bind with the destination and its corresponding transmitter */
74333@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
74334 atomic_set(&cp->refcnt, 1);
74335
74336 atomic_set(&cp->n_control, 0);
74337- atomic_set(&cp->in_pkts, 0);
74338+ atomic_set_unchecked(&cp->in_pkts, 0);
74339
74340 atomic_inc(&ipvs->conn_count);
74341 if (flags & IP_VS_CONN_F_NO_CPORT)
74342@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
74343
74344 /* Don't drop the entry if its number of incoming packets is not
74345 located in [0, 8] */
74346- i = atomic_read(&cp->in_pkts);
74347+ i = atomic_read_unchecked(&cp->in_pkts);
74348 if (i > 8 || i < 0) return 0;
74349
74350 if (!todrop_rate[i]) return 0;
74351diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
74352index 6dc7d7d..e45913a 100644
74353--- a/net/netfilter/ipvs/ip_vs_core.c
74354+++ b/net/netfilter/ipvs/ip_vs_core.c
74355@@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
74356 ret = cp->packet_xmit(skb, cp, pd->pp);
74357 /* do not touch skb anymore */
74358
74359- atomic_inc(&cp->in_pkts);
74360+ atomic_inc_unchecked(&cp->in_pkts);
74361 ip_vs_conn_put(cp);
74362 return ret;
74363 }
74364@@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
74365 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
74366 pkts = sysctl_sync_threshold(ipvs);
74367 else
74368- pkts = atomic_add_return(1, &cp->in_pkts);
74369+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
74370
74371 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
74372 cp->protocol == IPPROTO_SCTP) {
74373diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
74374index e1a66cf..0910076 100644
74375--- a/net/netfilter/ipvs/ip_vs_ctl.c
74376+++ b/net/netfilter/ipvs/ip_vs_ctl.c
74377@@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
74378 ip_vs_rs_hash(ipvs, dest);
74379 write_unlock_bh(&ipvs->rs_lock);
74380 }
74381- atomic_set(&dest->conn_flags, conn_flags);
74382+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
74383
74384 /* bind the service */
74385 if (!dest->svc) {
74386@@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
74387 " %-7s %-6d %-10d %-10d\n",
74388 &dest->addr.in6,
74389 ntohs(dest->port),
74390- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
74391+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
74392 atomic_read(&dest->weight),
74393 atomic_read(&dest->activeconns),
74394 atomic_read(&dest->inactconns));
74395@@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
74396 "%-7s %-6d %-10d %-10d\n",
74397 ntohl(dest->addr.ip),
74398 ntohs(dest->port),
74399- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
74400+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
74401 atomic_read(&dest->weight),
74402 atomic_read(&dest->activeconns),
74403 atomic_read(&dest->inactconns));
74404@@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
74405
74406 entry.addr = dest->addr.ip;
74407 entry.port = dest->port;
74408- entry.conn_flags = atomic_read(&dest->conn_flags);
74409+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
74410 entry.weight = atomic_read(&dest->weight);
74411 entry.u_threshold = dest->u_threshold;
74412 entry.l_threshold = dest->l_threshold;
74413@@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
74414 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
74415
74416 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
74417- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
74418+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
74419 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
74420 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
74421 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
74422diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
74423index 2b6678c0..aaa41fc 100644
74424--- a/net/netfilter/ipvs/ip_vs_sync.c
74425+++ b/net/netfilter/ipvs/ip_vs_sync.c
74426@@ -649,7 +649,7 @@ control:
74427 * i.e only increment in_pkts for Templates.
74428 */
74429 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
74430- int pkts = atomic_add_return(1, &cp->in_pkts);
74431+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
74432
74433 if (pkts % sysctl_sync_period(ipvs) != 1)
74434 return;
74435@@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
74436
74437 if (opt)
74438 memcpy(&cp->in_seq, opt, sizeof(*opt));
74439- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
74440+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
74441 cp->state = state;
74442 cp->old_state = cp->state;
74443 /*
74444diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
74445index aa2d720..d8aa111 100644
74446--- a/net/netfilter/ipvs/ip_vs_xmit.c
74447+++ b/net/netfilter/ipvs/ip_vs_xmit.c
74448@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
74449 else
74450 rc = NF_ACCEPT;
74451 /* do not touch skb anymore */
74452- atomic_inc(&cp->in_pkts);
74453+ atomic_inc_unchecked(&cp->in_pkts);
74454 goto out;
74455 }
74456
74457@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
74458 else
74459 rc = NF_ACCEPT;
74460 /* do not touch skb anymore */
74461- atomic_inc(&cp->in_pkts);
74462+ atomic_inc_unchecked(&cp->in_pkts);
74463 goto out;
74464 }
74465
74466diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
74467index 66b2c54..c7884e3 100644
74468--- a/net/netfilter/nfnetlink_log.c
74469+++ b/net/netfilter/nfnetlink_log.c
74470@@ -70,7 +70,7 @@ struct nfulnl_instance {
74471 };
74472
74473 static DEFINE_SPINLOCK(instances_lock);
74474-static atomic_t global_seq;
74475+static atomic_unchecked_t global_seq;
74476
74477 #define INSTANCE_BUCKETS 16
74478 static struct hlist_head instance_table[INSTANCE_BUCKETS];
74479@@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
74480 /* global sequence number */
74481 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
74482 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
74483- htonl(atomic_inc_return(&global_seq)));
74484+ htonl(atomic_inc_return_unchecked(&global_seq)));
74485
74486 if (data_len) {
74487 struct nlattr *nla;
74488diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
74489new file mode 100644
74490index 0000000..6905327
74491--- /dev/null
74492+++ b/net/netfilter/xt_gradm.c
74493@@ -0,0 +1,51 @@
74494+/*
74495+ * gradm match for netfilter
74496