grsecurity-3.0-3.16.2-201409112237.patch
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index 9de9813..1462492 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -3,9 +3,11 @@
6 *.bc
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -15,6 +17,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -51,14 +54,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -72,9 +78,11 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52+TRACEEVENT-CFLAGS
53 aconf
54 af_names.h
55 aic7*reg.h*
56@@ -83,6 +91,7 @@ aic7*seq.h*
57 aicasm
58 aicdb.h*
59 altivec*.c
60+ashldi3.S
61 asm-offsets.h
62 asm_offsets.h
63 autoconf.h*
64@@ -95,32 +104,40 @@ bounds.h
65 bsetup
66 btfixupprep
67 build
68+builtin-policy.h
69 bvmlinux
70 bzImage*
71 capability_names.h
72 capflags.c
73 classlist.h*
74+clut_vga16.c
75+common-cmds.h
76 comp*.log
77 compile.h*
78 conf
79 config
80 config-*
81 config_data.h*
82+config.c
83 config.mak
84 config.mak.autogen
85+config.tmp
86 conmakehash
87 consolemap_deftbl.c*
88 cpustr.h
89 crc32table.h*
90 cscope.*
91 defkeymap.c
92+devicetable-offsets.h
93 devlist.h*
94 dnotify_test
95 docproc
96 dslm
97+dtc-lexer.lex.c
98 elf2ecoff
99 elfconfig.h*
100 evergreen_reg_safe.h
101+exception_policy.conf
102 fixdep
103 flask.h
104 fore200e_mkfirm
105@@ -128,12 +145,15 @@ fore200e_pca_fw.c*
106 gconf
107 gconf.glade.h
108 gen-devlist
109+gen-kdb_cmds.c
110 gen_crc32table
111 gen_init_cpio
112 generated
113 genheaders
114 genksyms
115 *_gray256.c
116+hash
117+hid-example
118 hpet_example
119 hugepage-mmap
120 hugepage-shm
121@@ -148,14 +168,14 @@ int32.c
122 int4.c
123 int8.c
124 kallsyms
125-kconfig
126+kern_constants.h
127 keywords.c
128 ksym.c*
129 ksym.h*
130 kxgettext
131 lex.c
132 lex.*.c
133-linux
134+lib1funcs.S
135 logo_*.c
136 logo_*_clut224.c
137 logo_*_mono.c
138@@ -165,14 +185,15 @@ mach-types.h
139 machtypes.h
140 map
141 map_hugetlb
142-media
143 mconf
144+mdp
145 miboot*
146 mk_elfconfig
147 mkboot
148 mkbugboot
149 mkcpustr
150 mkdep
151+mkpiggy
152 mkprep
153 mkregtable
154 mktables
155@@ -188,6 +209,8 @@ oui.c*
156 page-types
157 parse.c
158 parse.h
159+parse-events*
160+pasyms.h
161 patches*
162 pca200e.bin
163 pca200e_ecd.bin2
164@@ -197,6 +220,7 @@ perf-archive
165 piggyback
166 piggy.gzip
167 piggy.S
168+pmu-*
169 pnmtologo
170 ppc_defs.h*
171 pss_boot.h
172@@ -206,7 +230,12 @@ r200_reg_safe.h
173 r300_reg_safe.h
174 r420_reg_safe.h
175 r600_reg_safe.h
176+randomize_layout_hash.h
177+randomize_layout_seed.h
178+realmode.lds
179+realmode.relocs
180 recordmcount
181+regdb.c
182 relocs
183 rlim_names.h
184 rn50_reg_safe.h
185@@ -216,8 +245,12 @@ series
186 setup
187 setup.bin
188 setup.elf
189+signing_key*
190+size_overflow_hash.h
191 sImage
192+slabinfo
193 sm_tbl*
194+sortextable
195 split-include
196 syscalltab.h
197 tables.c
198@@ -227,6 +260,7 @@ tftpboot.img
199 timeconst.h
200 times.h*
201 trix_boot.h
202+user_constants.h
203 utsrelease.h*
204 vdso-syms.lds
205 vdso.lds
206@@ -238,13 +272,17 @@ vdso32.lds
207 vdso32.so.dbg
208 vdso64.lds
209 vdso64.so.dbg
210+vdsox32.lds
211+vdsox32-syms.lds
212 version.h*
213 vmImage
214 vmlinux
215 vmlinux-*
216 vmlinux.aout
217 vmlinux.bin.all
218+vmlinux.bin.bz2
219 vmlinux.lds
220+vmlinux.relocs
221 vmlinuz
222 voffset.h
223 vsyscall.lds
224@@ -252,9 +290,12 @@ vsyscall_32.lds
225 wanxlfw.inc
226 uImage
227 unifdef
228+utsrelease.h
229 wakeup.bin
230 wakeup.elf
231 wakeup.lds
232+x509*
233 zImage*
234 zconf.hash.c
235+zconf.lex.c
236 zoffset.h
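Everything in dontdiff exists so that generated build products never leak into hand-made patches: the file is passed to GNU diff via its -X/--exclude-from option when comparing a pristine and a patched tree, which is why the grsecurity/PaX build artifacts (size_overflow_hash.h, randomize_layout_seed.h, x509*, and friends) are added above. A typical invocation, with illustrative directory names:

    diff -uprN -X linux-3.16.2/Documentation/dontdiff \
        linux-3.16.2/ linux-3.16.2-grsec/ > grsec.diff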
237diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
238index b7fa2f5..90cd9f8 100644
239--- a/Documentation/kernel-parameters.txt
240+++ b/Documentation/kernel-parameters.txt
241@@ -1138,6 +1138,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
242 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
243 Default: 1024
244
245+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
246+ ignore grsecurity's /proc restrictions
247+
248+
249 hashdist= [KNL,NUMA] Large hashes allocated during boot
250 are distributed across NUMA nodes. Defaults on
251 for 64-bit NUMA, off otherwise.
252@@ -2141,6 +2145,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
253 noexec=on: enable non-executable mappings (default)
254 noexec=off: disable non-executable mappings
255
256+ nopcid [X86-64]
257+ Disable PCID (Process-Context IDentifier) even if it
258+ is supported by the processor.
259+
260 nosmap [X86]
261 Disable SMAP (Supervisor Mode Access Prevention)
262 even if it is supported by processor.
263@@ -2418,6 +2426,25 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
264 the specified number of seconds. This is to be used if
265 your oopses keep scrolling off the screen.
266
267+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
268+ virtualization environments that don't cope well with the
269+ expand down segment used by UDEREF on X86-32 or the frequent
270+ page table updates on X86-64.
271+
272+ pax_sanitize_slab=
273+ 0/1 to disable/enable slab object sanitization (enabled by
274+ default).
275+
276+	pax_softmode=	0/1 to disable/enable PaX softmode at boot time.
277+
278+ pax_extra_latent_entropy
279+ Enable a very simple form of latent entropy extraction
280+ from the first 4GB of memory as the bootmem allocator
281+ passes the memory pages to the buddy allocator.
282+
283+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
284+ when the processor supports PCID.
285+
286 pcbit= [HW,ISDN]
287
288 pcd. [PARIDE]
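All of the parameters documented above are plain kernel command-line tokens, so they can be combined in a single boot entry; a sketch (the GID value is an arbitrary example and must match the group configured for GRKERNSEC_PROC_USERGROUP):

    linux /vmlinuz-3.16.2-grsec root=/dev/sda1 ro \
        grsec_proc_gid=1001 pax_softmode=0 pax_sanitize_slab=1 pax_extra_latent_entropy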
289diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
290index ee78eba..a06b48d 100644
291--- a/Documentation/networking/filter.txt
292+++ b/Documentation/networking/filter.txt
293@@ -277,11 +277,10 @@ Possible BPF extensions are shown in the following table:
294 mark skb->mark
295 queue skb->queue_mapping
296 hatype skb->dev->type
297- rxhash skb->hash
298+ rxhash skb->rxhash
299 cpu raw_smp_processor_id()
300 vlan_tci vlan_tx_tag_get(skb)
301 vlan_pr vlan_tx_tag_present(skb)
302- rand prandom_u32()
303
304 These extensions can also be prefixed with '#'.
305 Examples for low-level BPF:
306@@ -309,18 +308,6 @@ Examples for low-level BPF:
307 ret #-1
308 drop: ret #0
309
310-** icmp random packet sampling, 1 in 4
311- ldh [12]
312- jne #0x800, drop
313- ldb [23]
314- jneq #1, drop
315- # get a random uint32 number
316- ld rand
317- mod #4
318- jneq #1, drop
319- ret #-1
320- drop: ret #0
321-
322 ** SECCOMP filter example:
323
324 ld [4] /* offsetof(struct seccomp_data, arch) */
325@@ -559,456 +546,6 @@ ffffffffa0069c8f + <x>:
326 For BPF JIT developers, bpf_jit_disasm, bpf_asm and bpf_dbg provides a useful
327 toolchain for developing and testing the kernel's JIT compiler.
328
329-BPF kernel internals
330---------------------
331-Internally, for the kernel interpreter, a different instruction set
332-format with similar underlying principles from BPF described in previous
333-paragraphs is being used. However, the instruction set format is modelled
334-closer to the underlying architecture to mimic native instruction sets, so
335-that a better performance can be achieved (more details later). This new
336-ISA is called 'eBPF' or 'internal BPF' interchangeably. (Note: eBPF which
337-originates from [e]xtended BPF is not the same as BPF extensions! While
338-eBPF is an ISA, BPF extensions date back to classic BPF's 'overloading'
339-of BPF_LD | BPF_{B,H,W} | BPF_ABS instruction.)
340-
341-It is designed to be JITed with one to one mapping, which can also open up
342-the possibility for GCC/LLVM compilers to generate optimized eBPF code through
343-an eBPF backend that performs almost as fast as natively compiled code.
344-
345-The new instruction set was originally designed with the possible goal in
346-mind to write programs in "restricted C" and compile into eBPF with an optional
347-GCC/LLVM backend, so that it can just-in-time map to modern 64-bit CPUs with
348-minimal performance overhead over two steps, that is, C -> eBPF -> native code.
349-
350-Currently, the new format is being used for running user BPF programs, which
351-includes seccomp BPF, classic socket filters, cls_bpf traffic classifier,
352-team driver's classifier for its load-balancing mode, netfilter's xt_bpf
353-extension, PTP dissector/classifier, and much more. They are all internally
354-converted by the kernel into the new instruction set representation and run
355-in the eBPF interpreter. For in-kernel handlers, this all works transparently
356-by using sk_unattached_filter_create() for setting up the filter, resp.
357-sk_unattached_filter_destroy() for destroying it. The macro
358-SK_RUN_FILTER(filter, ctx) transparently invokes eBPF interpreter or JITed
359-code to run the filter. 'filter' is a pointer to struct sk_filter that we
360-got from sk_unattached_filter_create(), and 'ctx' the given context (e.g.
361-skb pointer). All constraints and restrictions from sk_chk_filter() apply
362-before a conversion to the new layout is being done behind the scenes!
363-
364-Currently, the classic BPF format is being used for JITing on most of the
365-architectures. Only x86-64 performs JIT compilation from eBPF instruction set,
366-however, future work will migrate other JIT compilers as well, so that they
367-will profit from the very same benefits.
368-
369-Some core changes of the new internal format:
370-
371-- Number of registers increase from 2 to 10:
372-
373- The old format had two registers A and X, and a hidden frame pointer. The
374- new layout extends this to be 10 internal registers and a read-only frame
375- pointer. Since 64-bit CPUs are passing arguments to functions via registers
376- the number of args from eBPF program to in-kernel function is restricted
377- to 5 and one register is used to accept return value from an in-kernel
378- function. Natively, x86_64 passes first 6 arguments in registers, aarch64/
379- sparcv9/mips64 have 7 - 8 registers for arguments; x86_64 has 6 callee saved
380- registers, and aarch64/sparcv9/mips64 have 11 or more callee saved registers.
381-
382- Therefore, eBPF calling convention is defined as:
383-
384- * R0 - return value from in-kernel function, and exit value for eBPF program
385- * R1 - R5 - arguments from eBPF program to in-kernel function
386- * R6 - R9 - callee saved registers that in-kernel function will preserve
387- * R10 - read-only frame pointer to access stack
388-
389- Thus, all eBPF registers map one to one to HW registers on x86_64, aarch64,
390- etc, and eBPF calling convention maps directly to ABIs used by the kernel on
391- 64-bit architectures.
392-
393- On 32-bit architectures JIT may map programs that use only 32-bit arithmetic
394- and may leave more complex programs to be interpreted.
395-
396- R0 - R5 are scratch registers and the eBPF program needs to spill/fill them if
397- necessary across calls. Note that there is only one eBPF program (== one
398- eBPF main routine) and it cannot call other eBPF functions, it can only
399- call predefined in-kernel functions, though.
400-
401-- Register width increases from 32-bit to 64-bit:
402-
403- Still, the semantics of the original 32-bit ALU operations are preserved
404- via 32-bit subregisters. All eBPF registers are 64-bit with 32-bit lower
405- subregisters that zero-extend into 64-bit if they are being written to.
406- That behavior maps directly to x86_64 and arm64 subregister definition, but
407- makes other JITs more difficult.
408-
409- 32-bit architectures run 64-bit internal BPF programs via interpreter.
410- Their JITs may convert BPF programs that only use 32-bit subregisters into
411- native instruction set and let the rest be interpreted.
412-
413- Operation is 64-bit, because on 64-bit architectures, pointers are also
414- 64-bit wide, and we want to pass 64-bit values in/out of kernel functions,
415- so 32-bit eBPF registers would otherwise require defining a register-pair
416- ABI; thus, it would not be possible to use a direct eBPF register to HW register
417- mapping and JIT would need to do combine/split/move operations for every
418- register in and out of the function, which is complex, bug prone and slow.
419- Another reason is the use of atomic 64-bit counters.
420-
421-- Conditional jt/jf targets replaced with jt/fall-through:
422-
423- While the original design has constructs such as "if (cond) jump_true;
424- else jump_false;", they are being replaced into alternative constructs like
425- "if (cond) jump_true; /* else fall-through */".
426-
427-- Introduces bpf_call insn and register passing convention for zero overhead
428- calls from/to other kernel functions:
429-
430- Before an in-kernel function call, the internal BPF program needs to
431- place function arguments into R1 to R5 registers to satisfy calling
432- convention, then the interpreter will take them from registers and pass
433- to in-kernel function. If R1 - R5 registers are mapped to CPU registers
434- that are used for argument passing on given architecture, the JIT compiler
435- doesn't need to emit extra moves. Function arguments will be in the correct
436- registers and BPF_CALL instruction will be JITed as single 'call' HW
437- instruction. This calling convention was picked to cover common call
438- situations without performance penalty.
439-
440- After an in-kernel function call, R1 - R5 are reset to unreadable and R0 has
441- a return value of the function. Since R6 - R9 are callee saved, their state
442- is preserved across the call.
443-
444- For example, consider three C functions:
445-
446- u64 f1() { return (*_f2)(1); }
447- u64 f2(u64 a) { return f3(a + 1, a); }
448- u64 f3(u64 a, u64 b) { return a - b; }
449-
450- GCC can compile f1, f3 into x86_64:
451-
452- f1:
453- movl $1, %edi
454- movq _f2(%rip), %rax
455- jmp *%rax
456- f3:
457- movq %rdi, %rax
458- subq %rsi, %rax
459- ret
460-
461- Function f2 in eBPF may look like:
462-
463- f2:
464- bpf_mov R2, R1
465- bpf_add R1, 1
466- bpf_call f3
467- bpf_exit
468-
469- If f2 is JITed and the pointer stored to '_f2', the calls f1 -> f2 -> f3 and
470- returns will be seamless. Without JIT, __sk_run_filter() interpreter needs to
471- be used to call into f2.
472-
473- For practical reasons all eBPF programs have only one argument 'ctx' which is
474- already placed into R1 (e.g. on __sk_run_filter() startup) and the programs
475- can call kernel functions with up to 5 arguments. Calls with 6 or more arguments
476- are currently not supported, but these restrictions can be lifted if necessary
477- in the future.
478-
479- On 64-bit architectures all registers map to HW registers one to one. For
480- example, x86_64 JIT compiler can map them as ...
481-
482- R0 - rax
483- R1 - rdi
484- R2 - rsi
485- R3 - rdx
486- R4 - rcx
487- R5 - r8
488- R6 - rbx
489- R7 - r13
490- R8 - r14
491- R9 - r15
492- R10 - rbp
493-
494- ... since x86_64 ABI mandates rdi, rsi, rdx, rcx, r8, r9 for argument passing
495- and rbx, r12 - r15 are callee saved.
496-
497- Then the following internal BPF pseudo-program:
498-
499- bpf_mov R6, R1 /* save ctx */
500- bpf_mov R2, 2
501- bpf_mov R3, 3
502- bpf_mov R4, 4
503- bpf_mov R5, 5
504- bpf_call foo
505- bpf_mov R7, R0 /* save foo() return value */
506- bpf_mov R1, R6 /* restore ctx for next call */
507- bpf_mov R2, 6
508- bpf_mov R3, 7
509- bpf_mov R4, 8
510- bpf_mov R5, 9
511- bpf_call bar
512- bpf_add R0, R7
513- bpf_exit
514-
515- After JIT to x86_64 the code may look like:
516-
517- push %rbp
518- mov %rsp,%rbp
519- sub $0x228,%rsp
520- mov %rbx,-0x228(%rbp)
521- mov %r13,-0x220(%rbp)
522- mov %rdi,%rbx
523- mov $0x2,%esi
524- mov $0x3,%edx
525- mov $0x4,%ecx
526- mov $0x5,%r8d
527- callq foo
528- mov %rax,%r13
529- mov %rbx,%rdi
530- mov $0x2,%esi
531- mov $0x3,%edx
532- mov $0x4,%ecx
533- mov $0x5,%r8d
534- callq bar
535- add %r13,%rax
536- mov -0x228(%rbp),%rbx
537- mov -0x220(%rbp),%r13
538- leaveq
539- retq
540-
541- Which in this example is equivalent in C to:
542-
543- u64 bpf_filter(u64 ctx)
544- {
545- return foo(ctx, 2, 3, 4, 5) + bar(ctx, 6, 7, 8, 9);
546- }
547-
548- In-kernel functions foo() and bar() with prototype: u64 (*)(u64 arg1, u64
549- arg2, u64 arg3, u64 arg4, u64 arg5); will receive arguments in proper
550- registers and place their return value into '%rax' which is R0 in eBPF.
551- Prologue and epilogue are emitted by JIT and are implicit in the
552- interpreter. R0-R5 are scratch registers, so eBPF program needs to preserve
553- them across the calls as defined by calling convention.
554-
555- For example the following program is invalid:
556-
557- bpf_mov R1, 1
558- bpf_call foo
559- bpf_mov R0, R1
560- bpf_exit
561-
562- After the call the registers R1-R5 contain junk values and cannot be read.
563- In the future an eBPF verifier can be used to validate internal BPF programs.
564-
565-Also in the new design, eBPF is limited to 4096 insns, which means that any
566-program will terminate quickly and will only call a fixed number of kernel
567-functions. Original BPF and the new format are two operand instructions,
568-which helps to do one-to-one mapping between eBPF insn and x86 insn during JIT.
569-
570-The input context pointer for invoking the interpreter function is generic,
571-its content is defined by a specific use case. For seccomp register R1 points
572-to seccomp_data, for converted BPF filters R1 points to a skb.
573-
574-A program that is translated internally consists of the following elements:
575-
576- op:16, jt:8, jf:8, k:32 ==> op:8, dst_reg:4, src_reg:4, off:16, imm:32
577-
578-So far, 87 internal BPF instructions have been implemented. The 8-bit 'op' field
579-has room for new instructions, some of which may use 16/24/32 byte encodings.
580-New instructions must be a multiple of 8 bytes to preserve backward compatibility.
581-
582-Internal BPF is a general purpose RISC instruction set. Not every register and
583-every instruction are used during translation from original BPF to new format.
584-For example, socket filters are not using the 'exclusive add' instruction, but
585-tracing filters may do so to maintain event counters. Register R9
586-is not used by socket filters either, but more complex filters may be running
587-out of registers and would have to resort to spill/fill to stack.
588-
589-Internal BPF can be used as a generic assembler for last-step performance
590-optimizations; socket filters and seccomp already use it this way. Tracing
591-filters may use it as an assembler to generate code from the kernel. In-kernel
592-usage may not be bounded by security considerations, since generated internal
593-BPF code may optimize an internal code path without ever being exposed to
594-user space. Safety of internal BPF can come from a verifier (TBD). In such
595-use cases, it may be used as a safe instruction set.
596-
597-Just like the original BPF, the new format runs within a controlled environment,
598-is deterministic and the kernel can easily prove that. The safety of the program
599-can be determined in two steps: first step does depth-first-search to disallow
600-loops and other CFG validation; second step starts from the first insn and
601-descends all possible paths. It simulates execution of every insn and observes
602-the state change of registers and stack.
603-
604-eBPF opcode encoding
605---------------------
606-
607-eBPF is reusing most of the opcode encoding from classic to simplify conversion
608-of classic BPF to eBPF. For arithmetic and jump instructions the 8-bit 'code'
609-field is divided into three parts:
610-
611- +----------------+--------+--------------------+
612- | 4 bits | 1 bit | 3 bits |
613- | operation code | source | instruction class |
614- +----------------+--------+--------------------+
615- (MSB) (LSB)
616-
617-Three LSB bits store instruction class which is one of:
618-
619- Classic BPF classes: eBPF classes:
620-
621- BPF_LD 0x00 BPF_LD 0x00
622- BPF_LDX 0x01 BPF_LDX 0x01
623- BPF_ST 0x02 BPF_ST 0x02
624- BPF_STX 0x03 BPF_STX 0x03
625- BPF_ALU 0x04 BPF_ALU 0x04
626- BPF_JMP 0x05 BPF_JMP 0x05
627- BPF_RET 0x06 [ class 6 unused, for future if needed ]
628- BPF_MISC 0x07 BPF_ALU64 0x07
629-
630-When BPF_CLASS(code) == BPF_ALU or BPF_JMP, 4th bit encodes source operand ...
631-
632- BPF_K 0x00
633- BPF_X 0x08
634-
635- * in classic BPF, this means:
636-
637- BPF_SRC(code) == BPF_X - use register X as source operand
638- BPF_SRC(code) == BPF_K - use 32-bit immediate as source operand
639-
640- * in eBPF, this means:
641-
642- BPF_SRC(code) == BPF_X - use 'src_reg' register as source operand
643- BPF_SRC(code) == BPF_K - use 32-bit immediate as source operand
644-
645-... and four MSB bits store operation code.
646-
647-If BPF_CLASS(code) == BPF_ALU or BPF_ALU64 [ in eBPF ], BPF_OP(code) is one of:
648-
649- BPF_ADD 0x00
650- BPF_SUB 0x10
651- BPF_MUL 0x20
652- BPF_DIV 0x30
653- BPF_OR 0x40
654- BPF_AND 0x50
655- BPF_LSH 0x60
656- BPF_RSH 0x70
657- BPF_NEG 0x80
658- BPF_MOD 0x90
659- BPF_XOR 0xa0
660- BPF_MOV 0xb0 /* eBPF only: mov reg to reg */
661- BPF_ARSH 0xc0 /* eBPF only: sign extending shift right */
662- BPF_END 0xd0 /* eBPF only: endianness conversion */
663-
664-If BPF_CLASS(code) == BPF_JMP, BPF_OP(code) is one of:
665-
666- BPF_JA 0x00
667- BPF_JEQ 0x10
668- BPF_JGT 0x20
669- BPF_JGE 0x30
670- BPF_JSET 0x40
671- BPF_JNE 0x50 /* eBPF only: jump != */
672- BPF_JSGT 0x60 /* eBPF only: signed '>' */
673- BPF_JSGE 0x70 /* eBPF only: signed '>=' */
674- BPF_CALL 0x80 /* eBPF only: function call */
675- BPF_EXIT 0x90 /* eBPF only: function return */
676-
677-So BPF_ADD | BPF_X | BPF_ALU means 32-bit addition in both classic BPF
678-and eBPF. There are only two registers in classic BPF, so it means A += X.
679-In eBPF it means dst_reg = (u32) dst_reg + (u32) src_reg; similarly,
680-BPF_XOR | BPF_K | BPF_ALU means A ^= imm32 in classic BPF and analogous
681-dst_reg = (u32) dst_reg ^ (u32) imm32 in eBPF.
682-
683-Classic BPF is using BPF_MISC class to represent A = X and X = A moves.
684-eBPF is using BPF_MOV | BPF_X | BPF_ALU code instead. Since there are no
685-BPF_MISC operations in eBPF, the class 7 is used as BPF_ALU64 to mean
686-exactly the same operations as BPF_ALU, but with 64-bit wide operands
687-instead. So BPF_ADD | BPF_X | BPF_ALU64 means 64-bit addition, i.e.:
688-dst_reg = dst_reg + src_reg
689-
690-Classic BPF wastes the whole BPF_RET class to represent a single 'ret'
691-operation. Classic BPF_RET | BPF_K means copy imm32 into return register
692-and perform function exit. eBPF is modeled to match CPU, so BPF_JMP | BPF_EXIT
693-in eBPF means function exit only. The eBPF program needs to store return
694-value into register R0 before doing a BPF_EXIT. Class 6 in eBPF is currently
695-unused and reserved for future use.
696-
697-For load and store instructions the 8-bit 'code' field is divided as:
698-
699- +--------+--------+-------------------+
700- | 3 bits | 2 bits | 3 bits |
701- | mode | size | instruction class |
702- +--------+--------+-------------------+
703- (MSB) (LSB)
704-
705-Size modifier is one of ...
706-
707- BPF_W 0x00 /* word */
708- BPF_H 0x08 /* half word */
709- BPF_B 0x10 /* byte */
710- BPF_DW 0x18 /* eBPF only, double word */
711-
712-... which encodes size of load/store operation:
713-
714- B - 1 byte
715- H - 2 byte
716- W - 4 byte
717- DW - 8 byte (eBPF only)
718-
719-Mode modifier is one of:
720-
721- BPF_IMM 0x00 /* classic BPF only, reserved in eBPF */
722- BPF_ABS 0x20
723- BPF_IND 0x40
724- BPF_MEM 0x60
725- BPF_LEN 0x80 /* classic BPF only, reserved in eBPF */
726- BPF_MSH 0xa0 /* classic BPF only, reserved in eBPF */
727- BPF_XADD 0xc0 /* eBPF only, exclusive add */
728-
729-eBPF has two non-generic instructions: (BPF_ABS | <size> | BPF_LD) and
730-(BPF_IND | <size> | BPF_LD) which are used to access packet data.
731-
732-They had to be carried over from classic to have strong performance of
733-socket filters running in eBPF interpreter. These instructions can only
734-be used when interpreter context is a pointer to 'struct sk_buff' and
735-have seven implicit operands. Register R6 is an implicit input that must
736-contain pointer to sk_buff. Register R0 is an implicit output which contains
737-the data fetched from the packet. Registers R1-R5 are scratch registers
738-and must not be used to store the data across BPF_ABS | BPF_LD or
739-BPF_IND | BPF_LD instructions.
740-
741-These instructions have implicit program exit condition as well. When
742-eBPF program is trying to access the data beyond the packet boundary,
743-the interpreter will abort the execution of the program. JIT compilers
744-therefore must preserve this property. src_reg and imm32 fields are
745-explicit inputs to these instructions.
746-
747-For example:
748-
749- BPF_IND | BPF_W | BPF_LD means:
750-
751- R0 = ntohl(*(u32 *) (((struct sk_buff *) R6)->data + src_reg + imm32))
752- and R1 - R5 were scratched.
753-
754-Unlike classic BPF instruction set, eBPF has generic load/store operations:
755-
756-BPF_MEM | <size> | BPF_STX: *(size *) (dst_reg + off) = src_reg
757-BPF_MEM | <size> | BPF_ST: *(size *) (dst_reg + off) = imm32
758-BPF_MEM | <size> | BPF_LDX: dst_reg = *(size *) (src_reg + off)
759-BPF_XADD | BPF_W | BPF_STX: lock xadd *(u32 *)(dst_reg + off16) += src_reg
760-BPF_XADD | BPF_DW | BPF_STX: lock xadd *(u64 *)(dst_reg + off16) += src_reg
761-
762-Where size is one of: BPF_B or BPF_H or BPF_W or BPF_DW. Note that 1 and
763-2 byte atomic increments are not supported.
764-
765-Testing
766--------
767-
768-Next to the BPF toolchain, the kernel also ships a test module that contains
769-various test cases for classic and internal BPF that can be executed against
770-the BPF interpreter and JIT compiler. It can be found in lib/test_bpf.c and
771-enabled via Kconfig:
772-
773- CONFIG_TEST_BPF=m
774-
775-After the module has been built and installed, the test suite can be executed
776-via insmod or modprobe against 'test_bpf' module. Results of the test cases
777-including timings in nsec can be found in the kernel log (dmesg).
778-
779 Misc
780 ----
781
782@@ -1024,4 +561,3 @@ the underlying architecture.
783
784 Jay Schulist <jschlst@samba.org>
785 Daniel Borkmann <dborkman@redhat.com>
786-Alexei Starovoitov <ast@plumgrid.com>
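Although the eBPF reference text is dropped from this file by the hunks above, the classic opcode layout it described (three LSB class bits, one source bit, four MSB operation bits) is still exercised through the BPF_CLASS/BPF_OP/BPF_SRC accessor macros from <linux/filter.h>. A minimal userspace sketch decoding the classic 'A += X' instruction:

    #include <stdio.h>
    #include <linux/filter.h>   /* BPF_ALU, BPF_ADD, BPF_X, BPF_CLASS(), ... */

    int main(void)
    {
        __u16 code = BPF_ALU | BPF_ADD | BPF_X;   /* classic BPF: A += X */

        /* Low 3 bits: class; bit 3: source; high 4 bits: operation. */
        printf("class=%#x src=%#x op=%#x\n",
               BPF_CLASS(code), BPF_SRC(code), BPF_OP(code));
        return 0;   /* prints: class=0x4 src=0x8 op=0 */
    }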
787diff --git a/Makefile b/Makefile
788index c261752..7b9958b 100644
789--- a/Makefile
790+++ b/Makefile
791@@ -303,8 +303,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
792
793 HOSTCC = gcc
794 HOSTCXX = g++
795-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
796-HOSTCXXFLAGS = -O2
797+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
798+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
799+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
800
801 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
802 HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
803@@ -449,8 +450,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
804 # Rules shared between *config targets and build targets
805
806 # Basic helpers built in scripts/
807-PHONY += scripts_basic
808-scripts_basic:
809+PHONY += scripts_basic gcc-plugins
810+scripts_basic: gcc-plugins
811 $(Q)$(MAKE) $(build)=scripts/basic
812 $(Q)rm -f .tmp_quiet_recordmcount
813
814@@ -621,6 +622,75 @@ else
815 KBUILD_CFLAGS += -O2
816 endif
817
818+# Tell gcc to never replace conditional load with a non-conditional one
819+KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
820+
821+ifndef DISABLE_PAX_PLUGINS
822+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
823+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
824+else
825+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
826+endif
827+ifneq ($(PLUGINCC),)
828+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
829+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
830+endif
831+ifdef CONFIG_PAX_MEMORY_STACKLEAK
832+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
833+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
834+endif
835+ifdef CONFIG_KALLOCSTAT_PLUGIN
836+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
837+endif
838+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
839+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
840+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
841+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
842+endif
843+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
844+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
845+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
846+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
847+endif
848+endif
849+ifdef CONFIG_CHECKER_PLUGIN
850+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
851+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
852+endif
853+endif
854+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
855+ifdef CONFIG_PAX_SIZE_OVERFLOW
856+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
857+endif
858+ifdef CONFIG_PAX_LATENT_ENTROPY
859+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
860+endif
861+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
862+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
863+endif
864+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
865+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
866+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
867+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
868+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
869+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
870+ifeq ($(KBUILD_EXTMOD),)
871+gcc-plugins:
872+ $(Q)$(MAKE) $(build)=tools/gcc
873+else
874+gcc-plugins: ;
875+endif
876+else
877+gcc-plugins:
878+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
879+	$(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)
880+else
881+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
882+endif
883+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
884+endif
885+endif
886+
887 ifdef CONFIG_READABLE_ASM
888 # Disable optimizations that make assembler listings hard to read.
889 # reorder blocks reorders the control in the function
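The probe the block above performs through scripts/gcc-plugin.sh can be reproduced by hand: gcc reports its plugin header directory via -print-file-name, and the $(error) text already names the opt-out for toolchains without plugin support. For example:

    gcc -print-file-name=plugin    # must contain include/ with the plugin headers
    make DISABLE_PAX_PLUGINS=y     # build without the gcc-plugin based features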
890@@ -839,7 +909,7 @@ export mod_sign_cmd
891
892
893 ifeq ($(KBUILD_EXTMOD),)
894-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
895+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
896
897 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
898 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
899@@ -888,6 +958,8 @@ endif
900
901 # The actual objects are generated when descending,
902 # make sure no implicit rule kicks in
903+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
904+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
905 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
906
907 # Handle descending into subdirectories listed in $(vmlinux-dirs)
908@@ -897,7 +969,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
909 # Error messages still appears in the original language
910
911 PHONY += $(vmlinux-dirs)
912-$(vmlinux-dirs): prepare scripts
913+$(vmlinux-dirs): gcc-plugins prepare scripts
914 $(Q)$(MAKE) $(build)=$@
915
916 define filechk_kernel.release
917@@ -940,10 +1012,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
918
919 archprepare: archheaders archscripts prepare1 scripts_basic
920
921+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
922+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
923 prepare0: archprepare FORCE
924 $(Q)$(MAKE) $(build)=.
925
926 # All the preparing..
927+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
928 prepare: prepare0
929
930 # Generate some files
931@@ -1051,6 +1126,8 @@ all: modules
932 # using awk while concatenating to the final file.
933
934 PHONY += modules
935+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
936+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
937 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
938 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
939 @$(kecho) ' Building modules, stage 2.';
940@@ -1066,7 +1143,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
941
942 # Target to prepare building external modules
943 PHONY += modules_prepare
944-modules_prepare: prepare scripts
945+modules_prepare: gcc-plugins prepare scripts
946
947 # Target to install modules
948 PHONY += modules_install
949@@ -1132,7 +1209,10 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
950 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
951 signing_key.priv signing_key.x509 x509.genkey \
952 extra_certificates signing_key.x509.keyid \
953- signing_key.x509.signer include/linux/version.h
954+ signing_key.x509.signer include/linux/version.h \
955+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
956+ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
957+ tools/gcc/randomize_layout_seed.h
958
959 # clean - Delete most, but leave enough to build external modules
960 #
961@@ -1171,7 +1251,7 @@ distclean: mrproper
962 @find $(srctree) $(RCS_FIND_IGNORE) \
963 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
964 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
965- -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
966+ -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
967 -type f -print | xargs rm -f
968
969
970@@ -1332,6 +1412,8 @@ PHONY += $(module-dirs) modules
971 $(module-dirs): crmodverdir $(objtree)/Module.symvers
972 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
973
974+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
975+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
976 modules: $(module-dirs)
977 @$(kecho) ' Building modules, stage 2.';
978 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
979@@ -1471,17 +1553,21 @@ else
980 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
981 endif
982
983-%.s: %.c prepare scripts FORCE
984+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
985+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
986+%.s: %.c gcc-plugins prepare scripts FORCE
987 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
988 %.i: %.c prepare scripts FORCE
989 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
990-%.o: %.c prepare scripts FORCE
991+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
992+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
993+%.o: %.c gcc-plugins prepare scripts FORCE
994 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
995 %.lst: %.c prepare scripts FORCE
996 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
997-%.s: %.S prepare scripts FORCE
998+%.s: %.S gcc-plugins prepare scripts FORCE
999 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
1000-%.o: %.S prepare scripts FORCE
1001+%.o: %.S gcc-plugins prepare scripts FORCE
1002 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
1003 %.symtypes: %.c prepare scripts FORCE
1004 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
1005@@ -1491,11 +1577,15 @@ endif
1006 $(cmd_crmodverdir)
1007 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
1008 $(build)=$(build-dir)
1009-%/: prepare scripts FORCE
1010+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
1011+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
1012+%/: gcc-plugins prepare scripts FORCE
1013 $(cmd_crmodverdir)
1014 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
1015 $(build)=$(build-dir)
1016-%.ko: prepare scripts FORCE
1017+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
1018+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
1019+%.ko: gcc-plugins prepare scripts FORCE
1020 $(cmd_crmodverdir)
1021 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
1022 $(build)=$(build-dir) $(@:.ko=.o)
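Since the %.o and %.ko pattern rules now depend on gcc-plugins and append GCC_PLUGINS_CFLAGS/GCC_PLUGINS_AFLAGS, out-of-tree modules built against this tree receive the same plugin instrumentation as the kernel proper; the usual kbuild invocation is unchanged:

    make -C /lib/modules/$(uname -r)/build M=$PWD modules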
1023diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
1024index ed60a1e..47f1a55 100644
1025--- a/arch/alpha/include/asm/atomic.h
1026+++ b/arch/alpha/include/asm/atomic.h
1027@@ -292,4 +292,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
1028 #define atomic_dec(v) atomic_sub(1,(v))
1029 #define atomic64_dec(v) atomic64_sub(1,(v))
1030
1031+#define atomic64_read_unchecked(v) atomic64_read(v)
1032+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1033+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1034+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1035+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1036+#define atomic64_inc_unchecked(v) atomic64_inc(v)
1037+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1038+#define atomic64_dec_unchecked(v) atomic64_dec(v)
1039+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1040+
1041 #endif /* _ALPHA_ATOMIC_H */
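Alpha carries no PAX_REFCOUNT instrumentation, so the *_unchecked entry points added above simply alias the plain atomics; the split API exists so that code can mark counters which are allowed to wrap, while real reference counts stay on the checked variants that trap on overflow where an architecture implements them. A sketch of the intended usage (the structure is hypothetical):

    /* Hypothetical example: wrapping statistics vs. a reference count. */
    struct conn_stats {
        atomic64_unchecked_t tx_bytes;  /* may legitimately wrap */
        atomic64_t refs;                /* overflow here would be a bug */
    };

    static void conn_account(struct conn_stats *s, long bytes)
    {
        atomic64_add_unchecked(bytes, &s->tx_bytes); /* never trapped */
        atomic64_inc(&s->refs);                      /* checked under PAX_REFCOUNT */
    }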
1042diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
1043index ad368a9..fbe0f25 100644
1044--- a/arch/alpha/include/asm/cache.h
1045+++ b/arch/alpha/include/asm/cache.h
1046@@ -4,19 +4,19 @@
1047 #ifndef __ARCH_ALPHA_CACHE_H
1048 #define __ARCH_ALPHA_CACHE_H
1049
1050+#include <linux/const.h>
1051
1052 /* Bytes per L1 (data) cache line. */
1053 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
1054-# define L1_CACHE_BYTES 64
1055 # define L1_CACHE_SHIFT 6
1056 #else
1057 /* Both EV4 and EV5 are write-through, read-allocate,
1058 direct-mapped, physical.
1059 */
1060-# define L1_CACHE_BYTES 32
1061 # define L1_CACHE_SHIFT 5
1062 #endif
1063
1064+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1065 #define SMP_CACHE_BYTES L1_CACHE_BYTES
1066
1067 #endif
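With the literal sizes gone, both configurations still work out to the same values: _AC(1,UL) << 6 is 64 bytes for EV6/generic and _AC(1,UL) << 5 is 32 bytes for EV4/EV5; _AC() (hence the new <linux/const.h> include) attaches the UL suffix in C while expanding bare in assembly. An illustrative compile-time check:

    #include <linux/bug.h>    /* BUILD_BUG_ON() */
    #include <asm/cache.h>

    static inline void check_l1_cache_line(void)
    {
        /* the derived constant must equal the old hardcoded one */
        BUILD_BUG_ON(L1_CACHE_BYTES != (1UL << L1_CACHE_SHIFT));
    }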
1068diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
1069index 968d999..d36b2df 100644
1070--- a/arch/alpha/include/asm/elf.h
1071+++ b/arch/alpha/include/asm/elf.h
1072@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1073
1074 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
1075
1076+#ifdef CONFIG_PAX_ASLR
1077+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
1078+
1079+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
1080+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
1081+#endif
1082+
1083 /* $0 is set by ld.so to a pointer to a function which might be
1084 registered using atexit. This provides a mean for the dynamic
1085 linker to call DT_FINI functions for shared libraries that have
1086diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
1087index aab14a0..b4fa3e7 100644
1088--- a/arch/alpha/include/asm/pgalloc.h
1089+++ b/arch/alpha/include/asm/pgalloc.h
1090@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
1091 pgd_set(pgd, pmd);
1092 }
1093
1094+static inline void
1095+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
1096+{
1097+ pgd_populate(mm, pgd, pmd);
1098+}
1099+
1100 extern pgd_t *pgd_alloc(struct mm_struct *mm);
1101
1102 static inline void
1103diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
1104index d8f9b7e..f6222fa 100644
1105--- a/arch/alpha/include/asm/pgtable.h
1106+++ b/arch/alpha/include/asm/pgtable.h
1107@@ -102,6 +102,17 @@ struct vm_area_struct;
1108 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
1109 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
1110 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
1111+
1112+#ifdef CONFIG_PAX_PAGEEXEC
1113+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
1114+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
1115+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
1116+#else
1117+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1118+# define PAGE_COPY_NOEXEC PAGE_COPY
1119+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1120+#endif
1121+
1122 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
1123
1124 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
1125diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
1126index 2fd00b7..cfd5069 100644
1127--- a/arch/alpha/kernel/module.c
1128+++ b/arch/alpha/kernel/module.c
1129@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
1130
1131 /* The small sections were sorted to the end of the segment.
1132 The following should definitely cover them. */
1133- gp = (u64)me->module_core + me->core_size - 0x8000;
1134+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
1135 got = sechdrs[me->arch.gotsecindex].sh_addr;
1136
1137 for (i = 0; i < n; i++) {
1138diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
1139index 1402fcc..0b1abd2 100644
1140--- a/arch/alpha/kernel/osf_sys.c
1141+++ b/arch/alpha/kernel/osf_sys.c
1142@@ -1298,10 +1298,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
1143 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
1144
1145 static unsigned long
1146-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
1147- unsigned long limit)
1148+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
1149+ unsigned long limit, unsigned long flags)
1150 {
1151 struct vm_unmapped_area_info info;
1152+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
1153
1154 info.flags = 0;
1155 info.length = len;
1156@@ -1309,6 +1310,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
1157 info.high_limit = limit;
1158 info.align_mask = 0;
1159 info.align_offset = 0;
1160+ info.threadstack_offset = offset;
1161 return vm_unmapped_area(&info);
1162 }
1163
1164@@ -1341,20 +1343,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1165 merely specific addresses, but regions of memory -- perhaps
1166 this feature should be incorporated into all ports? */
1167
1168+#ifdef CONFIG_PAX_RANDMMAP
1169+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1170+#endif
1171+
1172 if (addr) {
1173- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
1174+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
1175 if (addr != (unsigned long) -ENOMEM)
1176 return addr;
1177 }
1178
1179 /* Next, try allocating at TASK_UNMAPPED_BASE. */
1180- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
1181- len, limit);
1182+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
1183+
1184 if (addr != (unsigned long) -ENOMEM)
1185 return addr;
1186
1187 /* Finally, try allocating in low memory. */
1188- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
1189+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
1190
1191 return addr;
1192 }
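The field threaded through these helpers is a grsecurity extension to struct vm_unmapped_area_info (vanilla 3.16 has no threadstack_offset): gr_rand_threadstack_offset() supplies a randomized gap that vm_unmapped_area() keeps between a new mapping and a neighbouring thread stack. The resulting call pattern, sketched with an illustrative lower limit:

    static unsigned long get_area_sketch(struct file *filp, unsigned long len,
                                         unsigned long limit, unsigned long flags)
    {
        struct vm_unmapped_area_info info;

        info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;  /* illustrative */
        info.high_limit = limit;
        info.align_mask = 0;
        info.align_offset = 0;
        /* grsecurity-only member: randomized thread-stack gap */
        info.threadstack_offset =
            gr_rand_threadstack_offset(current->mm, filp, flags);
        return vm_unmapped_area(&info);
    }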
1193diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
1194index 98838a0..b304fb4 100644
1195--- a/arch/alpha/mm/fault.c
1196+++ b/arch/alpha/mm/fault.c
1197@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
1198 __reload_thread(pcb);
1199 }
1200
1201+#ifdef CONFIG_PAX_PAGEEXEC
1202+/*
1203+ * PaX: decide what to do with offenders (regs->pc = fault address)
1204+ *
1205+ * returns 1 when task should be killed
1206+ * 2 when patched PLT trampoline was detected
1207+ * 3 when unpatched PLT trampoline was detected
1208+ */
1209+static int pax_handle_fetch_fault(struct pt_regs *regs)
1210+{
1211+
1212+#ifdef CONFIG_PAX_EMUPLT
1213+ int err;
1214+
1215+ do { /* PaX: patched PLT emulation #1 */
1216+ unsigned int ldah, ldq, jmp;
1217+
1218+ err = get_user(ldah, (unsigned int *)regs->pc);
1219+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
1220+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
1221+
1222+ if (err)
1223+ break;
1224+
1225+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
1226+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
1227+ jmp == 0x6BFB0000U)
1228+ {
1229+ unsigned long r27, addr;
1230+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
1231+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
1232+
1233+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
1234+ err = get_user(r27, (unsigned long *)addr);
1235+ if (err)
1236+ break;
1237+
1238+ regs->r27 = r27;
1239+ regs->pc = r27;
1240+ return 2;
1241+ }
1242+ } while (0);
1243+
1244+ do { /* PaX: patched PLT emulation #2 */
1245+ unsigned int ldah, lda, br;
1246+
1247+ err = get_user(ldah, (unsigned int *)regs->pc);
1248+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
1249+ err |= get_user(br, (unsigned int *)(regs->pc+8));
1250+
1251+ if (err)
1252+ break;
1253+
1254+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
1255+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
1256+ (br & 0xFFE00000U) == 0xC3E00000U)
1257+ {
1258+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
1259+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
1260+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
1261+
1262+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
1263+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
1264+ return 2;
1265+ }
1266+ } while (0);
1267+
1268+ do { /* PaX: unpatched PLT emulation */
1269+ unsigned int br;
1270+
1271+ err = get_user(br, (unsigned int *)regs->pc);
1272+
1273+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
1274+ unsigned int br2, ldq, nop, jmp;
1275+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
1276+
1277+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
1278+ err = get_user(br2, (unsigned int *)addr);
1279+ err |= get_user(ldq, (unsigned int *)(addr+4));
1280+ err |= get_user(nop, (unsigned int *)(addr+8));
1281+ err |= get_user(jmp, (unsigned int *)(addr+12));
1282+ err |= get_user(resolver, (unsigned long *)(addr+16));
1283+
1284+ if (err)
1285+ break;
1286+
1287+ if (br2 == 0xC3600000U &&
1288+ ldq == 0xA77B000CU &&
1289+ nop == 0x47FF041FU &&
1290+ jmp == 0x6B7B0000U)
1291+ {
1292+ regs->r28 = regs->pc+4;
1293+ regs->r27 = addr+16;
1294+ regs->pc = resolver;
1295+ return 3;
1296+ }
1297+ }
1298+ } while (0);
1299+#endif
1300+
1301+ return 1;
1302+}
1303+
1304+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1305+{
1306+ unsigned long i;
1307+
1308+ printk(KERN_ERR "PAX: bytes at PC: ");
1309+ for (i = 0; i < 5; i++) {
1310+ unsigned int c;
1311+ if (get_user(c, (unsigned int *)pc+i))
1312+ printk(KERN_CONT "???????? ");
1313+ else
1314+ printk(KERN_CONT "%08x ", c);
1315+ }
1316+ printk("\n");
1317+}
1318+#endif
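The magic numbers in the handlers above are Alpha instruction templates with their displacement fields masked off: 0x277B0000 matches ldah $27,hi($27), 0xA77B0000 matches ldq $27,lo($27), and 0x6BFB0000 is jmp ($27), i.e. the canonical patched PLT entry the emulation recognises before redirecting through $27. A standalone sketch of the first signature test, masks copied from pax_handle_fetch_fault():

    #include <stdio.h>

    /* patched PLT emulation #1: ldah/ldq/jmp through $27 */
    static int is_patched_plt1(unsigned int ldah, unsigned int ldq,
                               unsigned int jmp)
    {
        return (ldah & 0xFFFF0000U) == 0x277B0000U &&
               (ldq  & 0xFFFF0000U) == 0xA77B0000U &&
               jmp == 0x6BFB0000U;
    }

    int main(void)
    {
        /* hypothetical instruction words fetched from a faulting PC */
        printf("%d\n", is_patched_plt1(0x277B0012U, 0xA77B3F90U, 0x6BFB0000U));
        return 0;   /* prints 1 */
    }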
1319
1320 /*
1321 * This routine handles page faults. It determines the address,
1322@@ -133,8 +251,29 @@ retry:
1323 good_area:
1324 si_code = SEGV_ACCERR;
1325 if (cause < 0) {
1326- if (!(vma->vm_flags & VM_EXEC))
1327+ if (!(vma->vm_flags & VM_EXEC)) {
1328+
1329+#ifdef CONFIG_PAX_PAGEEXEC
1330+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
1331+ goto bad_area;
1332+
1333+ up_read(&mm->mmap_sem);
1334+ switch (pax_handle_fetch_fault(regs)) {
1335+
1336+#ifdef CONFIG_PAX_EMUPLT
1337+ case 2:
1338+ case 3:
1339+ return;
1340+#endif
1341+
1342+ }
1343+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
1344+ do_group_exit(SIGKILL);
1345+#else
1346 goto bad_area;
1347+#endif
1348+
1349+ }
1350 } else if (!cause) {
1351 /* Allow reads even for write-only mappings */
1352 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
1353diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
1354index 290f02ee..a639059 100644
1355--- a/arch/arm/Kconfig
1356+++ b/arch/arm/Kconfig
1357@@ -1787,7 +1787,7 @@ config ALIGNMENT_TRAP
1358
1359 config UACCESS_WITH_MEMCPY
1360 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
1361- depends on MMU
1362+ depends on MMU && !PAX_MEMORY_UDEREF
1363 default y if CPU_FEROCEON
1364 help
1365 Implement faster copy_to_user and clear_user methods for CPU
1366@@ -2051,6 +2051,7 @@ config XIP_PHYS_ADDR
1367 config KEXEC
1368 bool "Kexec system call (EXPERIMENTAL)"
1369 depends on (!SMP || PM_SLEEP_SMP)
1370+ depends on !GRKERNSEC_KMEM
1371 help
1372 kexec is a system call that implements the ability to shutdown your
1373 current kernel, and to start another kernel. It is like a reboot
1374diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
1375index 3040359..89b3dfc 100644
1376--- a/arch/arm/include/asm/atomic.h
1377+++ b/arch/arm/include/asm/atomic.h
1378@@ -18,17 +18,35 @@
1379 #include <asm/barrier.h>
1380 #include <asm/cmpxchg.h>
1381
1382+#ifdef CONFIG_GENERIC_ATOMIC64
1383+#include <asm-generic/atomic64.h>
1384+#endif
1385+
1386 #define ATOMIC_INIT(i) { (i) }
1387
1388 #ifdef __KERNEL__
1389
1390+#define _ASM_EXTABLE(from, to) \
1391+" .pushsection __ex_table,\"a\"\n"\
1392+" .align 3\n" \
1393+" .long " #from ", " #to"\n" \
1394+" .popsection"
1395+
1396 /*
1397 * On ARM, ordinary assignment (str instruction) doesn't clear the local
1398 * strex/ldrex monitor on some implementations. The reason we can use it for
1399 * atomic_set() is the clrex or dummy strex done on every exception return.
1400 */
1401 #define atomic_read(v) (*(volatile int *)&(v)->counter)
1402+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
1403+{
1404+ return v->counter;
1405+}
1406 #define atomic_set(v,i) (((v)->counter) = (i))
1407+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
1408+{
1409+ v->counter = i;
1410+}
1411
1412 #if __LINUX_ARM_ARCH__ >= 6
1413
1414@@ -44,6 +62,36 @@ static inline void atomic_add(int i, atomic_t *v)
1415
1416 prefetchw(&v->counter);
1417 __asm__ __volatile__("@ atomic_add\n"
1418+"1: ldrex %1, [%3]\n"
1419+" adds %0, %1, %4\n"
1420+
1421+#ifdef CONFIG_PAX_REFCOUNT
1422+" bvc 3f\n"
1423+"2: bkpt 0xf103\n"
1424+"3:\n"
1425+#endif
1426+
1427+" strex %1, %0, [%3]\n"
1428+" teq %1, #0\n"
1429+" bne 1b"
1430+
1431+#ifdef CONFIG_PAX_REFCOUNT
1432+"\n4:\n"
1433+ _ASM_EXTABLE(2b, 4b)
1434+#endif
1435+
1436+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1437+ : "r" (&v->counter), "Ir" (i)
1438+ : "cc");
1439+}
1440+
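The checked variant above uses adds so the CPU sets the V flag on signed overflow; bvc skips the trap in the common case, bkpt 0xf103 is the trap site, and the _ASM_EXTABLE entry lets the fault handler resume at label 4, skipping the strex so the corrupted value is never stored. A rough C model of the checked/unchecked pair (a sketch, not the kernel code; trap() stands in for the bkpt):

    #include <stdlib.h>

    static void trap(void) { abort(); }   /* models "bkpt 0xf103" */

    /* checked add: refuse to store a value that overflowed */
    static void model_atomic_add(int i, int *counter)
    {
        int result;

        if (__builtin_add_overflow(*counter, i, &result))
            trap();   /* PAX_REFCOUNT: report and kill instead */
        else
            *counter = result;
    }

    /* unchecked add: two's-complement wraparound is acceptable */
    static void model_atomic_add_unchecked(int i, int *counter)
    {
        *counter = (int)((unsigned int)*counter + (unsigned int)i);
    }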
1441+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1442+{
1443+ unsigned long tmp;
1444+ int result;
1445+
1446+ prefetchw(&v->counter);
1447+ __asm__ __volatile__("@ atomic_add_unchecked\n"
1448 "1: ldrex %0, [%3]\n"
1449 " add %0, %0, %4\n"
1450 " strex %1, %0, [%3]\n"
1451@@ -63,6 +111,43 @@ static inline int atomic_add_return(int i, atomic_t *v)
1452 prefetchw(&v->counter);
1453
1454 __asm__ __volatile__("@ atomic_add_return\n"
1455+"1: ldrex %1, [%3]\n"
1456+" adds %0, %1, %4\n"
1457+
1458+#ifdef CONFIG_PAX_REFCOUNT
1459+" bvc 3f\n"
1460+" mov %0, %1\n"
1461+"2: bkpt 0xf103\n"
1462+"3:\n"
1463+#endif
1464+
1465+" strex %1, %0, [%3]\n"
1466+" teq %1, #0\n"
1467+" bne 1b"
1468+
1469+#ifdef CONFIG_PAX_REFCOUNT
1470+"\n4:\n"
1471+ _ASM_EXTABLE(2b, 4b)
1472+#endif
1473+
1474+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1475+ : "r" (&v->counter), "Ir" (i)
1476+ : "cc");
1477+
1478+ smp_mb();
1479+
1480+ return result;
1481+}
1482+
1483+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1484+{
1485+ unsigned long tmp;
1486+ int result;
1487+
1488+ smp_mb();
1489+ prefetchw(&v->counter);
1490+
1491+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
1492 "1: ldrex %0, [%3]\n"
1493 " add %0, %0, %4\n"
1494 " strex %1, %0, [%3]\n"
1495@@ -84,6 +169,36 @@ static inline void atomic_sub(int i, atomic_t *v)
1496
1497 prefetchw(&v->counter);
1498 __asm__ __volatile__("@ atomic_sub\n"
1499+"1: ldrex %1, [%3]\n"
1500+" subs %0, %1, %4\n"
1501+
1502+#ifdef CONFIG_PAX_REFCOUNT
1503+" bvc 3f\n"
1504+"2: bkpt 0xf103\n"
1505+"3:\n"
1506+#endif
1507+
1508+" strex %1, %0, [%3]\n"
1509+" teq %1, #0\n"
1510+" bne 1b"
1511+
1512+#ifdef CONFIG_PAX_REFCOUNT
1513+"\n4:\n"
1514+ _ASM_EXTABLE(2b, 4b)
1515+#endif
1516+
1517+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1518+ : "r" (&v->counter), "Ir" (i)
1519+ : "cc");
1520+}
1521+
1522+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1523+{
1524+ unsigned long tmp;
1525+ int result;
1526+
1527+ prefetchw(&v->counter);
1528+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
1529 "1: ldrex %0, [%3]\n"
1530 " sub %0, %0, %4\n"
1531 " strex %1, %0, [%3]\n"
1532@@ -103,11 +218,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1533 prefetchw(&v->counter);
1534
1535 __asm__ __volatile__("@ atomic_sub_return\n"
1536-"1: ldrex %0, [%3]\n"
1537-" sub %0, %0, %4\n"
1538+"1: ldrex %1, [%3]\n"
1539+" subs %0, %1, %4\n"
1540+
1541+#ifdef CONFIG_PAX_REFCOUNT
1542+" bvc 3f\n"
1543+" mov %0, %1\n"
1544+"2: bkpt 0xf103\n"
1545+"3:\n"
1546+#endif
1547+
1548 " strex %1, %0, [%3]\n"
1549 " teq %1, #0\n"
1550 " bne 1b"
1551+
1552+#ifdef CONFIG_PAX_REFCOUNT
1553+"\n4:\n"
1554+ _ASM_EXTABLE(2b, 4b)
1555+#endif
1556+
1557 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1558 : "r" (&v->counter), "Ir" (i)
1559 : "cc");
1560@@ -152,12 +281,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1561 __asm__ __volatile__ ("@ atomic_add_unless\n"
1562 "1: ldrex %0, [%4]\n"
1563 " teq %0, %5\n"
1564-" beq 2f\n"
1565-" add %1, %0, %6\n"
1566+" beq 4f\n"
1567+" adds %1, %0, %6\n"
1568+
1569+#ifdef CONFIG_PAX_REFCOUNT
1570+" bvc 3f\n"
1571+"2: bkpt 0xf103\n"
1572+"3:\n"
1573+#endif
1574+
1575 " strex %2, %1, [%4]\n"
1576 " teq %2, #0\n"
1577 " bne 1b\n"
1578-"2:"
1579+"4:"
1580+
1581+#ifdef CONFIG_PAX_REFCOUNT
1582+ _ASM_EXTABLE(2b, 4b)
1583+#endif
1584+
1585 : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
1586 : "r" (&v->counter), "r" (u), "r" (a)
1587 : "cc");
1588@@ -168,6 +309,28 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1589 return oldval;
1590 }
1591
1592+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1593+{
1594+ unsigned long oldval, res;
1595+
1596+ smp_mb();
1597+
1598+ do {
1599+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1600+ "ldrex %1, [%3]\n"
1601+ "mov %0, #0\n"
1602+ "teq %1, %4\n"
1603+ "strexeq %0, %5, [%3]\n"
1604+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1605+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1606+ : "cc");
1607+ } while (res);
1608+
1609+ smp_mb();
1610+
1611+ return oldval;
1612+}
1613+
1614 #else /* ARM_ARCH_6 */
1615
1616 #ifdef CONFIG_SMP
1617@@ -186,7 +349,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1618
1619 return val;
1620 }
1621+
1622+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1623+{
1624+ return atomic_add_return(i, v);
1625+}
1626+
1627 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1628+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1629+{
1630+ (void) atomic_add_return(i, v);
1631+}
1632
1633 static inline int atomic_sub_return(int i, atomic_t *v)
1634 {
1635@@ -201,6 +374,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1636 return val;
1637 }
1638 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1639+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1640+{
1641+ (void) atomic_sub_return(i, v);
1642+}
1643
1644 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1645 {
1646@@ -216,6 +393,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1647 return ret;
1648 }
1649
1650+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1651+{
1652+ return atomic_cmpxchg(v, old, new);
1653+}
1654+
1655 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1656 {
1657 int c, old;
1658@@ -229,13 +411,33 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1659 #endif /* __LINUX_ARM_ARCH__ */
1660
1661 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1662+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1663+{
1664+ return xchg(&v->counter, new);
1665+}
1666
1667 #define atomic_inc(v) atomic_add(1, v)
1668+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1669+{
1670+ atomic_add_unchecked(1, v);
1671+}
1672 #define atomic_dec(v) atomic_sub(1, v)
1673+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1674+{
1675+ atomic_sub_unchecked(1, v);
1676+}
1677
1678 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1679+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1680+{
1681+ return atomic_add_return_unchecked(1, v) == 0;
1682+}
1683 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1684 #define atomic_inc_return(v) (atomic_add_return(1, v))
1685+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1686+{
1687+ return atomic_add_return_unchecked(1, v);
1688+}
1689 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1690 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1691
1692@@ -246,6 +448,14 @@ typedef struct {
1693 long long counter;
1694 } atomic64_t;
1695
1696+#ifdef CONFIG_PAX_REFCOUNT
1697+typedef struct {
1698+ long long counter;
1699+} atomic64_unchecked_t;
1700+#else
1701+typedef atomic64_t atomic64_unchecked_t;
1702+#endif
1703+
1704 #define ATOMIC64_INIT(i) { (i) }
1705
1706 #ifdef CONFIG_ARM_LPAE
1707@@ -262,6 +472,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1708 return result;
1709 }
1710
1711+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1712+{
1713+ long long result;
1714+
1715+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1716+" ldrd %0, %H0, [%1]"
1717+ : "=&r" (result)
1718+ : "r" (&v->counter), "Qo" (v->counter)
1719+ );
1720+
1721+ return result;
1722+}
1723+
1724 static inline void atomic64_set(atomic64_t *v, long long i)
1725 {
1726 __asm__ __volatile__("@ atomic64_set\n"
1727@@ -270,6 +493,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1728 : "r" (&v->counter), "r" (i)
1729 );
1730 }
1731+
1732+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1733+{
1734+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1735+" strd %2, %H2, [%1]"
1736+ : "=Qo" (v->counter)
1737+ : "r" (&v->counter), "r" (i)
1738+ );
1739+}
1740 #else
1741 static inline long long atomic64_read(const atomic64_t *v)
1742 {
1743@@ -284,6 +516,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1744 return result;
1745 }
1746
1747+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1748+{
1749+ long long result;
1750+
1751+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1752+" ldrexd %0, %H0, [%1]"
1753+ : "=&r" (result)
1754+ : "r" (&v->counter), "Qo" (v->counter)
1755+ );
1756+
1757+ return result;
1758+}
1759+
1760 static inline void atomic64_set(atomic64_t *v, long long i)
1761 {
1762 long long tmp;
1763@@ -298,6 +543,21 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1764 : "r" (&v->counter), "r" (i)
1765 : "cc");
1766 }
1767+
1768+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1769+{
1770+ long long tmp;
1771+
1772+ prefetchw(&v->counter);
1773+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1774+"1: ldrexd %0, %H0, [%2]\n"
1775+" strexd %0, %3, %H3, [%2]\n"
1776+" teq %0, #0\n"
1777+" bne 1b"
1778+ : "=&r" (tmp), "=Qo" (v->counter)
1779+ : "r" (&v->counter), "r" (i)
1780+ : "cc");
1781+}
1782 #endif
1783
1784 static inline void atomic64_add(long long i, atomic64_t *v)
1785@@ -309,6 +569,37 @@ static inline void atomic64_add(long long i, atomic64_t *v)
1786 __asm__ __volatile__("@ atomic64_add\n"
1787 "1: ldrexd %0, %H0, [%3]\n"
1788 " adds %Q0, %Q0, %Q4\n"
1789+" adcs %R0, %R0, %R4\n"
1790+
1791+#ifdef CONFIG_PAX_REFCOUNT
1792+" bvc 3f\n"
1793+"2: bkpt 0xf103\n"
1794+"3:\n"
1795+#endif
1796+
1797+" strexd %1, %0, %H0, [%3]\n"
1798+" teq %1, #0\n"
1799+" bne 1b"
1800+
1801+#ifdef CONFIG_PAX_REFCOUNT
1802+"\n4:\n"
1803+ _ASM_EXTABLE(2b, 4b)
1804+#endif
1805+
1806+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1807+ : "r" (&v->counter), "r" (i)
1808+ : "cc");
1809+}
1810+
1811+static inline void atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
1812+{
1813+ long long result;
1814+ unsigned long tmp;
1815+
1816+ prefetchw(&v->counter);
1817+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1818+"1: ldrexd %0, %H0, [%3]\n"
1819+" adds %Q0, %Q0, %Q4\n"
1820 " adc %R0, %R0, %R4\n"
1821 " strexd %1, %0, %H0, [%3]\n"
1822 " teq %1, #0\n"
1823@@ -329,6 +620,44 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
1824 __asm__ __volatile__("@ atomic64_add_return\n"
1825 "1: ldrexd %0, %H0, [%3]\n"
1826 " adds %Q0, %Q0, %Q4\n"
1827+" adcs %R0, %R0, %R4\n"
1828+
1829+#ifdef CONFIG_PAX_REFCOUNT
1830+" bvc 3f\n"
1831+" mov %0, %1\n"
1832+" mov %H0, %H1\n"
1833+"2: bkpt 0xf103\n"
1834+"3:\n"
1835+#endif
1836+
1837+" strexd %1, %0, %H0, [%3]\n"
1838+" teq %1, #0\n"
1839+" bne 1b"
1840+
1841+#ifdef CONFIG_PAX_REFCOUNT
1842+"\n4:\n"
1843+ _ASM_EXTABLE(2b, 4b)
1844+#endif
1845+
1846+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1847+ : "r" (&v->counter), "r" (i)
1848+ : "cc");
1849+
1850+ smp_mb();
1851+
1852+ return result;
1853+}
1854+
1855+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
1856+{
1857+ long long result;
1858+ unsigned long tmp;
1859+
1860+ smp_mb();
1861+
1862+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1863+"1: ldrexd %0, %H0, [%3]\n"
1864+" adds %Q0, %Q0, %Q4\n"
1865 " adc %R0, %R0, %R4\n"
1866 " strexd %1, %0, %H0, [%3]\n"
1867 " teq %1, #0\n"
1868@@ -351,6 +680,37 @@ static inline void atomic64_sub(long long i, atomic64_t *v)
1869 __asm__ __volatile__("@ atomic64_sub\n"
1870 "1: ldrexd %0, %H0, [%3]\n"
1871 " subs %Q0, %Q0, %Q4\n"
1872+" sbcs %R0, %R0, %R4\n"
1873+
1874+#ifdef CONFIG_PAX_REFCOUNT
1875+" bvc 3f\n"
1876+"2: bkpt 0xf103\n"
1877+"3:\n"
1878+#endif
1879+
1880+" strexd %1, %0, %H0, [%3]\n"
1881+" teq %1, #0\n"
1882+" bne 1b"
1883+
1884+#ifdef CONFIG_PAX_REFCOUNT
1885+"\n4:\n"
1886+ _ASM_EXTABLE(2b, 4b)
1887+#endif
1888+
1889+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1890+ : "r" (&v->counter), "r" (i)
1891+ : "cc");
1892+}
1893+
1894+static inline void atomic64_sub_unchecked(long long i, atomic64_unchecked_t *v)
1895+{
1896+ long long result;
1897+ unsigned long tmp;
1898+
1899+ prefetchw(&v->counter);
1900+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1901+"1: ldrexd %0, %H0, [%3]\n"
1902+" subs %Q0, %Q0, %Q4\n"
1903 " sbc %R0, %R0, %R4\n"
1904 " strexd %1, %0, %H0, [%3]\n"
1905 " teq %1, #0\n"
1906@@ -371,16 +731,29 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
1907 __asm__ __volatile__("@ atomic64_sub_return\n"
1908 "1: ldrexd %0, %H0, [%3]\n"
1909 " subs %Q0, %Q0, %Q4\n"
1910-" sbc %R0, %R0, %R4\n"
1911+" sbcs %R0, %R0, %R4\n"
1912+
1913+#ifdef CONFIG_PAX_REFCOUNT
1914+" bvc 3f\n"
1915+" mov %0, %1\n"
1916+" mov %H0, %H1\n"
1917+"2: bkpt 0xf103\n"
1918+"3:\n"
1919+#endif
1920+
1921 " strexd %1, %0, %H0, [%3]\n"
1922 " teq %1, #0\n"
1923 " bne 1b"
1924+
1925+#ifdef CONFIG_PAX_REFCOUNT
1926+"\n4:\n"
1927+ _ASM_EXTABLE(2b, 4b)
1928+#endif
1929+
1930 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1931 : "r" (&v->counter), "r" (i)
1932 : "cc");
1933
1934- smp_mb();
1935-
1936 return result;
1937 }
1938
1939@@ -410,6 +783,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1940 return oldval;
1941 }
1942
1943+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
1944+ long long new)
1945+{
1946+ long long oldval;
1947+ unsigned long res;
1948+
1949+ smp_mb();
1950+
1951+ do {
1952+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1953+ "ldrexd %1, %H1, [%3]\n"
1954+ "mov %0, #0\n"
1955+ "teq %1, %4\n"
1956+ "teqeq %H1, %H4\n"
1957+ "strexdeq %0, %5, %H5, [%3]"
1958+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1959+ : "r" (&ptr->counter), "r" (old), "r" (new)
1960+ : "cc");
1961+ } while (res);
1962+
1963+ smp_mb();
1964+
1965+ return oldval;
1966+}
1967+
1968 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1969 {
1970 long long result;
1971@@ -435,21 +833,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1972 static inline long long atomic64_dec_if_positive(atomic64_t *v)
1973 {
1974 long long result;
1975- unsigned long tmp;
1976+ u64 tmp;
1977
1978 smp_mb();
1979 prefetchw(&v->counter);
1980
1981 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1982-"1: ldrexd %0, %H0, [%3]\n"
1983-" subs %Q0, %Q0, #1\n"
1984-" sbc %R0, %R0, #0\n"
1985+"1: ldrexd %1, %H1, [%3]\n"
1986+" subs %Q0, %Q1, #1\n"
1987+" sbcs %R0, %R1, #0\n"
1988+
1989+#ifdef CONFIG_PAX_REFCOUNT
1990+" bvc 3f\n"
1991+" mov %Q0, %Q1\n"
1992+" mov %R0, %R1\n"
1993+"2: bkpt 0xf103\n"
1994+"3:\n"
1995+#endif
1996+
1997 " teq %R0, #0\n"
1998-" bmi 2f\n"
1999+" bmi 4f\n"
2000 " strexd %1, %0, %H0, [%3]\n"
2001 " teq %1, #0\n"
2002 " bne 1b\n"
2003-"2:"
2004+"4:\n"
2005+
2006+#ifdef CONFIG_PAX_REFCOUNT
2007+ _ASM_EXTABLE(2b, 4b)
2008+#endif
2009+
2010 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
2011 : "r" (&v->counter)
2012 : "cc");
2013@@ -473,13 +885,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
2014 " teq %0, %5\n"
2015 " teqeq %H0, %H5\n"
2016 " moveq %1, #0\n"
2017-" beq 2f\n"
2018+" beq 4f\n"
2019 " adds %Q0, %Q0, %Q6\n"
2020-" adc %R0, %R0, %R6\n"
2021+" adcs %R0, %R0, %R6\n"
2022+
2023+#ifdef CONFIG_PAX_REFCOUNT
2024+" bvc 3f\n"
2025+"2: bkpt 0xf103\n"
2026+"3:\n"
2027+#endif
2028+
2029 " strexd %2, %0, %H0, [%4]\n"
2030 " teq %2, #0\n"
2031 " bne 1b\n"
2032-"2:"
2033+"4:\n"
2034+
2035+#ifdef CONFIG_PAX_REFCOUNT
2036+ _ASM_EXTABLE(2b, 4b)
2037+#endif
2038+
2039 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
2040 : "r" (&v->counter), "r" (u), "r" (a)
2041 : "cc");
2042@@ -492,10 +916,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
2043
2044 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
2045 #define atomic64_inc(v) atomic64_add(1LL, (v))
2046+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
2047 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
2048+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
2049 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2050 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
2051 #define atomic64_dec(v) atomic64_sub(1LL, (v))
2052+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
2053 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
2054 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
2055 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
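
Note on the atomic.h hunks above: the patch switches `add`/`adc` to the flag-setting `adds`/`adcs` forms so the V (overflow) flag becomes meaningful, then `bvc 3f` skips the trap on the no-overflow path and `bkpt 0xf103` raises a debug exception when a signed overflow occurs; `_ASM_EXTABLE(2b, 4b)` lets the exception handler resume past the STREX loop, and in the `*_return` variants `mov %0, %1` restores the pre-overflow value first, so an overflowing refcount never reaches memory. A minimal user-space sketch of the same check, assuming a GCC/Clang toolchain with `__builtin_add_overflow` (an illustration of the idea, not the kernel code):

#include <limits.h>
#include <stdio.h>

/* Sketch: report-and-saturate instead of trapping via bkpt 0xf103. */
static int refcount_add_checked(int counter, int delta)
{
	int result;

	if (__builtin_add_overflow(counter, delta, &result)) {
		/* the patch traps here and keeps the old counter value */
		fprintf(stderr, "refcount overflow blocked\n");
		return counter;
	}
	return result;
}

int main(void)
{
	printf("%d\n", refcount_add_checked(INT_MAX - 1, 1)); /* ok: INT_MAX */
	printf("%d\n", refcount_add_checked(INT_MAX, 1));     /* blocked    */
	return 0;
}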
2056diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
2057index c6a3e73..35cca85 100644
2058--- a/arch/arm/include/asm/barrier.h
2059+++ b/arch/arm/include/asm/barrier.h
2060@@ -63,7 +63,7 @@
2061 do { \
2062 compiletime_assert_atomic_type(*p); \
2063 smp_mb(); \
2064- ACCESS_ONCE(*p) = (v); \
2065+ ACCESS_ONCE_RW(*p) = (v); \
2066 } while (0)
2067
2068 #define smp_load_acquire(p) \
2069diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
2070index 75fe66b..ba3dee4 100644
2071--- a/arch/arm/include/asm/cache.h
2072+++ b/arch/arm/include/asm/cache.h
2073@@ -4,8 +4,10 @@
2074 #ifndef __ASMARM_CACHE_H
2075 #define __ASMARM_CACHE_H
2076
2077+#include <linux/const.h>
2078+
2079 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
2080-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2081+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2082
2083 /*
2084 * Memory returned by kmalloc() may be used for DMA, so we must make
2085@@ -24,5 +26,6 @@
2086 #endif
2087
2088 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
2089+#define __read_only __attribute__ ((__section__(".data..read_only")))
2090
2091 #endif
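
Note: the `__read_only` define added above only *places* a variable in the `.data..read_only` section; the actual write protection comes from the KERNEXEC page-table setup elsewhere in the patch (the later `psci_ops __read_only` hunk is one user). A compile-only sketch of the attribute, assuming an ELF target and a GCC-compatible compiler; the variable name is hypothetical:

/* Section placement only; making the section read-only is the MMU's job. */
#define __read_only __attribute__((__section__(".data..read_only")))

int example_flag __read_only = 1;	/* written once at init, then constant */

int main(void) { return example_flag - 1; }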
2092diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
2093index fd43f7f..a817f5a 100644
2094--- a/arch/arm/include/asm/cacheflush.h
2095+++ b/arch/arm/include/asm/cacheflush.h
2096@@ -116,7 +116,7 @@ struct cpu_cache_fns {
2097 void (*dma_unmap_area)(const void *, size_t, int);
2098
2099 void (*dma_flush_range)(const void *, const void *);
2100-};
2101+} __no_const;
2102
2103 /*
2104 * Select the calling method
2105diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
2106index 5233151..87a71fa 100644
2107--- a/arch/arm/include/asm/checksum.h
2108+++ b/arch/arm/include/asm/checksum.h
2109@@ -37,7 +37,19 @@ __wsum
2110 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
2111
2112 __wsum
2113-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
2114+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
2115+
2116+static inline __wsum
2117+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
2118+{
2119+ __wsum ret;
2120+ pax_open_userland();
2121+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
2122+ pax_close_userland();
2123+ return ret;
2124+}
2125+
2126+
2127
2128 /*
2129 * Fold a partial checksum without adding pseudo headers
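
Note: the checksum.h hunk is the first instance in this section of a pattern the patch repeats (futex.h and uaccess.h below): the raw accessor keeps its behaviour under a `__`-prefixed name, and a wrapper opens the userland domain only for the duration of the access, so under UDEREF user memory is unreachable the rest of the time. A schematic sketch of the calling convention with the helpers stubbed out (the real `pax_open_userland`/`pax_close_userland` flip the DACR, as defined in the uaccess.h hunk further down):

#include <string.h>

/* Stubs standing in for the DACR-flipping helpers in the patch. */
static void pax_open_userland(void)  { /* grant DOMAIN_USER access  */ }
static void pax_close_userland(void) { /* revoke DOMAIN_USER access */ }

/* The raw primitive keeps its old behaviour under a new name... */
static unsigned long __raw_user_copy(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);	/* placeholder for the real uaccess copy */
	return 0;		/* 0 bytes left uncopied */
}

/* ...and the public entry point brackets it. */
static unsigned long user_copy(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pax_open_userland();
	ret = __raw_user_copy(dst, src, n);
	pax_close_userland();
	return ret;
}

int main(void)
{
	char dst[4];
	return (int)user_copy(dst, "hi", 3);
}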
2130diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
2131index abb2c37..96db950 100644
2132--- a/arch/arm/include/asm/cmpxchg.h
2133+++ b/arch/arm/include/asm/cmpxchg.h
2134@@ -104,6 +104,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
2135
2136 #define xchg(ptr,x) \
2137 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
2138+#define xchg_unchecked(ptr,x) \
2139+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
2140
2141 #include <asm-generic/cmpxchg-local.h>
2142
2143diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
2144index 6ddbe44..b5e38b1 100644
2145--- a/arch/arm/include/asm/domain.h
2146+++ b/arch/arm/include/asm/domain.h
2147@@ -48,18 +48,37 @@
2148 * Domain types
2149 */
2150 #define DOMAIN_NOACCESS 0
2151-#define DOMAIN_CLIENT 1
2152 #ifdef CONFIG_CPU_USE_DOMAINS
2153+#define DOMAIN_USERCLIENT 1
2154+#define DOMAIN_KERNELCLIENT 1
2155 #define DOMAIN_MANAGER 3
2156+#define DOMAIN_VECTORS DOMAIN_USER
2157 #else
2158+
2159+#ifdef CONFIG_PAX_KERNEXEC
2160 #define DOMAIN_MANAGER 1
2161+#define DOMAIN_KERNEXEC 3
2162+#else
2163+#define DOMAIN_MANAGER 1
2164+#endif
2165+
2166+#ifdef CONFIG_PAX_MEMORY_UDEREF
2167+#define DOMAIN_USERCLIENT 0
2168+#define DOMAIN_UDEREF 1
2169+#define DOMAIN_VECTORS DOMAIN_KERNEL
2170+#else
2171+#define DOMAIN_USERCLIENT 1
2172+#define DOMAIN_VECTORS DOMAIN_USER
2173+#endif
2174+#define DOMAIN_KERNELCLIENT 1
2175+
2176 #endif
2177
2178 #define domain_val(dom,type) ((type) << (2*(dom)))
2179
2180 #ifndef __ASSEMBLY__
2181
2182-#ifdef CONFIG_CPU_USE_DOMAINS
2183+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2184 static inline void set_domain(unsigned val)
2185 {
2186 asm volatile(
2187@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
2188 isb();
2189 }
2190
2191-#define modify_domain(dom,type) \
2192- do { \
2193- struct thread_info *thread = current_thread_info(); \
2194- unsigned int domain = thread->cpu_domain; \
2195- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
2196- thread->cpu_domain = domain | domain_val(dom, type); \
2197- set_domain(thread->cpu_domain); \
2198- } while (0)
2199-
2200+extern void modify_domain(unsigned int dom, unsigned int type);
2201 #else
2202 static inline void set_domain(unsigned val) { }
2203 static inline void modify_domain(unsigned dom, unsigned type) { }
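
Note: `domain_val(dom, type)` packs a 2-bit access type per domain into the DACR, so domain N occupies bits 2N+1:2N. A worked sketch reusing the macro and the access types from the hunk (no access = 0, client = 1, manager = 3); the domain numbers here are assumed for illustration:

#include <stdio.h>

#define DOMAIN_KERNEL	0	/* assumed numbering, for illustration only */
#define DOMAIN_USER	1

#define DOMAIN_NOACCESS		0
#define DOMAIN_KERNELCLIENT	1
#define DOMAIN_MANAGER		3

#define domain_val(dom, type)	((type) << (2*(dom)))

int main(void)
{
	/* kernel = client (01 in bits 1:0), user = no access (00 in bits 3:2) */
	unsigned dacr = domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) |
			domain_val(DOMAIN_USER, DOMAIN_NOACCESS);

	printf("DACR = 0x%x\n", dacr);	/* prints 0x1 */
	return 0;
}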
2204diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
2205index f4b46d3..abc9b2b 100644
2206--- a/arch/arm/include/asm/elf.h
2207+++ b/arch/arm/include/asm/elf.h
2208@@ -114,7 +114,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
2209 the loader. We need to make sure that it is out of the way of the program
2210 that it will "exec", and that there is sufficient room for the brk. */
2211
2212-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
2213+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2214+
2215+#ifdef CONFIG_PAX_ASLR
2216+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
2217+
2218+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
2219+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
2220+#endif
2221
2222 /* When the program starts, a1 contains a pointer to a function to be
2223 registered with atexit, as per the SVR4 ABI. A value of 0 means we
2224@@ -124,10 +131,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
2225 extern void elf_set_personality(const struct elf32_hdr *);
2226 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
2227
2228-struct mm_struct;
2229-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2230-#define arch_randomize_brk arch_randomize_brk
2231-
2232 #ifdef CONFIG_MMU
2233 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2234 struct linux_binprm;
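
Note: `PAX_DELTA_MMAP_LEN`/`PAX_DELTA_STACK_LEN` above give the number of random bits applied to the mmap and stack bases: 16 for the 32-bit personality, 10 otherwise. Since PaX applies the delta at page granularity, 16 bits with 4 KiB pages span 256 MiB. A worked sketch of that arithmetic, assuming a page shift of 12:

#include <stdio.h>

int main(void)
{
	unsigned delta_bits = 16;	/* PER_LINUX_32BIT case in the hunk */
	unsigned page_shift = 12;	/* assumed 4 KiB pages */
	unsigned long long span = 1ULL << (delta_bits + page_shift);

	printf("%llu MiB of mmap randomization\n", span >> 20); /* 256 */
	return 0;
}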
2235diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
2236index de53547..52b9a28 100644
2237--- a/arch/arm/include/asm/fncpy.h
2238+++ b/arch/arm/include/asm/fncpy.h
2239@@ -81,7 +81,9 @@
2240 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
2241 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
2242 \
2243+ pax_open_kernel(); \
2244 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
2245+ pax_close_kernel(); \
2246 flush_icache_range((unsigned long)(dest_buf), \
2247 (unsigned long)(dest_buf) + (size)); \
2248 \
2249diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
2250index 53e69da..3fdc896 100644
2251--- a/arch/arm/include/asm/futex.h
2252+++ b/arch/arm/include/asm/futex.h
2253@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
2254 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
2255 return -EFAULT;
2256
2257+ pax_open_userland();
2258+
2259 smp_mb();
2260 /* Prefetching cannot fault */
2261 prefetchw(uaddr);
2262@@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
2263 : "cc", "memory");
2264 smp_mb();
2265
2266+ pax_close_userland();
2267+
2268 *uval = val;
2269 return ret;
2270 }
2271@@ -93,6 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
2272 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
2273 return -EFAULT;
2274
2275+ pax_open_userland();
2276+
2277 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
2278 "1: " TUSER(ldr) " %1, [%4]\n"
2279 " teq %1, %2\n"
2280@@ -103,6 +109,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
2281 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
2282 : "cc", "memory");
2283
2284+ pax_close_userland();
2285+
2286 *uval = val;
2287 return ret;
2288 }
2289@@ -125,6 +133,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
2290 return -EFAULT;
2291
2292 pagefault_disable(); /* implies preempt_disable() */
2293+ pax_open_userland();
2294
2295 switch (op) {
2296 case FUTEX_OP_SET:
2297@@ -146,6 +155,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
2298 ret = -ENOSYS;
2299 }
2300
2301+ pax_close_userland();
2302 pagefault_enable(); /* subsumes preempt_enable() */
2303
2304 if (!ret) {
2305diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
2306index 83eb2f7..ed77159 100644
2307--- a/arch/arm/include/asm/kmap_types.h
2308+++ b/arch/arm/include/asm/kmap_types.h
2309@@ -4,6 +4,6 @@
2310 /*
2311 * This is the "bare minimum". AIO seems to require this.
2312 */
2313-#define KM_TYPE_NR 16
2314+#define KM_TYPE_NR 17
2315
2316 #endif
2317diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
2318index 9e614a1..3302cca 100644
2319--- a/arch/arm/include/asm/mach/dma.h
2320+++ b/arch/arm/include/asm/mach/dma.h
2321@@ -22,7 +22,7 @@ struct dma_ops {
2322 int (*residue)(unsigned int, dma_t *); /* optional */
2323 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
2324 const char *type;
2325-};
2326+} __do_const;
2327
2328 struct dma_struct {
2329 void *addr; /* single DMA address */
2330diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
2331index f98c7f3..e5c626d 100644
2332--- a/arch/arm/include/asm/mach/map.h
2333+++ b/arch/arm/include/asm/mach/map.h
2334@@ -23,17 +23,19 @@ struct map_desc {
2335
2336 /* types 0-3 are defined in asm/io.h */
2337 enum {
2338- MT_UNCACHED = 4,
2339- MT_CACHECLEAN,
2340- MT_MINICLEAN,
2341+ MT_UNCACHED_RW = 4,
2342+ MT_CACHECLEAN_RO,
2343+ MT_MINICLEAN_RO,
2344 MT_LOW_VECTORS,
2345 MT_HIGH_VECTORS,
2346- MT_MEMORY_RWX,
2347+ __MT_MEMORY_RWX,
2348 MT_MEMORY_RW,
2349- MT_ROM,
2350- MT_MEMORY_RWX_NONCACHED,
2351+ MT_MEMORY_RX,
2352+ MT_ROM_RX,
2353+ MT_MEMORY_RW_NONCACHED,
2354+ MT_MEMORY_RX_NONCACHED,
2355 MT_MEMORY_RW_DTCM,
2356- MT_MEMORY_RWX_ITCM,
2357+ MT_MEMORY_RX_ITCM,
2358 MT_MEMORY_RW_SO,
2359 MT_MEMORY_DMA_READY,
2360 };
2361diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
2362index 891a56b..48f337e 100644
2363--- a/arch/arm/include/asm/outercache.h
2364+++ b/arch/arm/include/asm/outercache.h
2365@@ -36,7 +36,7 @@ struct outer_cache_fns {
2366
2367 /* This is an ARM L2C thing */
2368 void (*write_sec)(unsigned long, unsigned);
2369-};
2370+} __no_const;
2371
2372 extern struct outer_cache_fns outer_cache;
2373
2374diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
2375index 4355f0e..cd9168e 100644
2376--- a/arch/arm/include/asm/page.h
2377+++ b/arch/arm/include/asm/page.h
2378@@ -23,6 +23,7 @@
2379
2380 #else
2381
2382+#include <linux/compiler.h>
2383 #include <asm/glue.h>
2384
2385 /*
2386@@ -114,7 +115,7 @@ struct cpu_user_fns {
2387 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
2388 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
2389 unsigned long vaddr, struct vm_area_struct *vma);
2390-};
2391+} __no_const;
2392
2393 #ifdef MULTI_USER
2394 extern struct cpu_user_fns cpu_user;
2395diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
2396index 78a7793..e3dc06c 100644
2397--- a/arch/arm/include/asm/pgalloc.h
2398+++ b/arch/arm/include/asm/pgalloc.h
2399@@ -17,6 +17,7 @@
2400 #include <asm/processor.h>
2401 #include <asm/cacheflush.h>
2402 #include <asm/tlbflush.h>
2403+#include <asm/system_info.h>
2404
2405 #define check_pgt_cache() do { } while (0)
2406
2407@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2408 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
2409 }
2410
2411+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2412+{
2413+ pud_populate(mm, pud, pmd);
2414+}
2415+
2416 #else /* !CONFIG_ARM_LPAE */
2417
2418 /*
2419@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2420 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
2421 #define pmd_free(mm, pmd) do { } while (0)
2422 #define pud_populate(mm,pmd,pte) BUG()
2423+#define pud_populate_kernel(mm,pmd,pte) BUG()
2424
2425 #endif /* CONFIG_ARM_LPAE */
2426
2427@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
2428 __free_page(pte);
2429 }
2430
2431+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
2432+{
2433+#ifdef CONFIG_ARM_LPAE
2434+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
2435+#else
2436+ if (addr & SECTION_SIZE)
2437+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
2438+ else
2439+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
2440+#endif
2441+ flush_pmd_entry(pmdp);
2442+}
2443+
2444 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
2445 pmdval_t prot)
2446 {
2447@@ -157,7 +177,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
2448 static inline void
2449 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
2450 {
2451- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
2452+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
2453 }
2454 #define pmd_pgtable(pmd) pmd_page(pmd)
2455
2456diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
2457index 5cfba15..f415e1a 100644
2458--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
2459+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
2460@@ -20,12 +20,15 @@
2461 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
2462 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
2463 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
2464+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
2465 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
2466 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
2467 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
2468+
2469 /*
2470 * - section
2471 */
2472+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
2473 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
2474 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
2475 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
2476@@ -37,6 +40,7 @@
2477 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
2478 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
2479 #define PMD_SECT_AF (_AT(pmdval_t, 0))
2480+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
2481
2482 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
2483 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
2484@@ -66,6 +70,7 @@
2485 * - extended small page/tiny page
2486 */
2487 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
2488+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
2489 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
2490 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
2491 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
2492diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
2493index 219ac88..73ec32a 100644
2494--- a/arch/arm/include/asm/pgtable-2level.h
2495+++ b/arch/arm/include/asm/pgtable-2level.h
2496@@ -126,6 +126,9 @@
2497 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
2498 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
2499
2500+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
2501+#define L_PTE_PXN (_AT(pteval_t, 0))
2502+
2503 /*
2504 * These are the memory types, defined to be compatible with
2505 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
2506diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
2507index 626989f..9d67a33 100644
2508--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
2509+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
2510@@ -75,6 +75,7 @@
2511 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
2512 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
2513 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
2514+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
2515 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
2516
2517 /*
2518diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
2519index 85c60ad..b0bbd7e 100644
2520--- a/arch/arm/include/asm/pgtable-3level.h
2521+++ b/arch/arm/include/asm/pgtable-3level.h
2522@@ -82,6 +82,7 @@
2523 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
2524 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
2525 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
2526+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
2527 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
2528 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
2529 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
2530@@ -95,6 +96,7 @@
2531 /*
2532 * To be used in assembly code with the upper page attributes.
2533 */
2534+#define L_PTE_PXN_HIGH (1 << (53 - 32))
2535 #define L_PTE_XN_HIGH (1 << (54 - 32))
2536 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
2537
2538diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
2539index 5478e5d..f5b5cb3 100644
2540--- a/arch/arm/include/asm/pgtable.h
2541+++ b/arch/arm/include/asm/pgtable.h
2542@@ -33,6 +33,9 @@
2543 #include <asm/pgtable-2level.h>
2544 #endif
2545
2546+#define ktla_ktva(addr) (addr)
2547+#define ktva_ktla(addr) (addr)
2548+
2549 /*
2550 * Just any arbitrary offset to the start of the vmalloc VM area: the
2551 * current 8MB value just means that there will be a 8MB "hole" after the
2552@@ -48,6 +51,9 @@
2553 #define LIBRARY_TEXT_START 0x0c000000
2554
2555 #ifndef __ASSEMBLY__
2556+extern pteval_t __supported_pte_mask;
2557+extern pmdval_t __supported_pmd_mask;
2558+
2559 extern void __pte_error(const char *file, int line, pte_t);
2560 extern void __pmd_error(const char *file, int line, pmd_t);
2561 extern void __pgd_error(const char *file, int line, pgd_t);
2562@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2563 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2564 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2565
2566+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2567+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2568+
2569+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2570+#include <asm/domain.h>
2571+#include <linux/thread_info.h>
2572+#include <linux/preempt.h>
2573+
2574+static inline int test_domain(int domain, int domaintype)
2575+{
2576+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2577+}
2578+#endif
2579+
2580+#ifdef CONFIG_PAX_KERNEXEC
2581+static inline unsigned long pax_open_kernel(void) {
2582+#ifdef CONFIG_ARM_LPAE
2583+ /* TODO */
2584+#else
2585+ preempt_disable();
2586+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2587+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2588+#endif
2589+ return 0;
2590+}
2591+
2592+static inline unsigned long pax_close_kernel(void) {
2593+#ifdef CONFIG_ARM_LPAE
2594+ /* TODO */
2595+#else
2596+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2597+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2598+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2599+ preempt_enable_no_resched();
2600+#endif
2601+ return 0;
2602+}
2603+#else
2604+static inline unsigned long pax_open_kernel(void) { return 0; }
2605+static inline unsigned long pax_close_kernel(void) { return 0; }
2606+#endif
2607+
2608 /*
2609 * This is the lowest virtual address we can permit any user space
2610 * mapping to be mapped at. This is particularly important for
2611@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2612 /*
2613 * The pgprot_* and protection_map entries will be fixed up in runtime
2614 * to include the cachable and bufferable bits based on memory policy,
2615- * as well as any architecture dependent bits like global/ASID and SMP
2616- * shared mapping bits.
2617+ * as well as any architecture dependent bits like global/ASID, PXN,
2618+ * and SMP shared mapping bits.
2619 */
2620 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2621
2622@@ -265,7 +313,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2623 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2624 {
2625 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2626- L_PTE_NONE | L_PTE_VALID;
2627+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2628 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2629 return pte;
2630 }
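
Note: the pgtable.h hunk defines the `pax_open_kernel()`/`pax_close_kernel()` pair used by the fncpy.h, fiq.c and patch.c hunks: disable preemption, switch DOMAIN_KERNEL to KERNEXEC/manager so otherwise read-only kernel pages become writable, perform the write, then switch back; the `BUG_ON(test_domain(...))` checks assert the brackets are not nested. A schematic caller with the helpers stubbed (a sketch of the calling convention only, not the real domain switching):

#include <string.h>

static unsigned long pax_open_kernel(void)  { /* preempt off, DACR -> manager */ return 0; }
static unsigned long pax_close_kernel(void) { /* DACR -> client, preempt on   */ return 0; }

static char rodata_buf[16];	/* stands in for a write-protected target */

static void patch_rodata(const void *src, unsigned long n)
{
	pax_open_kernel();	/* window in which RO pages are writable */
	memcpy(rodata_buf, src, n);
	pax_close_kernel();
}

int main(void)
{
	patch_rodata("new", 4);
	return rodata_buf[0] != 'n';
}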
2631diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2632index c25ef3e..735f14b 100644
2633--- a/arch/arm/include/asm/psci.h
2634+++ b/arch/arm/include/asm/psci.h
2635@@ -32,7 +32,7 @@ struct psci_operations {
2636 int (*affinity_info)(unsigned long target_affinity,
2637 unsigned long lowest_affinity_level);
2638 int (*migrate_info_type)(void);
2639-};
2640+} __no_const;
2641
2642 extern struct psci_operations psci_ops;
2643 extern struct smp_operations psci_smp_ops;
2644diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2645index 2ec765c..beb1fe16 100644
2646--- a/arch/arm/include/asm/smp.h
2647+++ b/arch/arm/include/asm/smp.h
2648@@ -113,7 +113,7 @@ struct smp_operations {
2649 int (*cpu_disable)(unsigned int cpu);
2650 #endif
2651 #endif
2652-};
2653+} __no_const;
2654
2655 struct of_cpu_method {
2656 const char *method;
2657diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2658index e4e4208..086684a 100644
2659--- a/arch/arm/include/asm/thread_info.h
2660+++ b/arch/arm/include/asm/thread_info.h
2661@@ -88,9 +88,9 @@ struct thread_info {
2662 .flags = 0, \
2663 .preempt_count = INIT_PREEMPT_COUNT, \
2664 .addr_limit = KERNEL_DS, \
2665- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2666- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2667- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2668+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2669+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2670+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2671 .restart_block = { \
2672 .fn = do_no_restart_syscall, \
2673 }, \
2674@@ -164,7 +164,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2675 #define TIF_SYSCALL_AUDIT 9
2676 #define TIF_SYSCALL_TRACEPOINT 10
2677 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2678-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2679+/* within 8 bits of TIF_SYSCALL_TRACE
2680+ * to meet flexible second operand requirements
2681+ */
2682+#define TIF_GRSEC_SETXID 12
2683+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2684 #define TIF_USING_IWMMXT 17
2685 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2686 #define TIF_RESTORE_SIGMASK 20
2687@@ -178,10 +182,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2688 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2689 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2690 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2691+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2692
2693 /* Checks for any syscall work in entry-common.S */
2694 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2695- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2696+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2697
2698 /*
2699 * Change these and you break ASM code in entry-common.S
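
Note: the "within 8 bits of TIF_SYSCALL_TRACE" comment in the hunk refers to the ARM "flexible second operand" rule: a data-processing immediate must be an 8-bit value rotated right by an even amount, so the combined `_TIF_SYSCALL_WORK` mask can only be tested in a single `tst` if all its bits fit one such pattern (bits 8..12 give mask 0x1f00, which does). A small checker for that encoding rule, standard ARM immediate logic rather than code from the patch:

#include <stdio.h>

/* Returns 1 if v is encodable as an ARM data-processing immediate:
 * an 8-bit value rotated right by an even amount 0..30. Checking is
 * done by rotating v left and looking for a value that fits 8 bits. */
static int arm_imm_ok(unsigned v)
{
	for (unsigned rot = 0; rot < 32; rot += 2) {
		unsigned r = rot ? ((v << rot) | (v >> (32 - rot))) : v;
		if (r <= 0xff)
			return 1;
	}
	return 0;
}

int main(void)
{
	unsigned mask = 0x1f00;	/* TIF bits 8..12, as laid out above */

	printf("encodable: %d\n", arm_imm_ok(mask));	/* prints 1 */
	return 0;
}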
2700diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2701index 75d9579..b5b40e4 100644
2702--- a/arch/arm/include/asm/uaccess.h
2703+++ b/arch/arm/include/asm/uaccess.h
2704@@ -18,6 +18,7 @@
2705 #include <asm/domain.h>
2706 #include <asm/unified.h>
2707 #include <asm/compiler.h>
2708+#include <asm/pgtable.h>
2709
2710 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2711 #include <asm-generic/uaccess-unaligned.h>
2712@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2713 static inline void set_fs(mm_segment_t fs)
2714 {
2715 current_thread_info()->addr_limit = fs;
2716- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2717+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2718 }
2719
2720 #define segment_eq(a,b) ((a) == (b))
2721
2722+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2723+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2724+
2725+static inline void pax_open_userland(void)
2726+{
2727+
2728+#ifdef CONFIG_PAX_MEMORY_UDEREF
2729+ if (segment_eq(get_fs(), USER_DS)) {
2730+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2731+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2732+ }
2733+#endif
2734+
2735+}
2736+
2737+static inline void pax_close_userland(void)
2738+{
2739+
2740+#ifdef CONFIG_PAX_MEMORY_UDEREF
2741+ if (segment_eq(get_fs(), USER_DS)) {
2742+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2743+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2744+ }
2745+#endif
2746+
2747+}
2748+
2749 #define __addr_ok(addr) ({ \
2750 unsigned long flag; \
2751 __asm__("cmp %2, %0; movlo %0, #0" \
2752@@ -150,8 +178,12 @@ extern int __get_user_4(void *);
2753
2754 #define get_user(x,p) \
2755 ({ \
2756+ int __e; \
2757 might_fault(); \
2758- __get_user_check(x,p); \
2759+ pax_open_userland(); \
2760+ __e = __get_user_check(x,p); \
2761+ pax_close_userland(); \
2762+ __e; \
2763 })
2764
2765 extern int __put_user_1(void *, unsigned int);
2766@@ -196,8 +228,12 @@ extern int __put_user_8(void *, unsigned long long);
2767
2768 #define put_user(x,p) \
2769 ({ \
2770+ int __e; \
2771 might_fault(); \
2772- __put_user_check(x,p); \
2773+ pax_open_userland(); \
2774+ __e = __put_user_check(x,p); \
2775+ pax_close_userland(); \
2776+ __e; \
2777 })
2778
2779 #else /* CONFIG_MMU */
2780@@ -221,6 +257,7 @@ static inline void set_fs(mm_segment_t fs)
2781
2782 #endif /* CONFIG_MMU */
2783
2784+#define access_ok_noprefault(type,addr,size) access_ok((type),(addr),(size))
2785 #define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
2786
2787 #define user_addr_max() \
2788@@ -238,13 +275,17 @@ static inline void set_fs(mm_segment_t fs)
2789 #define __get_user(x,ptr) \
2790 ({ \
2791 long __gu_err = 0; \
2792+ pax_open_userland(); \
2793 __get_user_err((x),(ptr),__gu_err); \
2794+ pax_close_userland(); \
2795 __gu_err; \
2796 })
2797
2798 #define __get_user_error(x,ptr,err) \
2799 ({ \
2800+ pax_open_userland(); \
2801 __get_user_err((x),(ptr),err); \
2802+ pax_close_userland(); \
2803 (void) 0; \
2804 })
2805
2806@@ -320,13 +361,17 @@ do { \
2807 #define __put_user(x,ptr) \
2808 ({ \
2809 long __pu_err = 0; \
2810+ pax_open_userland(); \
2811 __put_user_err((x),(ptr),__pu_err); \
2812+ pax_close_userland(); \
2813 __pu_err; \
2814 })
2815
2816 #define __put_user_error(x,ptr,err) \
2817 ({ \
2818+ pax_open_userland(); \
2819 __put_user_err((x),(ptr),err); \
2820+ pax_close_userland(); \
2821 (void) 0; \
2822 })
2823
2824@@ -426,11 +471,44 @@ do { \
2825
2826
2827 #ifdef CONFIG_MMU
2828-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2829-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2830+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2831+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2832+
2833+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2834+{
2835+ unsigned long ret;
2836+
2837+ check_object_size(to, n, false);
2838+ pax_open_userland();
2839+ ret = ___copy_from_user(to, from, n);
2840+ pax_close_userland();
2841+ return ret;
2842+}
2843+
2844+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2845+{
2846+ unsigned long ret;
2847+
2848+ check_object_size(from, n, true);
2849+ pax_open_userland();
2850+ ret = ___copy_to_user(to, from, n);
2851+ pax_close_userland();
2852+ return ret;
2853+}
2854+
2855 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2856-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2857+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2858 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2859+
2860+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2861+{
2862+ unsigned long ret;
2863+ pax_open_userland();
2864+ ret = ___clear_user(addr, n);
2865+ pax_close_userland();
2866+ return ret;
2867+}
2868+
2869 #else
2870 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2871 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2872@@ -439,6 +517,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2873
2874 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2875 {
2876+ if ((long)n < 0)
2877+ return n;
2878+
2879 if (access_ok(VERIFY_READ, from, n))
2880 n = __copy_from_user(to, from, n);
2881 else /* security hole - plug it */
2882@@ -448,6 +529,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2883
2884 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2885 {
2886+ if ((long)n < 0)
2887+ return n;
2888+
2889 if (access_ok(VERIFY_WRITE, to, n))
2890 n = __copy_to_user(to, from, n);
2891 return n;
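
Note: the `(long)n < 0` checks added to `copy_from_user`/`copy_to_user` above reject sizes with the top bit set before `access_ok()` is even consulted, catching lengths that underflowed to a huge unsigned value in caller arithmetic. A sketch of the failure mode, assuming a target where `long` and `size_t` have the same width:

#include <stdio.h>
#include <stddef.h>

static size_t guarded_copy(size_t n)
{
	if ((long)n < 0)	/* the patch's early reject */
		return n;	/* convention: n bytes left uncopied */
	/* ... access_ok() and the real copy would run here ... */
	return 0;
}

int main(void)
{
	size_t want = 16, have = 32;
	size_t n = want - have;	/* underflows to a huge value */

	printf("n = %zu, left = %zu\n", n, guarded_copy(n));
	return 0;
}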
2892diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2893index 5af0ed1..cea83883 100644
2894--- a/arch/arm/include/uapi/asm/ptrace.h
2895+++ b/arch/arm/include/uapi/asm/ptrace.h
2896@@ -92,7 +92,7 @@
2897 * ARMv7 groups of PSR bits
2898 */
2899 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2900-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2901+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2902 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2903 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2904
2905diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2906index f7b450f..f5364c5 100644
2907--- a/arch/arm/kernel/armksyms.c
2908+++ b/arch/arm/kernel/armksyms.c
2909@@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2910
2911 /* networking */
2912 EXPORT_SYMBOL(csum_partial);
2913-EXPORT_SYMBOL(csum_partial_copy_from_user);
2914+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2915 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2916 EXPORT_SYMBOL(__csum_ipv6_magic);
2917
2918@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
2919 #ifdef CONFIG_MMU
2920 EXPORT_SYMBOL(copy_page);
2921
2922-EXPORT_SYMBOL(__copy_from_user);
2923-EXPORT_SYMBOL(__copy_to_user);
2924-EXPORT_SYMBOL(__clear_user);
2925+EXPORT_SYMBOL(___copy_from_user);
2926+EXPORT_SYMBOL(___copy_to_user);
2927+EXPORT_SYMBOL(___clear_user);
2928
2929 EXPORT_SYMBOL(__get_user_1);
2930 EXPORT_SYMBOL(__get_user_2);
2931diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2932index 52a949a..d8bbcab 100644
2933--- a/arch/arm/kernel/entry-armv.S
2934+++ b/arch/arm/kernel/entry-armv.S
2935@@ -47,6 +47,87 @@
2936 9997:
2937 .endm
2938
2939+ .macro pax_enter_kernel
2940+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2941+ @ make aligned space for saved DACR
2942+ sub sp, sp, #8
2943+ @ save regs
2944+ stmdb sp!, {r1, r2}
2945+ @ read DACR from cpu_domain into r1
2946+ mov r2, sp
2947+ @ assume 8K pages, since we have to split the immediate in two
2948+ bic r2, r2, #(0x1fc0)
2949+ bic r2, r2, #(0x3f)
2950+ ldr r1, [r2, #TI_CPU_DOMAIN]
2951+ @ store old DACR on stack
2952+ str r1, [sp, #8]
2953+#ifdef CONFIG_PAX_KERNEXEC
2954+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2955+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2956+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2957+#endif
2958+#ifdef CONFIG_PAX_MEMORY_UDEREF
2959+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2960+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2961+#endif
2962+ @ write r1 to current_thread_info()->cpu_domain
2963+ str r1, [r2, #TI_CPU_DOMAIN]
2964+ @ write r1 to DACR
2965+ mcr p15, 0, r1, c3, c0, 0
2966+ @ instruction sync
2967+ instr_sync
2968+ @ restore regs
2969+ ldmia sp!, {r1, r2}
2970+#endif
2971+ .endm
2972+
2973+ .macro pax_open_userland
2974+#ifdef CONFIG_PAX_MEMORY_UDEREF
2975+ @ save regs
2976+ stmdb sp!, {r0, r1}
2977+ @ read DACR from cpu_domain into r1
2978+ mov r0, sp
2979+ @ assume 8K pages, since we have to split the immediate in two
2980+ bic r0, r0, #(0x1fc0)
2981+ bic r0, r0, #(0x3f)
2982+ ldr r1, [r0, #TI_CPU_DOMAIN]
2983+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2984+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2985+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2986+ @ write r1 to current_thread_info()->cpu_domain
2987+ str r1, [r0, #TI_CPU_DOMAIN]
2988+ @ write r1 to DACR
2989+ mcr p15, 0, r1, c3, c0, 0
2990+ @ instruction sync
2991+ instr_sync
2992+ @ restore regs
2993+ ldmia sp!, {r0, r1}
2994+#endif
2995+ .endm
2996+
2997+ .macro pax_close_userland
2998+#ifdef CONFIG_PAX_MEMORY_UDEREF
2999+ @ save regs
3000+ stmdb sp!, {r0, r1}
3001+ @ read DACR from cpu_domain into r1
3002+ mov r0, sp
3003+ @ assume 8K pages, since we have to split the immediate in two
3004+ bic r0, r0, #(0x1fc0)
3005+ bic r0, r0, #(0x3f)
3006+ ldr r1, [r0, #TI_CPU_DOMAIN]
3007+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
3008+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
3009+ @ write r1 to current_thread_info()->cpu_domain
3010+ str r1, [r0, #TI_CPU_DOMAIN]
3011+ @ write r1 to DACR
3012+ mcr p15, 0, r1, c3, c0, 0
3013+ @ instruction sync
3014+ instr_sync
3015+ @ restore regs
3016+ ldmia sp!, {r0, r1}
3017+#endif
3018+ .endm
3019+
3020 .macro pabt_helper
3021 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
3022 #ifdef MULTI_PABORT
3023@@ -89,11 +170,15 @@
3024 * Invalid mode handlers
3025 */
3026 .macro inv_entry, reason
3027+
3028+ pax_enter_kernel
3029+
3030 sub sp, sp, #S_FRAME_SIZE
3031 ARM( stmib sp, {r1 - lr} )
3032 THUMB( stmia sp, {r0 - r12} )
3033 THUMB( str sp, [sp, #S_SP] )
3034 THUMB( str lr, [sp, #S_LR] )
3035+
3036 mov r1, #\reason
3037 .endm
3038
3039@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
3040 .macro svc_entry, stack_hole=0
3041 UNWIND(.fnstart )
3042 UNWIND(.save {r0 - pc} )
3043+
3044+ pax_enter_kernel
3045+
3046 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
3047+
3048 #ifdef CONFIG_THUMB2_KERNEL
3049 SPFIX( str r0, [sp] ) @ temporarily saved
3050 SPFIX( mov r0, sp )
3051@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
3052 ldmia r0, {r3 - r5}
3053 add r7, sp, #S_SP - 4 @ here for interlock avoidance
3054 mov r6, #-1 @ "" "" "" ""
3055+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3056+ @ offset sp by 8 as done in pax_enter_kernel
3057+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
3058+#else
3059 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
3060+#endif
3061 SPFIX( addeq r2, r2, #4 )
3062 str r3, [sp, #-4]! @ save the "real" r0 copied
3063 @ from the exception stack
3064@@ -317,6 +411,9 @@ ENDPROC(__pabt_svc)
3065 .macro usr_entry
3066 UNWIND(.fnstart )
3067 UNWIND(.cantunwind ) @ don't unwind the user space
3068+
3069+ pax_enter_kernel_user
3070+
3071 sub sp, sp, #S_FRAME_SIZE
3072 ARM( stmib sp, {r1 - r12} )
3073 THUMB( stmia sp, {r0 - r12} )
3074@@ -421,7 +518,9 @@ __und_usr:
3075 tst r3, #PSR_T_BIT @ Thumb mode?
3076 bne __und_usr_thumb
3077 sub r4, r2, #4 @ ARM instr at LR - 4
3078+ pax_open_userland
3079 1: ldrt r0, [r4]
3080+ pax_close_userland
3081 ARM_BE8(rev r0, r0) @ little endian instruction
3082
3083 @ r0 = 32-bit ARM instruction which caused the exception
3084@@ -455,11 +554,15 @@ __und_usr_thumb:
3085 */
3086 .arch armv6t2
3087 #endif
3088+ pax_open_userland
3089 2: ldrht r5, [r4]
3090+ pax_close_userland
3091 ARM_BE8(rev16 r5, r5) @ little endian instruction
3092 cmp r5, #0xe800 @ 32bit instruction if xx != 0
3093 blo __und_usr_fault_16 @ 16bit undefined instruction
3094+ pax_open_userland
3095 3: ldrht r0, [r2]
3096+ pax_close_userland
3097 ARM_BE8(rev16 r0, r0) @ little endian instruction
3098 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
3099 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
3100@@ -489,7 +592,8 @@ ENDPROC(__und_usr)
3101 */
3102 .pushsection .fixup, "ax"
3103 .align 2
3104-4: str r4, [sp, #S_PC] @ retry current instruction
3105+4: pax_close_userland
3106+ str r4, [sp, #S_PC] @ retry current instruction
3107 mov pc, r9
3108 .popsection
3109 .pushsection __ex_table,"a"
3110@@ -698,7 +802,7 @@ ENTRY(__switch_to)
3111 THUMB( str lr, [ip], #4 )
3112 ldr r4, [r2, #TI_TP_VALUE]
3113 ldr r5, [r2, #TI_TP_VALUE + 4]
3114-#ifdef CONFIG_CPU_USE_DOMAINS
3115+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3116 ldr r6, [r2, #TI_CPU_DOMAIN]
3117 #endif
3118 switch_tls r1, r4, r5, r3, r7
3119@@ -707,7 +811,7 @@ ENTRY(__switch_to)
3120 ldr r8, =__stack_chk_guard
3121 ldr r7, [r7, #TSK_STACK_CANARY]
3122 #endif
3123-#ifdef CONFIG_CPU_USE_DOMAINS
3124+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3125 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
3126 #endif
3127 mov r5, r0
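
Note: the repeated `bic rX, rX, #(0x1fc0)` / `bic rX, rX, #(0x3f)` pair in the entry-armv.S macros above clears the low 13 bits of `sp` to find the base of the 8 KiB kernel stack, where `struct thread_info` lives; the 0x1fff mask is split across two BICs because it is not encodable as a single ARM immediate (see the encoding note earlier). The C equivalent, assuming an 8 KiB THREAD_SIZE as the "8K pages" comments state:

#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE 8192UL	/* assumed, per the macros' comments */

static uintptr_t thread_info_base(uintptr_t sp)
{
	return sp & ~(THREAD_SIZE - 1);	/* == the two BICs of 0x1fc0 and 0x3f */
}

int main(void)
{
	uintptr_t sp = 0xc0a03e50;	/* arbitrary in-stack address */

	printf("thread_info at %#lx\n", (unsigned long)thread_info_base(sp));
	return 0;
}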
3128diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
3129index 7139d4a..feaf37f 100644
3130--- a/arch/arm/kernel/entry-common.S
3131+++ b/arch/arm/kernel/entry-common.S
3132@@ -10,18 +10,46 @@
3133
3134 #include <asm/unistd.h>
3135 #include <asm/ftrace.h>
3136+#include <asm/domain.h>
3137 #include <asm/unwind.h>
3138
3139+#include "entry-header.S"
3140+
3141 #ifdef CONFIG_NEED_RET_TO_USER
3142 #include <mach/entry-macro.S>
3143 #else
3144 .macro arch_ret_to_user, tmp1, tmp2
3145+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3146+ @ save regs
3147+ stmdb sp!, {r1, r2}
3148+ @ read DACR from cpu_domain into r1
3149+ mov r2, sp
3150+ @ assume 8K pages, since we have to split the immediate in two
3151+ bic r2, r2, #(0x1fc0)
3152+ bic r2, r2, #(0x3f)
3153+ ldr r1, [r2, #TI_CPU_DOMAIN]
3154+#ifdef CONFIG_PAX_KERNEXEC
3155+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
3156+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
3157+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
3158+#endif
3159+#ifdef CONFIG_PAX_MEMORY_UDEREF
3160+ @ set current DOMAIN_USER to DOMAIN_UDEREF
3161+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
3162+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
3163+#endif
3164+ @ write r1 to current_thread_info()->cpu_domain
3165+ str r1, [r2, #TI_CPU_DOMAIN]
3166+ @ write r1 to DACR
3167+ mcr p15, 0, r1, c3, c0, 0
3168+ @ instruction sync
3169+ instr_sync
3170+ @ restore regs
3171+ ldmia sp!, {r1, r2}
3172+#endif
3173 .endm
3174 #endif
3175
3176-#include "entry-header.S"
3177-
3178-
3179 .align 5
3180 /*
3181 * This is the fast syscall return path. We do as little as
3182@@ -405,6 +433,12 @@ ENTRY(vector_swi)
3183 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
3184 #endif
3185
3186+ /*
3187+ * do this here to avoid a performance hit of wrapping the code above
3188+ * that directly dereferences userland to parse the SWI instruction
3189+ */
3190+ pax_enter_kernel_user
3191+
3192 adr tbl, sys_call_table @ load syscall table pointer
3193
3194 #if defined(CONFIG_OABI_COMPAT)
3195diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
3196index 5d702f8..f5fc51a 100644
3197--- a/arch/arm/kernel/entry-header.S
3198+++ b/arch/arm/kernel/entry-header.S
3199@@ -188,6 +188,60 @@
3200 msr cpsr_c, \rtemp @ switch back to the SVC mode
3201 .endm
3202
3203+ .macro pax_enter_kernel_user
3204+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3205+ @ save regs
3206+ stmdb sp!, {r0, r1}
3207+ @ read DACR from cpu_domain into r1
3208+ mov r0, sp
3209+ @ assume 8K pages, since we have to split the immediate in two
3210+ bic r0, r0, #(0x1fc0)
3211+ bic r0, r0, #(0x3f)
3212+ ldr r1, [r0, #TI_CPU_DOMAIN]
3213+#ifdef CONFIG_PAX_MEMORY_UDEREF
3214+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
3215+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
3216+#endif
3217+#ifdef CONFIG_PAX_KERNEXEC
3218+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
3219+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
3220+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
3221+#endif
3222+ @ write r1 to current_thread_info()->cpu_domain
3223+ str r1, [r0, #TI_CPU_DOMAIN]
3224+ @ write r1 to DACR
3225+ mcr p15, 0, r1, c3, c0, 0
3226+ @ instruction sync
3227+ instr_sync
3228+ @ restore regs
3229+ ldmia sp!, {r0, r1}
3230+#endif
3231+ .endm
3232+
3233+ .macro pax_exit_kernel
3234+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3235+ @ save regs
3236+ stmdb sp!, {r0, r1}
3237+ @ read old DACR from stack into r1
3238+ ldr r1, [sp, #(8 + S_SP)]
3239+ sub r1, r1, #8
3240+ ldr r1, [r1]
3241+
3242+ @ write r1 to current_thread_info()->cpu_domain
3243+ mov r0, sp
3244+ @ assume 8K pages, since we have to split the immediate in two
3245+ bic r0, r0, #(0x1fc0)
3246+ bic r0, r0, #(0x3f)
3247+ str r1, [r0, #TI_CPU_DOMAIN]
3248+ @ write r1 to DACR
3249+ mcr p15, 0, r1, c3, c0, 0
3250+ @ instruction sync
3251+ instr_sync
3252+ @ restore regs
3253+ ldmia sp!, {r0, r1}
3254+#endif
3255+ .endm
3256+
3257 #ifndef CONFIG_THUMB2_KERNEL
3258 .macro svc_exit, rpsr, irq = 0
3259 .if \irq != 0
3260@@ -207,6 +261,9 @@
3261 blne trace_hardirqs_off
3262 #endif
3263 .endif
3264+
3265+ pax_exit_kernel
3266+
3267 msr spsr_cxsf, \rpsr
3268 #if defined(CONFIG_CPU_V6)
3269 ldr r0, [sp]
3270@@ -265,6 +322,9 @@
3271 blne trace_hardirqs_off
3272 #endif
3273 .endif
3274+
3275+ pax_exit_kernel
3276+
3277 ldr lr, [sp, #S_SP] @ top of the stack
3278 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
3279 clrex @ clear the exclusive monitor
3280diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
3281index 918875d..cd5fa27 100644
3282--- a/arch/arm/kernel/fiq.c
3283+++ b/arch/arm/kernel/fiq.c
3284@@ -87,7 +87,10 @@ void set_fiq_handler(void *start, unsigned int length)
3285 void *base = vectors_page;
3286 unsigned offset = FIQ_OFFSET;
3287
3288+ pax_open_kernel();
3289 memcpy(base + offset, start, length);
3290+ pax_close_kernel();
3291+
3292 if (!cache_is_vipt_nonaliasing())
3293 flush_icache_range((unsigned long)base + offset, offset +
3294 length);
3295diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
3296index 2c35f0f..7747ee6 100644
3297--- a/arch/arm/kernel/head.S
3298+++ b/arch/arm/kernel/head.S
3299@@ -437,7 +437,7 @@ __enable_mmu:
3300 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
3301 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
3302 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
3303- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
3304+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
3305 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
3306 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
3307 #endif
3308diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
3309index 45e4781..8eac93d 100644
3310--- a/arch/arm/kernel/module.c
3311+++ b/arch/arm/kernel/module.c
3312@@ -38,12 +38,39 @@
3313 #endif
3314
3315 #ifdef CONFIG_MMU
3316-void *module_alloc(unsigned long size)
3317+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
3318 {
3319+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
3320+ return NULL;
3321 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
3322- GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
3323+ GFP_KERNEL, prot, NUMA_NO_NODE,
3324 __builtin_return_address(0));
3325 }
3326+
3327+void *module_alloc(unsigned long size)
3328+{
3329+
3330+#ifdef CONFIG_PAX_KERNEXEC
3331+ return __module_alloc(size, PAGE_KERNEL);
3332+#else
3333+ return __module_alloc(size, PAGE_KERNEL_EXEC);
3334+#endif
3335+
3336+}
3337+
3338+#ifdef CONFIG_PAX_KERNEXEC
3339+void module_free_exec(struct module *mod, void *module_region)
3340+{
3341+ module_free(mod, module_region);
3342+}
3343+EXPORT_SYMBOL(module_free_exec);
3344+
3345+void *module_alloc_exec(unsigned long size)
3346+{
3347+ return __module_alloc(size, PAGE_KERNEL_EXEC);
3348+}
3349+EXPORT_SYMBOL(module_alloc_exec);
3350+#endif
3351 #endif
3352
3353 int
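
Note: under KERNEXEC the module.c hunk above allocates the default module region non-executable (`PAGE_KERNEL`) and adds `module_alloc_exec()` with `PAGE_KERNEL_EXEC` for code, so writable module memory is never also executable. A schematic of the selection logic only, with the kernel types and allocator stubbed (the real path is `__vmalloc_node_range`):

#include <stdlib.h>

typedef int pgprot_t;		/* stand-in for the kernel type */
#define PAGE_KERNEL		0	/* RW, non-executable */
#define PAGE_KERNEL_EXEC	1	/* RW + executable    */

static void *__module_alloc(unsigned long size, pgprot_t prot)
{
	(void)prot;		/* real code passes prot to __vmalloc_node_range() */
	return size ? malloc(size) : NULL;
}

static void *module_alloc(unsigned long size)
{
#ifdef CONFIG_PAX_KERNEXEC
	return __module_alloc(size, PAGE_KERNEL);	/* data mappings stay NX */
#else
	return __module_alloc(size, PAGE_KERNEL_EXEC);
#endif
}

int main(void)
{
	void *p = module_alloc(64);

	free(p);
	return p == NULL;
}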
3354diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
3355index 07314af..c46655c 100644
3356--- a/arch/arm/kernel/patch.c
3357+++ b/arch/arm/kernel/patch.c
3358@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
3359 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
3360 int size;
3361
3362+ pax_open_kernel();
3363 if (thumb2 && __opcode_is_thumb16(insn)) {
3364 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
3365 size = sizeof(u16);
3366@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
3367 *(u32 *)addr = insn;
3368 size = sizeof(u32);
3369 }
3370+ pax_close_kernel();
3371
3372 flush_icache_range((uintptr_t)(addr),
3373 (uintptr_t)(addr) + size);
3374diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
3375index 81ef686..f4130b8 100644
3376--- a/arch/arm/kernel/process.c
3377+++ b/arch/arm/kernel/process.c
3378@@ -212,6 +212,7 @@ void machine_power_off(void)
3379
3380 if (pm_power_off)
3381 pm_power_off();
3382+ BUG();
3383 }
3384
3385 /*
3386@@ -225,7 +226,7 @@ void machine_power_off(void)
3387 * executing pre-reset code, and using RAM that the primary CPU's code wishes
3388 * to use. Implementing such co-ordination would be essentially impossible.
3389 */
3390-void machine_restart(char *cmd)
3391+__noreturn void machine_restart(char *cmd)
3392 {
3393 local_irq_disable();
3394 smp_send_stop();
3395@@ -248,8 +249,8 @@ void __show_regs(struct pt_regs *regs)
3396
3397 show_regs_print_info(KERN_DEFAULT);
3398
3399- print_symbol("PC is at %s\n", instruction_pointer(regs));
3400- print_symbol("LR is at %s\n", regs->ARM_lr);
3401+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
3402+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
3403 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
3404 "sp : %08lx ip : %08lx fp : %08lx\n",
3405 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
3406@@ -425,12 +426,6 @@ unsigned long get_wchan(struct task_struct *p)
3407 return 0;
3408 }
3409
3410-unsigned long arch_randomize_brk(struct mm_struct *mm)
3411-{
3412- unsigned long range_end = mm->brk + 0x02000000;
3413- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
3414-}
3415-
3416 #ifdef CONFIG_MMU
3417 #ifdef CONFIG_KUSER_HELPERS
3418 /*
3419@@ -446,7 +441,7 @@ static struct vm_area_struct gate_vma = {
3420
3421 static int __init gate_vma_init(void)
3422 {
3423- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
3424+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
3425 return 0;
3426 }
3427 arch_initcall(gate_vma_init);
3428@@ -472,41 +467,16 @@ int in_gate_area_no_mm(unsigned long addr)
3429
3430 const char *arch_vma_name(struct vm_area_struct *vma)
3431 {
3432- return is_gate_vma(vma) ? "[vectors]" :
3433- (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
3434- "[sigpage]" : NULL;
3435+ return is_gate_vma(vma) ? "[vectors]" : NULL;
3436 }
3437
3438-static struct page *signal_page;
3439-extern struct page *get_signal_page(void);
3440-
3441 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3442 {
3443 struct mm_struct *mm = current->mm;
3444- unsigned long addr;
3445- int ret;
3446-
3447- if (!signal_page)
3448- signal_page = get_signal_page();
3449- if (!signal_page)
3450- return -ENOMEM;
3451
3452 down_write(&mm->mmap_sem);
3453- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
3454- if (IS_ERR_VALUE(addr)) {
3455- ret = addr;
3456- goto up_fail;
3457- }
3458-
3459- ret = install_special_mapping(mm, addr, PAGE_SIZE,
3460- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
3461- &signal_page);
3462-
3463- if (ret == 0)
3464- mm->context.sigpage = addr;
3465-
3466- up_fail:
3467+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
3468 up_write(&mm->mmap_sem);
3469- return ret;
3470+ return 0;
3471 }
3472 #endif
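
The rewritten arch_setup_additional_pages() above no longer maps a signal page at all: it records a random, word-aligned address that, with the default 3G/1G split, lands in the kernel half of the address space (PAGE_OFFSET up to just below the 0xffff0000 vectors), so a userland jump to the sigreturn trampoline is guaranteed to fault. The do_PrefetchAbort() hunk in arch/arm/mm/fault.c further below recognises the fault by its PC range and emulates the return directly, which is also why "[sigpage]" disappears from arch_vma_name(). The emulation check, restated here for orientation (seven 32-bit trampoline slots, non-RT first):

if (sigpage <= pc && pc < sigpage + 7*4) {
	if (pc < sigpage + 3*4)
		sys_sigreturn(regs);	/* non-RT trampoline slots */
	else
		sys_rt_sigreturn(regs);	/* RT trampoline slots */
	return;
}
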
3473diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
3474index f73891b..cf3004e 100644
3475--- a/arch/arm/kernel/psci.c
3476+++ b/arch/arm/kernel/psci.c
3477@@ -28,7 +28,7 @@
3478 #include <asm/psci.h>
3479 #include <asm/system_misc.h>
3480
3481-struct psci_operations psci_ops;
3482+struct psci_operations psci_ops __read_only;
3483
3484 static int (*invoke_psci_fn)(u32, u32, u32, u32);
3485 typedef int (*psci_initcall_t)(const struct device_node *);
3486diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
3487index 0c27ed6..b67388e 100644
3488--- a/arch/arm/kernel/ptrace.c
3489+++ b/arch/arm/kernel/ptrace.c
3490@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
3491 regs->ARM_ip = ip;
3492 }
3493
3494+#ifdef CONFIG_GRKERNSEC_SETXID
3495+extern void gr_delayed_cred_worker(void);
3496+#endif
3497+
3498 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3499 {
3500 current_thread_info()->syscall = scno;
3501
3502+#ifdef CONFIG_GRKERNSEC_SETXID
3503+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3504+ gr_delayed_cred_worker();
3505+#endif
3506+
3507 /* Do the secure computing check first; failures should be fast. */
3508 if (secure_computing(scno) == -1)
3509 return -1;
3510diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3511index 8a16ee5..4f560e5 100644
3512--- a/arch/arm/kernel/setup.c
3513+++ b/arch/arm/kernel/setup.c
3514@@ -104,21 +104,23 @@ EXPORT_SYMBOL(elf_hwcap);
3515 unsigned int elf_hwcap2 __read_mostly;
3516 EXPORT_SYMBOL(elf_hwcap2);
3517
3518+pteval_t __supported_pte_mask __read_only;
3519+pmdval_t __supported_pmd_mask __read_only;
3520
3521 #ifdef MULTI_CPU
3522-struct processor processor __read_mostly;
3523+struct processor processor __read_only;
3524 #endif
3525 #ifdef MULTI_TLB
3526-struct cpu_tlb_fns cpu_tlb __read_mostly;
3527+struct cpu_tlb_fns cpu_tlb __read_only;
3528 #endif
3529 #ifdef MULTI_USER
3530-struct cpu_user_fns cpu_user __read_mostly;
3531+struct cpu_user_fns cpu_user __read_only;
3532 #endif
3533 #ifdef MULTI_CACHE
3534-struct cpu_cache_fns cpu_cache __read_mostly;
3535+struct cpu_cache_fns cpu_cache __read_only;
3536 #endif
3537 #ifdef CONFIG_OUTER_CACHE
3538-struct outer_cache_fns outer_cache __read_mostly;
3539+struct outer_cache_fns outer_cache __read_only;
3540 EXPORT_SYMBOL(outer_cache);
3541 #endif
3542
3543@@ -251,9 +253,13 @@ static int __get_cpu_architecture(void)
3544 asm("mrc p15, 0, %0, c0, c1, 4"
3545 : "=r" (mmfr0));
3546 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3547- (mmfr0 & 0x000000f0) >= 0x00000030)
3548+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3549 cpu_arch = CPU_ARCH_ARMv7;
3550- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3551+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3552+ __supported_pte_mask |= L_PTE_PXN;
3553+ __supported_pmd_mask |= PMD_PXNTABLE;
3554+ }
3555+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3556 (mmfr0 & 0x000000f0) == 0x00000020)
3557 cpu_arch = CPU_ARCH_ARMv6;
3558 else
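
The extended arch probe above reads ID_MMFR0 and, per the ARMv7 encoding of its VMSA field, treats the values 4 and 5 as VMSAv7 with PXN support (5 additionally implying long-descriptor support). __supported_pte_mask and __supported_pmd_mask therefore carry L_PTE_PXN/PMD_PXNTABLE only on such cores; build_mem_type_table() in the mmu.c hunk further below ORs the mask into user_pgprot, making user pages privileged-execute-never where the hardware allows it. A hypothetical helper showing how the runtime mask is meant to be applied:

static inline pte_t example_mk_user_pte(pte_t pte)
{
	/* __supported_pte_mask is empty on pre-PXN cores, a no-op there */
	return __pte(pte_val(pte) | __supported_pte_mask);
}
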
3559diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3560index bd19834..e4d8c66 100644
3561--- a/arch/arm/kernel/signal.c
3562+++ b/arch/arm/kernel/signal.c
3563@@ -24,8 +24,6 @@
3564
3565 extern const unsigned long sigreturn_codes[7];
3566
3567-static unsigned long signal_return_offset;
3568-
3569 #ifdef CONFIG_CRUNCH
3570 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3571 {
3572@@ -396,8 +394,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3573 * except when the MPU has protected the vectors
3574 * page from PL0
3575 */
3576- retcode = mm->context.sigpage + signal_return_offset +
3577- (idx << 2) + thumb;
3578+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3579 } else
3580 #endif
3581 {
3582@@ -604,33 +601,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3583 } while (thread_flags & _TIF_WORK_MASK);
3584 return 0;
3585 }
3586-
3587-struct page *get_signal_page(void)
3588-{
3589- unsigned long ptr;
3590- unsigned offset;
3591- struct page *page;
3592- void *addr;
3593-
3594- page = alloc_pages(GFP_KERNEL, 0);
3595-
3596- if (!page)
3597- return NULL;
3598-
3599- addr = page_address(page);
3600-
3601- /* Give the signal return code some randomness */
3602- offset = 0x200 + (get_random_int() & 0x7fc);
3603- signal_return_offset = offset;
3604-
3605- /*
3606- * Copy signal return handlers into the vector page, and
3607- * set sigreturn to be a pointer to these.
3608- */
3609- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3610-
3611- ptr = (unsigned long)addr + offset;
3612- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3613-
3614- return page;
3615-}
3616diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3617index 7c4fada..8581286 100644
3618--- a/arch/arm/kernel/smp.c
3619+++ b/arch/arm/kernel/smp.c
3620@@ -73,7 +73,7 @@ enum ipi_msg_type {
3621
3622 static DECLARE_COMPLETION(cpu_running);
3623
3624-static struct smp_operations smp_ops;
3625+static struct smp_operations smp_ops __read_only;
3626
3627 void __init smp_set_ops(struct smp_operations *ops)
3628 {
3629diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
3630index 7a3be1d..b00c7de 100644
3631--- a/arch/arm/kernel/tcm.c
3632+++ b/arch/arm/kernel/tcm.c
3633@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
3634 .virtual = ITCM_OFFSET,
3635 .pfn = __phys_to_pfn(ITCM_OFFSET),
3636 .length = 0,
3637- .type = MT_MEMORY_RWX_ITCM,
3638+ .type = MT_MEMORY_RX_ITCM,
3639 }
3640 };
3641
3642@@ -267,7 +267,9 @@ no_dtcm:
3643 start = &__sitcm_text;
3644 end = &__eitcm_text;
3645 ram = &__itcm_start;
3646+ pax_open_kernel();
3647 memcpy(start, ram, itcm_code_sz);
3648+ pax_close_kernel();
3649 pr_debug("CPU ITCM: copied code from %p - %p\n",
3650 start, end);
3651 itcm_present = true;
3652diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3653index abd2fc0..895dbb6 100644
3654--- a/arch/arm/kernel/traps.c
3655+++ b/arch/arm/kernel/traps.c
3656@@ -62,7 +62,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3657 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3658 {
3659 #ifdef CONFIG_KALLSYMS
3660- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3661+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3662 #else
3663 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3664 #endif
3665@@ -264,6 +264,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3666 static int die_owner = -1;
3667 static unsigned int die_nest_count;
3668
3669+extern void gr_handle_kernel_exploit(void);
3670+
3671 static unsigned long oops_begin(void)
3672 {
3673 int cpu;
3674@@ -306,6 +308,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3675 panic("Fatal exception in interrupt");
3676 if (panic_on_oops)
3677 panic("Fatal exception");
3678+
3679+ gr_handle_kernel_exploit();
3680+
3681 if (signr)
3682 do_exit(signr);
3683 }
3684@@ -643,7 +648,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
3685 * The user helper at 0xffff0fe0 must be used instead.
3686 * (see entry-armv.S for details)
3687 */
3688+ pax_open_kernel();
3689 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
3690+ pax_close_kernel();
3691 }
3692 return 0;
3693
3694@@ -900,7 +907,11 @@ void __init early_trap_init(void *vectors_base)
3695 kuser_init(vectors_base);
3696
3697 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3698- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3699+
3700+#ifndef CONFIG_PAX_MEMORY_UDEREF
3701+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3702+#endif
3703+
3704 #else /* ifndef CONFIG_CPU_V7M */
3705 /*
3706 * on V7-M there is no need to copy the vector table to a dedicated
3707diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3708index 7bcee5c..e2f3249 100644
3709--- a/arch/arm/kernel/vmlinux.lds.S
3710+++ b/arch/arm/kernel/vmlinux.lds.S
3711@@ -8,7 +8,11 @@
3712 #include <asm/thread_info.h>
3713 #include <asm/memory.h>
3714 #include <asm/page.h>
3715-
3716+
3717+#ifdef CONFIG_PAX_KERNEXEC
3718+#include <asm/pgtable.h>
3719+#endif
3720+
3721 #define PROC_INFO \
3722 . = ALIGN(4); \
3723 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3724@@ -34,7 +38,7 @@
3725 #endif
3726
3727 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3728- defined(CONFIG_GENERIC_BUG)
3729+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3730 #define ARM_EXIT_KEEP(x) x
3731 #define ARM_EXIT_DISCARD(x)
3732 #else
3733@@ -90,6 +94,11 @@ SECTIONS
3734 _text = .;
3735 HEAD_TEXT
3736 }
3737+
3738+#ifdef CONFIG_PAX_KERNEXEC
3739+ . = ALIGN(1<<SECTION_SHIFT);
3740+#endif
3741+
3742 .text : { /* Real text segment */
3743 _stext = .; /* Text and read-only data */
3744 __exception_text_start = .;
3745@@ -112,6 +121,8 @@ SECTIONS
3746 ARM_CPU_KEEP(PROC_INFO)
3747 }
3748
3749+ _etext = .; /* End of text section */
3750+
3751 RO_DATA(PAGE_SIZE)
3752
3753 . = ALIGN(4);
3754@@ -142,7 +153,9 @@ SECTIONS
3755
3756 NOTES
3757
3758- _etext = .; /* End of text and rodata section */
3759+#ifdef CONFIG_PAX_KERNEXEC
3760+ . = ALIGN(1<<SECTION_SHIFT);
3761+#endif
3762
3763 #ifndef CONFIG_XIP_KERNEL
3764 . = ALIGN(PAGE_SIZE);
3765@@ -220,6 +233,11 @@ SECTIONS
3766 . = PAGE_OFFSET + TEXT_OFFSET;
3767 #else
3768 __init_end = .;
3769+
3770+#ifdef CONFIG_PAX_KERNEXEC
3771+ . = ALIGN(1<<SECTION_SHIFT);
3772+#endif
3773+
3774 . = ALIGN(THREAD_SIZE);
3775 __data_loc = .;
3776 #endif
3777diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3778index 3c82b37..69fa3d2 100644
3779--- a/arch/arm/kvm/arm.c
3780+++ b/arch/arm/kvm/arm.c
3781@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
3782 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3783
3784 /* The VMID used in the VTTBR */
3785-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3786+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3787 static u8 kvm_next_vmid;
3788 static DEFINE_SPINLOCK(kvm_vmid_lock);
3789
3790@@ -409,7 +409,7 @@ void force_vm_exit(const cpumask_t *mask)
3791 */
3792 static bool need_new_vmid_gen(struct kvm *kvm)
3793 {
3794- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3795+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3796 }
3797
3798 /**
3799@@ -442,7 +442,7 @@ static void update_vttbr(struct kvm *kvm)
3800
3801 /* First user of a new VMID generation? */
3802 if (unlikely(kvm_next_vmid == 0)) {
3803- atomic64_inc(&kvm_vmid_gen);
3804+ atomic64_inc_unchecked(&kvm_vmid_gen);
3805 kvm_next_vmid = 1;
3806
3807 /*
3808@@ -459,7 +459,7 @@ static void update_vttbr(struct kvm *kvm)
3809 kvm_call_hyp(__kvm_flush_vm_context);
3810 }
3811
3812- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3813+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3814 kvm->arch.vmid = kvm_next_vmid;
3815 kvm_next_vmid++;
3816
3817@@ -1034,7 +1034,7 @@ static void check_kvm_target_cpu(void *ret)
3818 /**
3819 * Initialize Hyp-mode and memory mappings on all CPUs.
3820 */
3821-int kvm_arch_init(void *opaque)
3822+int kvm_arch_init(const void *opaque)
3823 {
3824 int err;
3825 int ret, cpu;
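
kvm_vmid_gen is a generation counter that may wrap by design, so under CONFIG_PAX_REFCOUNT, where the plain atomic64 operations trap on signed overflow to stop reference-count exploits, it is converted to the _unchecked API, which keeps the old unguarded semantics. The same conversion is applied to the ASID generation counter in arch/arm/mm/context.c below. In miniature (example_gen is a hypothetical name):

static atomic64_unchecked_t example_gen = ATOMIC64_INIT(0);

static u64 example_next_gen(void)
{
	/* wrapping is harmless here, so no overflow trap is wanted */
	return atomic64_add_return_unchecked(1, &example_gen);
}
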
3826diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3827index 14a0d98..7771a7d 100644
3828--- a/arch/arm/lib/clear_user.S
3829+++ b/arch/arm/lib/clear_user.S
3830@@ -12,14 +12,14 @@
3831
3832 .text
3833
3834-/* Prototype: int __clear_user(void *addr, size_t sz)
3835+/* Prototype: int ___clear_user(void *addr, size_t sz)
3836 * Purpose : clear some user memory
3837 * Params : addr - user memory address to clear
3838 * : sz - number of bytes to clear
3839 * Returns : number of bytes NOT cleared
3840 */
3841 ENTRY(__clear_user_std)
3842-WEAK(__clear_user)
3843+WEAK(___clear_user)
3844 stmfd sp!, {r1, lr}
3845 mov r2, #0
3846 cmp r1, #4
3847@@ -44,7 +44,7 @@ WEAK(__clear_user)
3848 USER( strnebt r2, [r0])
3849 mov r0, #0
3850 ldmfd sp!, {r1, pc}
3851-ENDPROC(__clear_user)
3852+ENDPROC(___clear_user)
3853 ENDPROC(__clear_user_std)
3854
3855 .pushsection .fixup,"ax"
3856diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3857index 66a477a..bee61d3 100644
3858--- a/arch/arm/lib/copy_from_user.S
3859+++ b/arch/arm/lib/copy_from_user.S
3860@@ -16,7 +16,7 @@
3861 /*
3862 * Prototype:
3863 *
3864- * size_t __copy_from_user(void *to, const void *from, size_t n)
3865+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3866 *
3867 * Purpose:
3868 *
3869@@ -84,11 +84,11 @@
3870
3871 .text
3872
3873-ENTRY(__copy_from_user)
3874+ENTRY(___copy_from_user)
3875
3876 #include "copy_template.S"
3877
3878-ENDPROC(__copy_from_user)
3879+ENDPROC(___copy_from_user)
3880
3881 .pushsection .fixup,"ax"
3882 .align 0
3883diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3884index 6ee2f67..d1cce76 100644
3885--- a/arch/arm/lib/copy_page.S
3886+++ b/arch/arm/lib/copy_page.S
3887@@ -10,6 +10,7 @@
3888 * ASM optimised string functions
3889 */
3890 #include <linux/linkage.h>
3891+#include <linux/const.h>
3892 #include <asm/assembler.h>
3893 #include <asm/asm-offsets.h>
3894 #include <asm/cache.h>
3895diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3896index d066df6..df28194 100644
3897--- a/arch/arm/lib/copy_to_user.S
3898+++ b/arch/arm/lib/copy_to_user.S
3899@@ -16,7 +16,7 @@
3900 /*
3901 * Prototype:
3902 *
3903- * size_t __copy_to_user(void *to, const void *from, size_t n)
3904+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3905 *
3906 * Purpose:
3907 *
3908@@ -88,11 +88,11 @@
3909 .text
3910
3911 ENTRY(__copy_to_user_std)
3912-WEAK(__copy_to_user)
3913+WEAK(___copy_to_user)
3914
3915 #include "copy_template.S"
3916
3917-ENDPROC(__copy_to_user)
3918+ENDPROC(___copy_to_user)
3919 ENDPROC(__copy_to_user_std)
3920
3921 .pushsection .fixup,"ax"
3922diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3923index 7d08b43..f7ca7ea 100644
3924--- a/arch/arm/lib/csumpartialcopyuser.S
3925+++ b/arch/arm/lib/csumpartialcopyuser.S
3926@@ -57,8 +57,8 @@
3927 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3928 */
3929
3930-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3931-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3932+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3933+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3934
3935 #include "csumpartialcopygeneric.S"
3936
3937diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3938index 5306de3..aed6d03 100644
3939--- a/arch/arm/lib/delay.c
3940+++ b/arch/arm/lib/delay.c
3941@@ -28,7 +28,7 @@
3942 /*
3943 * Default to the loop-based delay implementation.
3944 */
3945-struct arm_delay_ops arm_delay_ops = {
3946+struct arm_delay_ops arm_delay_ops __read_only = {
3947 .delay = __loop_delay,
3948 .const_udelay = __loop_const_udelay,
3949 .udelay = __loop_udelay,
3950diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3951index 3e58d71..029817c 100644
3952--- a/arch/arm/lib/uaccess_with_memcpy.c
3953+++ b/arch/arm/lib/uaccess_with_memcpy.c
3954@@ -136,7 +136,7 @@ out:
3955 }
3956
3957 unsigned long
3958-__copy_to_user(void __user *to, const void *from, unsigned long n)
3959+___copy_to_user(void __user *to, const void *from, unsigned long n)
3960 {
3961 /*
3962 * This test is stubbed out of the main function above to keep
3963@@ -190,7 +190,7 @@ out:
3964 return n;
3965 }
3966
3967-unsigned long __clear_user(void __user *addr, unsigned long n)
3968+unsigned long ___clear_user(void __user *addr, unsigned long n)
3969 {
3970 /* See rationale for this in __copy_to_user() above. */
3971 if (n < 64)
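
The triple-underscore renames across clear_user.S, copy_from_user.S, copy_to_user.S, csumpartialcopyuser.S and this file all serve one purpose: they free up the two-underscore names for thin UDEREF wrappers, which this series adds to the uaccess headers (an assumption here, that hunk lives elsewhere in the patch), so that every user copy opens and closes the userland window around the real routine. Roughly:

static inline unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long ret;

	pax_open_userland();	/* make userland accessible (UDEREF) */
	ret = ___copy_to_user(to, from, n);
	pax_close_userland();
	return ret;
}
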
3972diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
3973index f7a07a5..258e1f7 100644
3974--- a/arch/arm/mach-at91/setup.c
3975+++ b/arch/arm/mach-at91/setup.c
3976@@ -81,7 +81,7 @@ void __init at91_init_sram(int bank, unsigned long base, unsigned int length)
3977
3978 desc->pfn = __phys_to_pfn(base);
3979 desc->length = length;
3980- desc->type = MT_MEMORY_RWX_NONCACHED;
3981+ desc->type = MT_MEMORY_RW_NONCACHED;
3982
3983 pr_info("AT91: sram at 0x%lx of 0x%x mapped at 0x%lx\n",
3984 base, length, desc->virtual);
3985diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
3986index 255f33a..507b157 100644
3987--- a/arch/arm/mach-kirkwood/common.c
3988+++ b/arch/arm/mach-kirkwood/common.c
3989@@ -157,7 +157,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
3990 clk_gate_ops.disable(hw);
3991 }
3992
3993-static struct clk_ops clk_gate_fn_ops;
3994+static int clk_gate_fn_is_enabled(struct clk_hw *hw)
3995+{
3996+ return clk_gate_ops.is_enabled(hw);
3997+}
3998+
3999+static struct clk_ops clk_gate_fn_ops = {
4000+ .enable = clk_gate_fn_enable,
4001+ .disable = clk_gate_fn_disable,
4002+ .is_enabled = clk_gate_fn_is_enabled,
4003+};
4004
4005 static struct clk __init *clk_register_gate_fn(struct device *dev,
4006 const char *name,
4007@@ -191,14 +200,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
4008 gate_fn->fn_en = fn_en;
4009 gate_fn->fn_dis = fn_dis;
4010
4011- /* ops is the gate ops, but with our enable/disable functions */
4012- if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
4013- clk_gate_fn_ops.disable != clk_gate_fn_disable) {
4014- clk_gate_fn_ops = clk_gate_ops;
4015- clk_gate_fn_ops.enable = clk_gate_fn_enable;
4016- clk_gate_fn_ops.disable = clk_gate_fn_disable;
4017- }
4018-
4019 clk = clk_register(dev, &gate_fn->gate.hw);
4020
4021 if (IS_ERR(clk))
4022diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
4023index 2bdc323..cf1c607 100644
4024--- a/arch/arm/mach-mvebu/coherency.c
4025+++ b/arch/arm/mach-mvebu/coherency.c
4026@@ -316,7 +316,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
4027
4028 /*
4029 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
4030- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
4031+ * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This
4032 * is needed as a workaround for a deadlock issue between the PCIe
4033 * interface and the cache controller.
4034 */
4035@@ -329,7 +329,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
4036 mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
4037
4038 if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
4039- mtype = MT_UNCACHED;
4040+ mtype = MT_UNCACHED_RW;
4041
4042 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
4043 }
4044diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
4045index aead77a..a2253fa 100644
4046--- a/arch/arm/mach-omap2/board-n8x0.c
4047+++ b/arch/arm/mach-omap2/board-n8x0.c
4048@@ -568,7 +568,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
4049 }
4050 #endif
4051
4052-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
4053+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
4054 .late_init = n8x0_menelaus_late_init,
4055 };
4056
4057diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
4058index 8bc1338..8b28b69 100644
4059--- a/arch/arm/mach-omap2/gpmc.c
4060+++ b/arch/arm/mach-omap2/gpmc.c
4061@@ -151,7 +151,6 @@ struct omap3_gpmc_regs {
4062 };
4063
4064 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
4065-static struct irq_chip gpmc_irq_chip;
4066 static int gpmc_irq_start;
4067
4068 static struct resource gpmc_mem_root;
4069@@ -736,6 +735,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
4070
4071 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
4072
4073+static struct irq_chip gpmc_irq_chip = {
4074+ .name = "gpmc",
4075+ .irq_startup = gpmc_irq_noop_ret,
4076+ .irq_enable = gpmc_irq_enable,
4077+ .irq_disable = gpmc_irq_disable,
4078+ .irq_shutdown = gpmc_irq_noop,
4079+ .irq_ack = gpmc_irq_noop,
4080+ .irq_mask = gpmc_irq_noop,
4081+ .irq_unmask = gpmc_irq_noop,
4082+
4083+};
4084+
4085 static int gpmc_setup_irq(void)
4086 {
4087 int i;
4088@@ -750,15 +761,6 @@ static int gpmc_setup_irq(void)
4089 return gpmc_irq_start;
4090 }
4091
4092- gpmc_irq_chip.name = "gpmc";
4093- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
4094- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
4095- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
4096- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
4097- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
4098- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
4099- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
4100-
4101 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
4102 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
4103
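
This hunk and the kirkwood clk_gate_fn_ops change above follow the same constification recipe: run-time field assignments are replaced by a build-time designated initializer, so the ops structure no longer needs to be writable after boot and can be placed in read-only memory. The recipe in miniature (example_chip and example_noop are hypothetical):

static void example_noop(struct irq_data *d) { }

static struct irq_chip example_chip = {
	.name       = "example",
	.irq_mask   = example_noop,	/* fixed at build time, never stored to at run time */
	.irq_unmask = example_noop,
};
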
4104diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
4105index 4001325..b14e2a0 100644
4106--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
4107+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
4108@@ -84,7 +84,7 @@ struct cpu_pm_ops {
4109 int (*finish_suspend)(unsigned long cpu_state);
4110 void (*resume)(void);
4111 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
4112-};
4113+} __no_const;
4114
4115 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
4116 static struct powerdomain *mpuss_pd;
4117@@ -102,7 +102,7 @@ static void dummy_cpu_resume(void)
4118 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
4119 {}
4120
4121-struct cpu_pm_ops omap_pm_ops = {
4122+static struct cpu_pm_ops omap_pm_ops __read_only = {
4123 .finish_suspend = default_finish_suspend,
4124 .resume = dummy_cpu_resume,
4125 .scu_prepare = dummy_scu_prepare,
4126diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
4127index 37843a7..a98df13 100644
4128--- a/arch/arm/mach-omap2/omap-wakeupgen.c
4129+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
4130@@ -343,7 +343,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
4131 return NOTIFY_OK;
4132 }
4133
4134-static struct notifier_block __refdata irq_hotplug_notifier = {
4135+static struct notifier_block irq_hotplug_notifier = {
4136 .notifier_call = irq_cpu_hotplug_notify,
4137 };
4138
4139diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
4140index 01ef59d..32ae28a8 100644
4141--- a/arch/arm/mach-omap2/omap_device.c
4142+++ b/arch/arm/mach-omap2/omap_device.c
4143@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od)
4144 struct platform_device __init *omap_device_build(const char *pdev_name,
4145 int pdev_id,
4146 struct omap_hwmod *oh,
4147- void *pdata, int pdata_len)
4148+ const void *pdata, int pdata_len)
4149 {
4150 struct omap_hwmod *ohs[] = { oh };
4151
4152@@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
4153 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
4154 int pdev_id,
4155 struct omap_hwmod **ohs,
4156- int oh_cnt, void *pdata,
4157+ int oh_cnt, const void *pdata,
4158 int pdata_len)
4159 {
4160 int ret = -ENOMEM;
4161diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
4162index 78c02b3..c94109a 100644
4163--- a/arch/arm/mach-omap2/omap_device.h
4164+++ b/arch/arm/mach-omap2/omap_device.h
4165@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
4166 /* Core code interface */
4167
4168 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
4169- struct omap_hwmod *oh, void *pdata,
4170+ struct omap_hwmod *oh, const void *pdata,
4171 int pdata_len);
4172
4173 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
4174 struct omap_hwmod **oh, int oh_cnt,
4175- void *pdata, int pdata_len);
4176+ const void *pdata, int pdata_len);
4177
4178 struct omap_device *omap_device_alloc(struct platform_device *pdev,
4179 struct omap_hwmod **ohs, int oh_cnt);
4180diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
4181index da1b256..ab2a327 100644
4182--- a/arch/arm/mach-omap2/omap_hwmod.c
4183+++ b/arch/arm/mach-omap2/omap_hwmod.c
4184@@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops {
4185 int (*init_clkdm)(struct omap_hwmod *oh);
4186 void (*update_context_lost)(struct omap_hwmod *oh);
4187 int (*get_context_lost)(struct omap_hwmod *oh);
4188-};
4189+} __no_const;
4190
4191 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
4192-static struct omap_hwmod_soc_ops soc_ops;
4193+static struct omap_hwmod_soc_ops soc_ops __read_only;
4194
4195 /* omap_hwmod_list contains all registered struct omap_hwmods */
4196 static LIST_HEAD(omap_hwmod_list);
4197diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
4198index 95fee54..cfa9cf1 100644
4199--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
4200+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
4201@@ -10,6 +10,7 @@
4202
4203 #include <linux/kernel.h>
4204 #include <linux/init.h>
4205+#include <asm/pgtable.h>
4206
4207 #include "powerdomain.h"
4208
4209@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
4210
4211 void __init am43xx_powerdomains_init(void)
4212 {
4213- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
4214+ pax_open_kernel();
4215+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
4216+ pax_close_kernel();
4217 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
4218 pwrdm_register_pwrdms(powerdomains_am43xx);
4219 pwrdm_complete_init();
4220diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
4221index 97d6607..8429d14 100644
4222--- a/arch/arm/mach-omap2/wd_timer.c
4223+++ b/arch/arm/mach-omap2/wd_timer.c
4224@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
4225 struct omap_hwmod *oh;
4226 char *oh_name = "wd_timer2";
4227 char *dev_name = "omap_wdt";
4228- struct omap_wd_timer_platform_data pdata;
4229+ static struct omap_wd_timer_platform_data pdata = {
4230+ .read_reset_sources = prm_read_reset_sources
4231+ };
4232
4233 if (!cpu_class_is_omap2() || of_have_populated_dt())
4234 return 0;
4235@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
4236 return -EINVAL;
4237 }
4238
4239- pdata.read_reset_sources = prm_read_reset_sources;
4240-
4241 pdev = omap_device_build(dev_name, id, oh, &pdata,
4242 sizeof(struct omap_wd_timer_platform_data));
4243 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
4244diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
4245index b82dcae..44ee5b6 100644
4246--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
4247+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
4248@@ -180,7 +180,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
4249 bool entered_lp2 = false;
4250
4251 if (tegra_pending_sgi())
4252- ACCESS_ONCE(abort_flag) = true;
4253+ ACCESS_ONCE_RW(abort_flag) = true;
4254
4255 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
4256
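
ACCESS_ONCE_RW exists because this series redefines ACCESS_ONCE with a const-qualified cast, so that accidental writes through it become compile errors; deliberate writes, as in this idle-path flag, must switch to the RW variant. The definitions, stated as an assumption about the include/linux/compiler.h hunk elsewhere in the patch:

#define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))	/* read-only */
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))		/* writable  */
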
4257diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
4258index 2dea8b5..6499da2 100644
4259--- a/arch/arm/mach-ux500/setup.h
4260+++ b/arch/arm/mach-ux500/setup.h
4261@@ -33,13 +33,6 @@ extern void ux500_timer_init(void);
4262 .type = MT_DEVICE, \
4263 }
4264
4265-#define __MEM_DEV_DESC(x, sz) { \
4266- .virtual = IO_ADDRESS(x), \
4267- .pfn = __phys_to_pfn(x), \
4268- .length = sz, \
4269- .type = MT_MEMORY_RWX, \
4270-}
4271-
4272 extern struct smp_operations ux500_smp_ops;
4273 extern void ux500_cpu_die(unsigned int cpu);
4274
4275diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
4276index c348eae..456a1a4 100644
4277--- a/arch/arm/mm/Kconfig
4278+++ b/arch/arm/mm/Kconfig
4279@@ -446,6 +446,7 @@ config CPU_32v5
4280
4281 config CPU_32v6
4282 bool
4283+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
4284 select TLS_REG_EMUL if !CPU_32v6K && !MMU
4285
4286 config CPU_32v6K
4287@@ -600,6 +601,7 @@ config CPU_CP15_MPU
4288
4289 config CPU_USE_DOMAINS
4290 bool
4291+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
4292 help
4293 This option enables or disables the use of domain switching
4294 via the set_fs() function.
4295@@ -799,6 +801,7 @@ config NEED_KUSER_HELPERS
4296 config KUSER_HELPERS
4297 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
4298 default y
4299+ depends on !(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND
4300 help
4301 Warning: disabling this option may break user programs.
4302
4303@@ -811,7 +814,7 @@ config KUSER_HELPERS
4304 See Documentation/arm/kernel_user_helpers.txt for details.
4305
4306 However, the fixed address nature of these helpers can be used
4307- by ROP (return orientated programming) authors when creating
4308+ by ROP (Return Oriented Programming) authors when creating
4309 exploits.
4310
4311 If all of the binaries and libraries which run on your platform
4312diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
4313index b8cb1a2..6a5624a 100644
4314--- a/arch/arm/mm/alignment.c
4315+++ b/arch/arm/mm/alignment.c
4316@@ -214,10 +214,12 @@ union offset_union {
4317 #define __get16_unaligned_check(ins,val,addr) \
4318 do { \
4319 unsigned int err = 0, v, a = addr; \
4320+ pax_open_userland(); \
4321 __get8_unaligned_check(ins,v,a,err); \
4322 val = v << ((BE) ? 8 : 0); \
4323 __get8_unaligned_check(ins,v,a,err); \
4324 val |= v << ((BE) ? 0 : 8); \
4325+ pax_close_userland(); \
4326 if (err) \
4327 goto fault; \
4328 } while (0)
4329@@ -231,6 +233,7 @@ union offset_union {
4330 #define __get32_unaligned_check(ins,val,addr) \
4331 do { \
4332 unsigned int err = 0, v, a = addr; \
4333+ pax_open_userland(); \
4334 __get8_unaligned_check(ins,v,a,err); \
4335 val = v << ((BE) ? 24 : 0); \
4336 __get8_unaligned_check(ins,v,a,err); \
4337@@ -239,6 +242,7 @@ union offset_union {
4338 val |= v << ((BE) ? 8 : 16); \
4339 __get8_unaligned_check(ins,v,a,err); \
4340 val |= v << ((BE) ? 0 : 24); \
4341+ pax_close_userland(); \
4342 if (err) \
4343 goto fault; \
4344 } while (0)
4345@@ -252,6 +256,7 @@ union offset_union {
4346 #define __put16_unaligned_check(ins,val,addr) \
4347 do { \
4348 unsigned int err = 0, v = val, a = addr; \
4349+ pax_open_userland(); \
4350 __asm__( FIRST_BYTE_16 \
4351 ARM( "1: "ins" %1, [%2], #1\n" ) \
4352 THUMB( "1: "ins" %1, [%2]\n" ) \
4353@@ -271,6 +276,7 @@ union offset_union {
4354 " .popsection\n" \
4355 : "=r" (err), "=&r" (v), "=&r" (a) \
4356 : "0" (err), "1" (v), "2" (a)); \
4357+ pax_close_userland(); \
4358 if (err) \
4359 goto fault; \
4360 } while (0)
4361@@ -284,6 +290,7 @@ union offset_union {
4362 #define __put32_unaligned_check(ins,val,addr) \
4363 do { \
4364 unsigned int err = 0, v = val, a = addr; \
4365+ pax_open_userland(); \
4366 __asm__( FIRST_BYTE_32 \
4367 ARM( "1: "ins" %1, [%2], #1\n" ) \
4368 THUMB( "1: "ins" %1, [%2]\n" ) \
4369@@ -313,6 +320,7 @@ union offset_union {
4370 " .popsection\n" \
4371 : "=r" (err), "=&r" (v), "=&r" (a) \
4372 : "0" (err), "1" (v), "2" (a)); \
4373+ pax_close_userland(); \
4374 if (err) \
4375 goto fault; \
4376 } while (0)
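
The alignment fixup macros use raw, exception-table-protected inline asm rather than get_user()/put_user(), so under CONFIG_PAX_MEMORY_UDEREF, where userland is inaccessible to the kernel by default, they must open the userland window themselves, hence the pax_open_userland()/pax_close_userland() bracketing added above (the same job mainline later gives to user_access_begin()/user_access_end()). On classic ARM the pair plausibly reduces to the modify_domain() helper this patch adds in arch/arm/mm/mmu.c; a hypothetical sketch, with the real definitions and domain constants living elsewhere in the series:

static inline void pax_open_userland(void)
{
	modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);	/* grant access */
}

static inline void pax_close_userland(void)
{
	modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);	/* revoke access */
}
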
4377diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
4378index 7c3fb41..bfb87d8 100644
4379--- a/arch/arm/mm/cache-l2x0.c
4380+++ b/arch/arm/mm/cache-l2x0.c
4381@@ -41,7 +41,7 @@ struct l2c_init_data {
4382 void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
4383 void (*save)(void __iomem *);
4384 struct outer_cache_fns outer_cache;
4385-};
4386+} __do_const;
4387
4388 #define CACHE_LINE_SIZE 32
4389
4390diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
4391index 6eb97b3..ac509f6 100644
4392--- a/arch/arm/mm/context.c
4393+++ b/arch/arm/mm/context.c
4394@@ -43,7 +43,7 @@
4395 #define NUM_USER_ASIDS ASID_FIRST_VERSION
4396
4397 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
4398-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
4399+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
4400 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
4401
4402 static DEFINE_PER_CPU(atomic64_t, active_asids);
4403@@ -182,7 +182,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
4404 {
4405 static u32 cur_idx = 1;
4406 u64 asid = atomic64_read(&mm->context.id);
4407- u64 generation = atomic64_read(&asid_generation);
4408+ u64 generation = atomic64_read_unchecked(&asid_generation);
4409
4410 if (asid != 0 && is_reserved_asid(asid)) {
4411 /*
4412@@ -203,7 +203,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
4413 */
4414 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
4415 if (asid == NUM_USER_ASIDS) {
4416- generation = atomic64_add_return(ASID_FIRST_VERSION,
4417+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
4418 &asid_generation);
4419 flush_context(cpu);
4420 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
4421@@ -234,14 +234,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
4422 cpu_set_reserved_ttbr0();
4423
4424 asid = atomic64_read(&mm->context.id);
4425- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
4426+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
4427 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
4428 goto switch_mm_fastpath;
4429
4430 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
4431 /* Check that our ASID belongs to the current generation. */
4432 asid = atomic64_read(&mm->context.id);
4433- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
4434+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
4435 asid = new_context(mm, cpu);
4436 atomic64_set(&mm->context.id, asid);
4437 }
4438diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
4439index eb8830a..5360ce7 100644
4440--- a/arch/arm/mm/fault.c
4441+++ b/arch/arm/mm/fault.c
4442@@ -25,6 +25,7 @@
4443 #include <asm/system_misc.h>
4444 #include <asm/system_info.h>
4445 #include <asm/tlbflush.h>
4446+#include <asm/sections.h>
4447
4448 #include "fault.h"
4449
4450@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
4451 if (fixup_exception(regs))
4452 return;
4453
4454+#ifdef CONFIG_PAX_MEMORY_UDEREF
4455+ if (addr < TASK_SIZE) {
4456+ if (current->signal->curr_ip)
4457+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4458+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4459+ else
4460+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
4461+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4462+ }
4463+#endif
4464+
4465+#ifdef CONFIG_PAX_KERNEXEC
4466+ if ((fsr & FSR_WRITE) &&
4467+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
4468+ (MODULES_VADDR <= addr && addr < MODULES_END)))
4469+ {
4470+ if (current->signal->curr_ip)
4471+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4472+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
4473+ else
4474+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
4475+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
4476+ }
4477+#endif
4478+
4479 /*
4480 * No handler, we'll have to terminate things with extreme prejudice.
4481 */
4482@@ -174,6 +200,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
4483 }
4484 #endif
4485
4486+#ifdef CONFIG_PAX_PAGEEXEC
4487+ if (fsr & FSR_LNX_PF) {
4488+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
4489+ do_group_exit(SIGKILL);
4490+ }
4491+#endif
4492+
4493 tsk->thread.address = addr;
4494 tsk->thread.error_code = fsr;
4495 tsk->thread.trap_no = 14;
4496@@ -401,6 +434,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
4497 }
4498 #endif /* CONFIG_MMU */
4499
4500+#ifdef CONFIG_PAX_PAGEEXEC
4501+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4502+{
4503+ long i;
4504+
4505+ printk(KERN_ERR "PAX: bytes at PC: ");
4506+ for (i = 0; i < 20; i++) {
4507+ unsigned char c;
4508+ if (get_user(c, (__force unsigned char __user *)pc+i))
4509+ printk(KERN_CONT "?? ");
4510+ else
4511+ printk(KERN_CONT "%02x ", c);
4512+ }
4513+ printk("\n");
4514+
4515+ printk(KERN_ERR "PAX: bytes at SP-4: ");
4516+ for (i = -1; i < 20; i++) {
4517+ unsigned long c;
4518+ if (get_user(c, (__force unsigned long __user *)sp+i))
4519+ printk(KERN_CONT "???????? ");
4520+ else
4521+ printk(KERN_CONT "%08lx ", c);
4522+ }
4523+ printk("\n");
4524+}
4525+#endif
4526+
4527 /*
4528 * First Level Translation Fault Handler
4529 *
4530@@ -548,9 +608,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
4531 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
4532 struct siginfo info;
4533
4534+#ifdef CONFIG_PAX_MEMORY_UDEREF
4535+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
4536+ if (current->signal->curr_ip)
4537+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4538+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4539+ else
4540+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
4541+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4542+ goto die;
4543+ }
4544+#endif
4545+
4546 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
4547 return;
4548
4549+die:
4550 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
4551 inf->name, fsr, addr);
4552
4553@@ -574,15 +647,98 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
4554 ifsr_info[nr].name = name;
4555 }
4556
4557+asmlinkage int sys_sigreturn(struct pt_regs *regs);
4558+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
4559+
4560 asmlinkage void __exception
4561 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
4562 {
4563 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
4564 struct siginfo info;
4565+ unsigned long pc = instruction_pointer(regs);
4566+
4567+ if (user_mode(regs)) {
4568+ unsigned long sigpage = current->mm->context.sigpage;
4569+
4570+ if (sigpage <= pc && pc < sigpage + 7*4) {
4571+ if (pc < sigpage + 3*4)
4572+ sys_sigreturn(regs);
4573+ else
4574+ sys_rt_sigreturn(regs);
4575+ return;
4576+ }
4577+ if (pc == 0xffff0f60UL) {
4578+ /*
4579+ * PaX: __kuser_cmpxchg64 emulation
4580+ */
4581+ // TODO
4582+ //regs->ARM_pc = regs->ARM_lr;
4583+ //return;
4584+ }
4585+ if (pc == 0xffff0fa0UL) {
4586+ /*
4587+ * PaX: __kuser_memory_barrier emulation
4588+ */
4589+ // dmb(); implied by the exception
4590+ regs->ARM_pc = regs->ARM_lr;
4591+ return;
4592+ }
4593+ if (pc == 0xffff0fc0UL) {
4594+ /*
4595+ * PaX: __kuser_cmpxchg emulation
4596+ */
4597+ // TODO
4598+ //long new;
4599+ //int op;
4600+
4601+ //op = FUTEX_OP_SET << 28;
4602+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4603+ //regs->ARM_r0 = old != new;
4604+ //regs->ARM_pc = regs->ARM_lr;
4605+ //return;
4606+ }
4607+ if (pc == 0xffff0fe0UL) {
4608+ /*
4609+ * PaX: __kuser_get_tls emulation
4610+ */
4611+ regs->ARM_r0 = current_thread_info()->tp_value[0];
4612+ regs->ARM_pc = regs->ARM_lr;
4613+ return;
4614+ }
4615+ }
4616+
4617+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4618+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4619+ if (current->signal->curr_ip)
4620+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4621+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4622+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4623+ else
4624+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4625+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4626+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4627+ goto die;
4628+ }
4629+#endif
4630+
4631+#ifdef CONFIG_PAX_REFCOUNT
4632+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4633+ unsigned int bkpt;
4634+
4635+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4636+ current->thread.error_code = ifsr;
4637+ current->thread.trap_no = 0;
4638+ pax_report_refcount_overflow(regs);
4639+ fixup_exception(regs);
4640+ return;
4641+ }
4642+ }
4643+#endif
4644
4645 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4646 return;
4647
4648+die:
4649 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4650 inf->name, ifsr, addr);
4651
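
The 0xe12f1073 word matched in the CONFIG_PAX_REFCOUNT block above is the A1 encoding of "bkpt 0xf103", the guard instruction that the REFCOUNT-instrumented atomics elsewhere in this series plant on the signed-overflow path; BKPT raises a prefetch abort, so the handler here can report the event and resume via the exception-table fixup. A rough sketch of the emitting side (simplified; the real version also records the exception-table entry that fixup_exception() consumes, so the overflowed store is skipped):

static inline void example_atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ example_atomic_add\n"
"1:	ldrex	%0, [%3]\n"
"	adds	%0, %0, %4\n"
"	bvc	3f\n"			/* no overflow: carry on       */
"2:	bkpt	0xf103\n"		/* overflow: prefetch abort    */
"3:	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}
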
4652diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4653index cf08bdf..772656c 100644
4654--- a/arch/arm/mm/fault.h
4655+++ b/arch/arm/mm/fault.h
4656@@ -3,6 +3,7 @@
4657
4658 /*
4659 * Fault status register encodings. We steal bit 31 for our own purposes.
4660+ * Set when the FSR value is from an instruction fault.
4661 */
4662 #define FSR_LNX_PF (1 << 31)
4663 #define FSR_WRITE (1 << 11)
4664@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4665 }
4666 #endif
4667
4668+/* valid for LPAE and !LPAE */
4669+static inline int is_xn_fault(unsigned int fsr)
4670+{
4671+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4672+}
4673+
4674+static inline int is_domain_fault(unsigned int fsr)
4675+{
4676+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4677+}
4678+
4679 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4680 unsigned long search_exception_table(unsigned long addr);
4681
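
Worked decode of the two new predicates, per the ARMv7 fault-status encodings: domain faults report FS = 0b01001 (0x9, section) or 0b01011 (0xB, page), both satisfying (fs & 0xD) == 0x9; permission faults report 0xD (section) / 0xF (page) in the short-descriptor format and 0b0011LL (0xC-0xF, LL = lookup level) under LPAE, all satisfying (fs & 0x3C) == 0xC, which is why the helper can claim validity for both formats. A trivial consumer, for illustration only:

static inline const char *example_classify(unsigned int fsr)
{
	if (is_domain_fault(fsr))
		return "domain";
	if (is_xn_fault(fsr))
		return "permission/XN";
	return "other";
}
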
4682diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4683index 659c75d..6f8c029 100644
4684--- a/arch/arm/mm/init.c
4685+++ b/arch/arm/mm/init.c
4686@@ -31,6 +31,8 @@
4687 #include <asm/setup.h>
4688 #include <asm/tlb.h>
4689 #include <asm/fixmap.h>
4690+#include <asm/system_info.h>
4691+#include <asm/cp15.h>
4692
4693 #include <asm/mach/arch.h>
4694 #include <asm/mach/map.h>
4695@@ -619,7 +621,46 @@ void free_initmem(void)
4696 {
4697 #ifdef CONFIG_HAVE_TCM
4698 extern char __tcm_start, __tcm_end;
4699+#endif
4700
4701+#ifdef CONFIG_PAX_KERNEXEC
4702+ unsigned long addr;
4703+ pgd_t *pgd;
4704+ pud_t *pud;
4705+ pmd_t *pmd;
4706+ int cpu_arch = cpu_architecture();
4707+ unsigned int cr = get_cr();
4708+
4709+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4710+ /* make page tables, etc. before .text NX */
4711+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4712+ pgd = pgd_offset_k(addr);
4713+ pud = pud_offset(pgd, addr);
4714+ pmd = pmd_offset(pud, addr);
4715+ __section_update(pmd, addr, PMD_SECT_XN);
4716+ }
4717+ /* make init NX */
4718+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4719+ pgd = pgd_offset_k(addr);
4720+ pud = pud_offset(pgd, addr);
4721+ pmd = pmd_offset(pud, addr);
4722+ __section_update(pmd, addr, PMD_SECT_XN);
4723+ }
4724+ /* make kernel code/rodata RX */
4725+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4726+ pgd = pgd_offset_k(addr);
4727+ pud = pud_offset(pgd, addr);
4728+ pmd = pmd_offset(pud, addr);
4729+#ifdef CONFIG_ARM_LPAE
4730+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4731+#else
4732+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4733+#endif
4734+ }
4735+ }
4736+#endif
4737+
4738+#ifdef CONFIG_HAVE_TCM
4739 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4740 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4741 #endif
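
This free_initmem() addition is where the KERNEXEC permissions are finally committed: everything below _stext (page tables included) and the freed init region become XN, and the kernel text/rodata span becomes read-only, one 1 MiB section at a time, which is exactly why the vmlinux.lds.S hunk above aligned those boundaries to 1 << SECTION_SHIFT. __section_update() itself is added to the headers elsewhere in this series; a reconstruction of its likely shape (an assumption, noting the paired 1 MiB sections per pmd_t on non-LPAE ARM):

static inline void __section_update(pmd_t *pmd, unsigned long addr,
				    pmdval_t prot)
{
#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd(pmd_val(pmd[0]) | prot);
#else
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd(pmd_val(pmd[1]) | prot);	/* odd 1 MiB section  */
	else
		pmd[0] = __pmd(pmd_val(pmd[0]) | prot);	/* even 1 MiB section */
#endif
	flush_pmd_entry(pmd);	/* write back and invalidate the entry */
}
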
4742diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4743index d1e5ad7..84dcbf2 100644
4744--- a/arch/arm/mm/ioremap.c
4745+++ b/arch/arm/mm/ioremap.c
4746@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4747 unsigned int mtype;
4748
4749 if (cached)
4750- mtype = MT_MEMORY_RWX;
4751+ mtype = MT_MEMORY_RX;
4752 else
4753- mtype = MT_MEMORY_RWX_NONCACHED;
4754+ mtype = MT_MEMORY_RX_NONCACHED;
4755
4756 return __arm_ioremap_caller(phys_addr, size, mtype,
4757 __builtin_return_address(0));
4758diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4759index 5e85ed3..b10a7ed 100644
4760--- a/arch/arm/mm/mmap.c
4761+++ b/arch/arm/mm/mmap.c
4762@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4763 struct vm_area_struct *vma;
4764 int do_align = 0;
4765 int aliasing = cache_is_vipt_aliasing();
4766+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4767 struct vm_unmapped_area_info info;
4768
4769 /*
4770@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4771 if (len > TASK_SIZE)
4772 return -ENOMEM;
4773
4774+#ifdef CONFIG_PAX_RANDMMAP
4775+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4776+#endif
4777+
4778 if (addr) {
4779 if (do_align)
4780 addr = COLOUR_ALIGN(addr, pgoff);
4781@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4782 addr = PAGE_ALIGN(addr);
4783
4784 vma = find_vma(mm, addr);
4785- if (TASK_SIZE - len >= addr &&
4786- (!vma || addr + len <= vma->vm_start))
4787+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4788 return addr;
4789 }
4790
4791@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4792 info.high_limit = TASK_SIZE;
4793 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4794 info.align_offset = pgoff << PAGE_SHIFT;
4795+ info.threadstack_offset = offset;
4796 return vm_unmapped_area(&info);
4797 }
4798
4799@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4800 unsigned long addr = addr0;
4801 int do_align = 0;
4802 int aliasing = cache_is_vipt_aliasing();
4803+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4804 struct vm_unmapped_area_info info;
4805
4806 /*
4807@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4808 return addr;
4809 }
4810
4811+#ifdef CONFIG_PAX_RANDMMAP
4812+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4813+#endif
4814+
4815 /* requesting a specific address */
4816 if (addr) {
4817 if (do_align)
4818@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4819 else
4820 addr = PAGE_ALIGN(addr);
4821 vma = find_vma(mm, addr);
4822- if (TASK_SIZE - len >= addr &&
4823- (!vma || addr + len <= vma->vm_start))
4824+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4825 return addr;
4826 }
4827
4828@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4829 info.high_limit = mm->mmap_base;
4830 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4831 info.align_offset = pgoff << PAGE_SHIFT;
4832+ info.threadstack_offset = offset;
4833 addr = vm_unmapped_area(&info);
4834
4835 /*
4836@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4837 {
4838 unsigned long random_factor = 0UL;
4839
4840+#ifdef CONFIG_PAX_RANDMMAP
4841+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4842+#endif
4843+
4844 /* 8 bits of randomness in 20 address space bits */
4845 if ((current->flags & PF_RANDOMIZE) &&
4846 !(current->personality & ADDR_NO_RANDOMIZE))
4847@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4848
4849 if (mmap_is_legacy()) {
4850 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4851+
4852+#ifdef CONFIG_PAX_RANDMMAP
4853+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4854+ mm->mmap_base += mm->delta_mmap;
4855+#endif
4856+
4857 mm->get_unmapped_area = arch_get_unmapped_area;
4858 } else {
4859 mm->mmap_base = mmap_base(random_factor);
4860+
4861+#ifdef CONFIG_PAX_RANDMMAP
4862+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4863+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4864+#endif
4865+
4866 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4867 }
4868 }
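
Two things change in each get_unmapped_area path above. First, the bare #ifdef sandwich attaches the RANDMMAP test to the following if (addr) block, so tasks flagged MF_PAX_RANDMMAP ignore non-MAP_FIXED address hints; expanded, the bottom-up construct reads:

if (!(mm->pax_flags & MF_PAX_RANDMMAP) && addr) {
	if (do_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);
	vma = find_vma(mm, addr);
	if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
		return addr;
}

Second, check_heap_stack_gap() replaces the open-coded "!vma || addr + len <= vma->vm_start" fit test and additionally enforces the per-thread randomized gap supplied by gr_rand_threadstack_offset(), threaded through vm_unmapped_area() via the new threadstack_offset field. The same guard in arch_pick_mmap_layout() keeps mainline's 8-bit random_factor from stacking with PaX's own delta_mmap/delta_stack adjustments to mmap_base.
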
4869diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4870index 6e3ba8d..9cbb4d7 100644
4871--- a/arch/arm/mm/mmu.c
4872+++ b/arch/arm/mm/mmu.c
4873@@ -40,6 +40,22 @@
4874 #include "mm.h"
4875 #include "tcm.h"
4876
4877+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4878+void modify_domain(unsigned int dom, unsigned int type)
4879+{
4880+ struct thread_info *thread = current_thread_info();
4881+ unsigned int domain = thread->cpu_domain;
4882+ /*
4883+ * DOMAIN_MANAGER might be defined to some other value,
4884+ * DOMAIN_MANAGER might be defined to some other value;
4885+ */
4886+ domain &= ~domain_val(dom, 3);
4887+ thread->cpu_domain = domain | domain_val(dom, type);
4888+ set_domain(thread->cpu_domain);
4889+}
4890+EXPORT_SYMBOL(modify_domain);
4891+#endif
4892+
4893 /*
4894 * empty_zero_page is a special page that is used for
4895 * zero-initialized data and COW.
4896@@ -239,7 +255,15 @@ __setup("noalign", noalign_setup);
4897 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
4898 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4899
4900-static struct mem_type mem_types[] = {
4901+#ifdef CONFIG_PAX_KERNEXEC
4902+#define L_PTE_KERNEXEC L_PTE_RDONLY
4903+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4904+#else
4905+#define L_PTE_KERNEXEC L_PTE_DIRTY
4906+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4907+#endif
4908+
4909+static struct mem_type mem_types[] __read_only = {
4910 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4911 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4912 L_PTE_SHARED,
4913@@ -268,19 +292,19 @@ static struct mem_type mem_types[] = {
4914 .prot_sect = PROT_SECT_DEVICE,
4915 .domain = DOMAIN_IO,
4916 },
4917- [MT_UNCACHED] = {
4918+ [MT_UNCACHED_RW] = {
4919 .prot_pte = PROT_PTE_DEVICE,
4920 .prot_l1 = PMD_TYPE_TABLE,
4921 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4922 .domain = DOMAIN_IO,
4923 },
4924- [MT_CACHECLEAN] = {
4925- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4926+ [MT_CACHECLEAN_RO] = {
4927+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
4928 .domain = DOMAIN_KERNEL,
4929 },
4930 #ifndef CONFIG_ARM_LPAE
4931- [MT_MINICLEAN] = {
4932- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4933+ [MT_MINICLEAN_RO] = {
4934+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
4935 .domain = DOMAIN_KERNEL,
4936 },
4937 #endif
4938@@ -288,15 +312,15 @@ static struct mem_type mem_types[] = {
4939 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4940 L_PTE_RDONLY,
4941 .prot_l1 = PMD_TYPE_TABLE,
4942- .domain = DOMAIN_USER,
4943+ .domain = DOMAIN_VECTORS,
4944 },
4945 [MT_HIGH_VECTORS] = {
4946 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4947 L_PTE_USER | L_PTE_RDONLY,
4948 .prot_l1 = PMD_TYPE_TABLE,
4949- .domain = DOMAIN_USER,
4950+ .domain = DOMAIN_VECTORS,
4951 },
4952- [MT_MEMORY_RWX] = {
4953+ [__MT_MEMORY_RWX] = {
4954 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4955 .prot_l1 = PMD_TYPE_TABLE,
4956 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4957@@ -309,17 +333,30 @@ static struct mem_type mem_types[] = {
4958 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4959 .domain = DOMAIN_KERNEL,
4960 },
4961- [MT_ROM] = {
4962- .prot_sect = PMD_TYPE_SECT,
4963+ [MT_MEMORY_RX] = {
4964+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4965+ .prot_l1 = PMD_TYPE_TABLE,
4966+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4967+ .domain = DOMAIN_KERNEL,
4968+ },
4969+ [MT_ROM_RX] = {
4970+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4971 .domain = DOMAIN_KERNEL,
4972 },
4973- [MT_MEMORY_RWX_NONCACHED] = {
4974+ [MT_MEMORY_RW_NONCACHED] = {
4975 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4976 L_PTE_MT_BUFFERABLE,
4977 .prot_l1 = PMD_TYPE_TABLE,
4978 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4979 .domain = DOMAIN_KERNEL,
4980 },
4981+ [MT_MEMORY_RX_NONCACHED] = {
4982+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4983+ L_PTE_MT_BUFFERABLE,
4984+ .prot_l1 = PMD_TYPE_TABLE,
4985+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4986+ .domain = DOMAIN_KERNEL,
4987+ },
4988 [MT_MEMORY_RW_DTCM] = {
4989 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4990 L_PTE_XN,
4991@@ -327,9 +364,10 @@ static struct mem_type mem_types[] = {
4992 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4993 .domain = DOMAIN_KERNEL,
4994 },
4995- [MT_MEMORY_RWX_ITCM] = {
4996- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4997+ [MT_MEMORY_RX_ITCM] = {
4998+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4999 .prot_l1 = PMD_TYPE_TABLE,
5000+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
5001 .domain = DOMAIN_KERNEL,
5002 },
5003 [MT_MEMORY_RW_SO] = {
5004@@ -547,9 +585,14 @@ static void __init build_mem_type_table(void)
5005 * Mark cache clean areas and XIP ROM read only
5006 * from SVC mode and no access from userspace.
5007 */
5008- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
5009- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
5010- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
5011+ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
5012+#ifdef CONFIG_PAX_KERNEXEC
5013+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
5014+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
5015+ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
5016+#endif
5017+ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
5018+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
5019 #endif
5020
5021 /*
5022@@ -566,13 +609,17 @@ static void __init build_mem_type_table(void)
5023 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
5024 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
5025 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
5026- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
5027- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
5028+ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
5029+ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
5030 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
5031 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
5032+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
5033+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
5034 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
5035- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
5036- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
5037+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
5038+ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
5039+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
5040+ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
5041 }
5042 }
5043
5044@@ -583,15 +630,20 @@ static void __init build_mem_type_table(void)
5045 if (cpu_arch >= CPU_ARCH_ARMv6) {
5046 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
5047 /* Non-cacheable Normal is XCB = 001 */
5048- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
5049+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
5050+ PMD_SECT_BUFFERED;
5051+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
5052 PMD_SECT_BUFFERED;
5053 } else {
5054 /* For both ARMv6 and non-TEX-remapping ARMv7 */
5055- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
5056+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
5057+ PMD_SECT_TEX(1);
5058+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
5059 PMD_SECT_TEX(1);
5060 }
5061 } else {
5062- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
5063+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
5064+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
5065 }
5066
5067 #ifdef CONFIG_ARM_LPAE
5068@@ -607,6 +659,8 @@ static void __init build_mem_type_table(void)
5069 vecs_pgprot |= PTE_EXT_AF;
5070 #endif
5071
5072+ user_pgprot |= __supported_pte_mask;
5073+
5074 for (i = 0; i < 16; i++) {
5075 pteval_t v = pgprot_val(protection_map[i]);
5076 protection_map[i] = __pgprot(v | user_pgprot);
5077@@ -624,21 +678,24 @@ static void __init build_mem_type_table(void)
5078
5079 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
5080 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
5081- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
5082- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
5083+ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
5084+ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
5085 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
5086 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
5087+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
5088+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
5089 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
5090- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
5091- mem_types[MT_ROM].prot_sect |= cp->pmd;
5092+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
5093+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
5094+ mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
5095
5096 switch (cp->pmd) {
5097 case PMD_SECT_WT:
5098- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
5099+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
5100 break;
5101 case PMD_SECT_WB:
5102 case PMD_SECT_WBWA:
5103- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
5104+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
5105 break;
5106 }
5107 pr_info("Memory policy: %sData cache %s\n",
5108@@ -856,7 +913,7 @@ static void __init create_mapping(struct map_desc *md)
5109 return;
5110 }
5111
5112- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
5113+ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
5114 md->virtual >= PAGE_OFFSET &&
5115 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
5116 printk(KERN_WARNING "BUG: mapping for 0x%08llx"
5117@@ -1224,18 +1281,15 @@ void __init arm_mm_memblock_reserve(void)
5118 * called function. This means you can't use any function or debugging
5119 * method which may touch any device, otherwise the kernel _will_ crash.
5120 */
5121+
5122+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
5123+
5124 static void __init devicemaps_init(const struct machine_desc *mdesc)
5125 {
5126 struct map_desc map;
5127 unsigned long addr;
5128- void *vectors;
5129
5130- /*
5131- * Allocate the vector page early.
5132- */
5133- vectors = early_alloc(PAGE_SIZE * 2);
5134-
5135- early_trap_init(vectors);
5136+ early_trap_init(&vectors);
5137
5138 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
5139 pmd_clear(pmd_off_k(addr));
5140@@ -1248,7 +1302,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
5141 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
5142 map.virtual = MODULES_VADDR;
5143 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
5144- map.type = MT_ROM;
5145+ map.type = MT_ROM_RX;
5146 create_mapping(&map);
5147 #endif
5148
5149@@ -1259,14 +1313,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
5150 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
5151 map.virtual = FLUSH_BASE;
5152 map.length = SZ_1M;
5153- map.type = MT_CACHECLEAN;
5154+ map.type = MT_CACHECLEAN_RO;
5155 create_mapping(&map);
5156 #endif
5157 #ifdef FLUSH_BASE_MINICACHE
5158 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
5159 map.virtual = FLUSH_BASE_MINICACHE;
5160 map.length = SZ_1M;
5161- map.type = MT_MINICLEAN;
5162+ map.type = MT_MINICLEAN_RO;
5163 create_mapping(&map);
5164 #endif
5165
5166@@ -1275,7 +1329,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
5167 * location (0xffff0000). If we aren't using high-vectors, also
5168 * create a mapping at the low-vectors virtual address.
5169 */
5170- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
5171+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
5172 map.virtual = 0xffff0000;
5173 map.length = PAGE_SIZE;
5174 #ifdef CONFIG_KUSER_HELPERS
5175@@ -1335,8 +1389,10 @@ static void __init kmap_init(void)
5176 static void __init map_lowmem(void)
5177 {
5178 struct memblock_region *reg;
5179+#ifndef CONFIG_PAX_KERNEXEC
5180 unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
5181 unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
5182+#endif
5183
5184 /* Map all the lowmem memory banks. */
5185 for_each_memblock(memory, reg) {
5186@@ -1349,11 +1405,48 @@ static void __init map_lowmem(void)
5187 if (start >= end)
5188 break;
5189
5190+#ifdef CONFIG_PAX_KERNEXEC
5191+ map.pfn = __phys_to_pfn(start);
5192+ map.virtual = __phys_to_virt(start);
5193+ map.length = end - start;
5194+
5195+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
5196+ struct map_desc kernel;
5197+ struct map_desc initmap;
5198+
5199+ /* when freeing initmem we will make this RW */
5200+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
5201+ initmap.virtual = (unsigned long)__init_begin;
5202+ initmap.length = _sdata - __init_begin;
5203+ initmap.type = __MT_MEMORY_RWX;
5204+ create_mapping(&initmap);
5205+
5206+ /* when freeing initmem we will make this RX */
5207+ kernel.pfn = __phys_to_pfn(__pa(_stext));
5208+ kernel.virtual = (unsigned long)_stext;
5209+ kernel.length = __init_begin - _stext;
5210+ kernel.type = __MT_MEMORY_RWX;
5211+ create_mapping(&kernel);
5212+
5213+ if (map.virtual < (unsigned long)_stext) {
5214+ map.length = (unsigned long)_stext - map.virtual;
5215+ map.type = __MT_MEMORY_RWX;
5216+ create_mapping(&map);
5217+ }
5218+
5219+ map.pfn = __phys_to_pfn(__pa(_sdata));
5220+ map.virtual = (unsigned long)_sdata;
5221+ map.length = end - __pa(_sdata);
5222+ }
5223+
5224+ map.type = MT_MEMORY_RW;
5225+ create_mapping(&map);
5226+#else
5227 if (end < kernel_x_start || start >= kernel_x_end) {
5228 map.pfn = __phys_to_pfn(start);
5229 map.virtual = __phys_to_virt(start);
5230 map.length = end - start;
5231- map.type = MT_MEMORY_RWX;
5232+ map.type = __MT_MEMORY_RWX;
5233
5234 create_mapping(&map);
5235 } else {
5236@@ -1370,7 +1463,7 @@ static void __init map_lowmem(void)
5237 map.pfn = __phys_to_pfn(kernel_x_start);
5238 map.virtual = __phys_to_virt(kernel_x_start);
5239 map.length = kernel_x_end - kernel_x_start;
5240- map.type = MT_MEMORY_RWX;
5241+ map.type = __MT_MEMORY_RWX;
5242
5243 create_mapping(&map);
5244
5245@@ -1383,6 +1476,7 @@ static void __init map_lowmem(void)
5246 create_mapping(&map);
5247 }
5248 }
5249+#endif
5250 }
5251 }
5252
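
The hunks above split ARM's formerly writable-and-executable memory types into separate RW and RX variants so that KERNEXEC can hold kernel mappings to a W^X policy; __MT_MEMORY_RWX survives only for the transitional init-time mapping that is tightened once initmem is freed. A minimal userspace model of the invariant, where the table contents and the checker are illustrative only:

#include <stdio.h>
#include <stdbool.h>

enum prot { P_R = 1 << 0, P_W = 1 << 1, P_X = 1 << 2 };

struct mem_type_model { const char *name; unsigned prot; };

static const struct mem_type_model types[] = {
	{ "MT_MEMORY_RW",    P_R | P_W },       /* data: writable, never executable */
	{ "MT_MEMORY_RX",    P_R | P_X },       /* text: executable, never writable */
	{ "MT_ROM_RX",       P_R | P_X },
	{ "__MT_MEMORY_RWX", P_R | P_W | P_X }, /* init-time only, remapped later */
};

static bool violates_wx(unsigned prot)
{
	return (prot & P_W) && (prot & P_X);
}

int main(void)
{
	for (unsigned i = 0; i < sizeof(types) / sizeof(types[0]); i++)
		printf("%-18s %s\n", types[i].name,
		       violates_wx(types[i].prot) ? "RWX (init only)" : "W^X ok");
	return 0;
}
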
5253diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
5254index 5b217f4..c23f40e 100644
5255--- a/arch/arm/plat-iop/setup.c
5256+++ b/arch/arm/plat-iop/setup.c
5257@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
5258 .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
5259 .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
5260 .length = IOP3XX_PERIPHERAL_SIZE,
5261- .type = MT_UNCACHED,
5262+ .type = MT_UNCACHED_RW,
5263 },
5264 };
5265
5266diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
5267index a5bc92d..0bb4730 100644
5268--- a/arch/arm/plat-omap/sram.c
5269+++ b/arch/arm/plat-omap/sram.c
5270@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
5271 * Looks like we need to preserve some bootloader code at the
5272 * beginning of SRAM for jumping to flash for reboot to work...
5273 */
5274+ pax_open_kernel();
5275 memset_io(omap_sram_base + omap_sram_skip, 0,
5276 omap_sram_size - omap_sram_skip);
5277+ pax_close_kernel();
5278 }
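
pax_open_kernel() and pax_close_kernel() bracket the one legitimate write to memory that KERNEXEC otherwise keeps read-only, here the SRAM region being cleared at init. A rough userspace analogue of the pattern, with mprotect() standing in for the kernel's page-table and domain manipulation (a sketch of the idea, not the PaX implementation):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "boot stub");
	mprotect(p, pagesz, PROT_READ);              /* steady state: read-only */

	mprotect(p, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel() */
	memset(p, 0, pagesz);                        /* the protected write */
	mprotect(p, pagesz, PROT_READ);              /* pax_close_kernel() */

	printf("first byte after clear: %d\n", p[0]);
	munmap(p, pagesz);
	return 0;
}
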
5279diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
5280index ce6d763..cfea917 100644
5281--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
5282+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
5283@@ -47,7 +47,7 @@ struct samsung_dma_ops {
5284 int (*started)(unsigned ch);
5285 int (*flush)(unsigned ch);
5286 int (*stop)(unsigned ch);
5287-};
5288+} __no_const;
5289
5290 extern void *samsung_dmadev_get_ops(void);
5291 extern void *s3c_dma_get_ops(void);
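
__no_const exempts this ops structure from PaX's constify plugin, which otherwise moves structs consisting only of function pointers into read-only memory; samsung_dma_ops is filled in at runtime, so it must stay writable. A standalone sketch of the conflict (struct layout and the probe step are invented for illustration):

#include <stdio.h>

struct ops_model {
	int (*stop)(unsigned ch);
};

static int dummy_stop(unsigned ch) { return (int)ch; }

/* Populated at probe time, so it cannot live in a read-only section;
 * this is what __no_const communicates to the constify plugin. */
static struct ops_model runtime_ops;

int main(void)
{
	runtime_ops.stop = dummy_stop;   /* would fault if the object were RO */
	printf("stop(3) = %d\n", runtime_ops.stop(3));
	return 0;
}
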
5292diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
5293index 6389d60..b5d3bdd 100644
5294--- a/arch/arm64/include/asm/barrier.h
5295+++ b/arch/arm64/include/asm/barrier.h
5296@@ -41,7 +41,7 @@
5297 do { \
5298 compiletime_assert_atomic_type(*p); \
5299 barrier(); \
5300- ACCESS_ONCE(*p) = (v); \
5301+ ACCESS_ONCE_RW(*p) = (v); \
5302 } while (0)
5303
5304 #define smp_load_acquire(p) \
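
ACCESS_ONCE_RW is the PaX spelling of ACCESS_ONCE for sites that deliberately store through the reference: both expand to a volatile access, and the _RW form only opts the write out of the const-based checking applied to plain ACCESS_ONCE. A paraphrased userspace model (not the kernel's exact macro text):

#include <stdio.h>

#define ACCESS_ONCE(x)    (*(volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

int main(void)
{
	int flag = 0;
	ACCESS_ONCE_RW(flag) = 1;          /* store performed exactly once */
	printf("%d\n", ACCESS_ONCE(flag)); /* load not cached in a register */
	return 0;
}
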
5305diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
5306index 3bf8f4e..5dd5491 100644
5307--- a/arch/arm64/include/asm/uaccess.h
5308+++ b/arch/arm64/include/asm/uaccess.h
5309@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
5310 flag; \
5311 })
5312
5313+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5314 #define access_ok(type, addr, size) __range_ok(addr, size)
5315 #define user_addr_max get_fs
5316
5317diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
5318index c3a58a1..78fbf54 100644
5319--- a/arch/avr32/include/asm/cache.h
5320+++ b/arch/avr32/include/asm/cache.h
5321@@ -1,8 +1,10 @@
5322 #ifndef __ASM_AVR32_CACHE_H
5323 #define __ASM_AVR32_CACHE_H
5324
5325+#include <linux/const.h>
5326+
5327 #define L1_CACHE_SHIFT 5
5328-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5329+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5330
5331 /*
5332 * Memory returned by kmalloc() may be used for DMA, so we must make
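
The _AC(1,UL) form used throughout these cache.h hunks keeps L1_CACHE_BYTES usable from both C and assembly: in C the UL suffix is token-pasted on, so the shift is performed at unsigned long width, while the assembler sees a bare constant. A self-contained rendering of the <uapi/linux/const.h> trick these headers rely on:

#ifdef __ASSEMBLY__
#define _AC(X, Y)  X             /* assembler: plain constant */
#else
#define __AC(X, Y) (X##Y)
#define _AC(X, Y)  __AC(X, Y)    /* C: 1UL, shift done in unsigned long */
#endif

#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)

#ifndef __ASSEMBLY__
#include <stdio.h>
int main(void) { printf("%lu\n", L1_CACHE_BYTES); return 0; }
#endif
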
5333diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
5334index d232888..87c8df1 100644
5335--- a/arch/avr32/include/asm/elf.h
5336+++ b/arch/avr32/include/asm/elf.h
5337@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
5338 the loader. We need to make sure that it is out of the way of the program
5339 that it will "exec", and that there is sufficient room for the brk. */
5340
5341-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
5342+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
5343
5344+#ifdef CONFIG_PAX_ASLR
5345+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
5346+
5347+#define PAX_DELTA_MMAP_LEN 15
5348+#define PAX_DELTA_STACK_LEN 15
5349+#endif
5350
5351 /* This yields a mask that user programs can use to figure out what
5352 instruction set this CPU supports. This could be done in user space,
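
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN are bit counts: they state how many bits of page-aligned randomness PaX folds into the mmap and stack bases on this architecture (15 bits here, so up to 32767 pages of shift). A toy calculation with an invented base address, where rand() stands in for the kernel's entropy source:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT         12
#define PAX_DELTA_MMAP_LEN 15   /* from the avr32 hunk above */

int main(void)
{
	unsigned long base = 0x40000000UL;   /* illustrative only */
	unsigned long delta;

	srand((unsigned)time(NULL));
	delta = ((unsigned long)rand() &
		 ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
	printf("randomized mmap base: %#lx (+%lu KiB)\n",
	       base + delta, delta >> 10);
	return 0;
}
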
5353diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
5354index 479330b..53717a8 100644
5355--- a/arch/avr32/include/asm/kmap_types.h
5356+++ b/arch/avr32/include/asm/kmap_types.h
5357@@ -2,9 +2,9 @@
5358 #define __ASM_AVR32_KMAP_TYPES_H
5359
5360 #ifdef CONFIG_DEBUG_HIGHMEM
5361-# define KM_TYPE_NR 29
5362+# define KM_TYPE_NR 30
5363 #else
5364-# define KM_TYPE_NR 14
5365+# define KM_TYPE_NR 15
5366 #endif
5367
5368 #endif /* __ASM_AVR32_KMAP_TYPES_H */
5369diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
5370index 0eca933..eb78c7b 100644
5371--- a/arch/avr32/mm/fault.c
5372+++ b/arch/avr32/mm/fault.c
5373@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
5374
5375 int exception_trace = 1;
5376
5377+#ifdef CONFIG_PAX_PAGEEXEC
5378+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5379+{
5380+ unsigned long i;
5381+
5382+ printk(KERN_ERR "PAX: bytes at PC: ");
5383+ for (i = 0; i < 20; i++) {
5384+ unsigned char c;
5385+ if (get_user(c, (unsigned char *)pc+i))
5386+ printk(KERN_CONT "???????? ");
5387+ else
5388+ printk(KERN_CONT "%02x ", c);
5389+ }
5390+ printk("\n");
5391+}
5392+#endif
5393+
5394 /*
5395 * This routine handles page faults. It determines the address and the
5396 * problem, and then passes it off to one of the appropriate routines.
5397@@ -176,6 +193,16 @@ bad_area:
5398 up_read(&mm->mmap_sem);
5399
5400 if (user_mode(regs)) {
5401+
5402+#ifdef CONFIG_PAX_PAGEEXEC
5403+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
5404+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
5405+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
5406+ do_group_exit(SIGKILL);
5407+ }
5408+ }
5409+#endif
5410+
5411 if (exception_trace && printk_ratelimit())
5412 printk("%s%s[%d]: segfault at %08lx pc %08lx "
5413 "sp %08lx ecr %lu\n",
5414diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
5415index 568885a..f8008df 100644
5416--- a/arch/blackfin/include/asm/cache.h
5417+++ b/arch/blackfin/include/asm/cache.h
5418@@ -7,6 +7,7 @@
5419 #ifndef __ARCH_BLACKFIN_CACHE_H
5420 #define __ARCH_BLACKFIN_CACHE_H
5421
5422+#include <linux/const.h>
5423 #include <linux/linkage.h> /* for asmlinkage */
5424
5425 /*
5426@@ -14,7 +15,7 @@
5427 * Blackfin loads 32 bytes for cache
5428 */
5429 #define L1_CACHE_SHIFT 5
5430-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5431+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5432 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5433
5434 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5435diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
5436index aea2718..3639a60 100644
5437--- a/arch/cris/include/arch-v10/arch/cache.h
5438+++ b/arch/cris/include/arch-v10/arch/cache.h
5439@@ -1,8 +1,9 @@
5440 #ifndef _ASM_ARCH_CACHE_H
5441 #define _ASM_ARCH_CACHE_H
5442
5443+#include <linux/const.h>
5444 /* Etrax 100LX have 32-byte cache-lines. */
5445-#define L1_CACHE_BYTES 32
5446 #define L1_CACHE_SHIFT 5
5447+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5448
5449 #endif /* _ASM_ARCH_CACHE_H */
5450diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
5451index 7caf25d..ee65ac5 100644
5452--- a/arch/cris/include/arch-v32/arch/cache.h
5453+++ b/arch/cris/include/arch-v32/arch/cache.h
5454@@ -1,11 +1,12 @@
5455 #ifndef _ASM_CRIS_ARCH_CACHE_H
5456 #define _ASM_CRIS_ARCH_CACHE_H
5457
5458+#include <linux/const.h>
5459 #include <arch/hwregs/dma.h>
5460
5461 /* A cache-line is 32 bytes. */
5462-#define L1_CACHE_BYTES 32
5463 #define L1_CACHE_SHIFT 5
5464+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5465
5466 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5467
5468diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
5469index f6c3a16..cd422a4 100644
5470--- a/arch/frv/include/asm/atomic.h
5471+++ b/arch/frv/include/asm/atomic.h
5472@@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
5473 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
5474 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
5475
5476+#define atomic64_read_unchecked(v) atomic64_read(v)
5477+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5478+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5479+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5480+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5481+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5482+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5483+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5484+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5485+
5486 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5487 {
5488 int c, old;
5489diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
5490index 2797163..c2a401d 100644
5491--- a/arch/frv/include/asm/cache.h
5492+++ b/arch/frv/include/asm/cache.h
5493@@ -12,10 +12,11 @@
5494 #ifndef __ASM_CACHE_H
5495 #define __ASM_CACHE_H
5496
5497+#include <linux/const.h>
5498
5499 /* bytes per L1 cache line */
5500 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
5501-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5502+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5503
5504 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5505 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5506diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
5507index 43901f2..0d8b865 100644
5508--- a/arch/frv/include/asm/kmap_types.h
5509+++ b/arch/frv/include/asm/kmap_types.h
5510@@ -2,6 +2,6 @@
5511 #ifndef _ASM_KMAP_TYPES_H
5512 #define _ASM_KMAP_TYPES_H
5513
5514-#define KM_TYPE_NR 17
5515+#define KM_TYPE_NR 18
5516
5517 #endif
5518diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
5519index 836f147..4cf23f5 100644
5520--- a/arch/frv/mm/elf-fdpic.c
5521+++ b/arch/frv/mm/elf-fdpic.c
5522@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5523 {
5524 struct vm_area_struct *vma;
5525 struct vm_unmapped_area_info info;
5526+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5527
5528 if (len > TASK_SIZE)
5529 return -ENOMEM;
5530@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5531 if (addr) {
5532 addr = PAGE_ALIGN(addr);
5533 vma = find_vma(current->mm, addr);
5534- if (TASK_SIZE - len >= addr &&
5535- (!vma || addr + len <= vma->vm_start))
5536+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5537 goto success;
5538 }
5539
5540@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5541 info.high_limit = (current->mm->start_stack - 0x00200000);
5542 info.align_mask = 0;
5543 info.align_offset = 0;
5544+ info.threadstack_offset = offset;
5545 addr = vm_unmapped_area(&info);
5546 if (!(addr & ~PAGE_MASK))
5547 goto success;
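
check_heap_stack_gap() generalizes the open-coded neighbor test it replaces: a candidate range is accepted only if it ends clear of the next VMA plus a guard gap, and gr_rand_threadstack_offset() lets that gap be randomized per mapping. A simplified standalone version of the test (the real one also special-cases growing stacks; the field set is reduced here):

#include <stdbool.h>
#include <stdio.h>

struct vma_model { unsigned long vm_start, vm_end; };

static bool check_gap(const struct vma_model *next, unsigned long addr,
		      unsigned long len, unsigned long gap)
{
	if (!next)                            /* nothing above: always fits */
		return true;
	return addr + len + gap <= next->vm_start;
}

int main(void)
{
	struct vma_model stack = { 0x7f000000UL, 0x7f100000UL };

	/* fits with room to spare */
	printf("%d\n", check_gap(&stack, 0x7e000000UL, 0x100000UL, 0x10000UL));
	/* ends flush against the stack: rejected because of the gap */
	printf("%d\n", check_gap(&stack, 0x7ef00000UL, 0x100000UL, 0x10000UL));
	return 0;
}
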
5548diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
5549index f4ca594..adc72fd6 100644
5550--- a/arch/hexagon/include/asm/cache.h
5551+++ b/arch/hexagon/include/asm/cache.h
5552@@ -21,9 +21,11 @@
5553 #ifndef __ASM_CACHE_H
5554 #define __ASM_CACHE_H
5555
5556+#include <linux/const.h>
5557+
5558 /* Bytes per L1 cache line */
5559-#define L1_CACHE_SHIFT (5)
5560-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5561+#define L1_CACHE_SHIFT 5
5562+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5563
5564 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
5565 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
5566diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
5567index 2f3abcf..e63c7fa 100644
5568--- a/arch/ia64/Kconfig
5569+++ b/arch/ia64/Kconfig
5570@@ -547,6 +547,7 @@ source "drivers/sn/Kconfig"
5571 config KEXEC
5572 bool "kexec system call"
5573 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
5574+ depends on !GRKERNSEC_KMEM
5575 help
5576 kexec is a system call that implements the ability to shutdown your
5577 current kernel, and to start another kernel. It is like a reboot
5578diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
5579index 0f8bf48..40ea950 100644
5580--- a/arch/ia64/include/asm/atomic.h
5581+++ b/arch/ia64/include/asm/atomic.h
5582@@ -209,4 +209,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
5583 #define atomic64_inc(v) atomic64_add(1, (v))
5584 #define atomic64_dec(v) atomic64_sub(1, (v))
5585
5586+#define atomic64_read_unchecked(v) atomic64_read(v)
5587+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5588+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5589+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5590+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5591+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5592+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5593+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5594+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5595+
5596 #endif /* _ASM_IA64_ATOMIC_H */
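
The *_unchecked aliases let code with intentional wraparound (statistics counters and similar) opt out of PAX_REFCOUNT's overflow trap; ia64 carries no REFCOUNT instrumentation, so here they simply alias the plain operations. A userspace model of the two behaviours, with __builtin_add_overflow standing in for the architecture's trapping add:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int add_checked(int *v, int i)
{
	int res;
	if (__builtin_add_overflow(*v, i, &res)) {
		fprintf(stderr, "refcount overflow caught\n");
		abort();                 /* kernel: exception fixup path */
	}
	return *v = res;
}

static int add_unchecked(int *v, int i)
{
	/* wraparound is the desired behaviour for this caller */
	return *v = (int)((unsigned)*v + (unsigned)i);
}

int main(void)
{
	int a = INT_MAX, b = INT_MAX;
	printf("unchecked: %d\n", add_unchecked(&b, 1)); /* wraps to INT_MIN */
	add_checked(&a, 1);                              /* aborts */
	return 0;
}
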
5597diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
5598index a48957c..e097b56 100644
5599--- a/arch/ia64/include/asm/barrier.h
5600+++ b/arch/ia64/include/asm/barrier.h
5601@@ -67,7 +67,7 @@
5602 do { \
5603 compiletime_assert_atomic_type(*p); \
5604 barrier(); \
5605- ACCESS_ONCE(*p) = (v); \
5606+ ACCESS_ONCE_RW(*p) = (v); \
5607 } while (0)
5608
5609 #define smp_load_acquire(p) \
5610diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
5611index 988254a..e1ee885 100644
5612--- a/arch/ia64/include/asm/cache.h
5613+++ b/arch/ia64/include/asm/cache.h
5614@@ -1,6 +1,7 @@
5615 #ifndef _ASM_IA64_CACHE_H
5616 #define _ASM_IA64_CACHE_H
5617
5618+#include <linux/const.h>
5619
5620 /*
5621 * Copyright (C) 1998-2000 Hewlett-Packard Co
5622@@ -9,7 +10,7 @@
5623
5624 /* Bytes per L1 (data) cache line. */
5625 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
5626-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5627+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5628
5629 #ifdef CONFIG_SMP
5630 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5631diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
5632index 5a83c5c..4d7f553 100644
5633--- a/arch/ia64/include/asm/elf.h
5634+++ b/arch/ia64/include/asm/elf.h
5635@@ -42,6 +42,13 @@
5636 */
5637 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
5638
5639+#ifdef CONFIG_PAX_ASLR
5640+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
5641+
5642+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5643+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5644+#endif
5645+
5646 #define PT_IA_64_UNWIND 0x70000001
5647
5648 /* IA-64 relocations: */
5649diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
5650index 5767cdf..7462574 100644
5651--- a/arch/ia64/include/asm/pgalloc.h
5652+++ b/arch/ia64/include/asm/pgalloc.h
5653@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5654 pgd_val(*pgd_entry) = __pa(pud);
5655 }
5656
5657+static inline void
5658+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5659+{
5660+ pgd_populate(mm, pgd_entry, pud);
5661+}
5662+
5663 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
5664 {
5665 return quicklist_alloc(0, GFP_KERNEL, NULL);
5666@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5667 pud_val(*pud_entry) = __pa(pmd);
5668 }
5669
5670+static inline void
5671+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5672+{
5673+ pud_populate(mm, pud_entry, pmd);
5674+}
5675+
5676 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5677 {
5678 return quicklist_alloc(0, GFP_KERNEL, NULL);
5679diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5680index 7935115..c0eca6a 100644
5681--- a/arch/ia64/include/asm/pgtable.h
5682+++ b/arch/ia64/include/asm/pgtable.h
5683@@ -12,7 +12,7 @@
5684 * David Mosberger-Tang <davidm@hpl.hp.com>
5685 */
5686
5687-
5688+#include <linux/const.h>
5689 #include <asm/mman.h>
5690 #include <asm/page.h>
5691 #include <asm/processor.h>
5692@@ -142,6 +142,17 @@
5693 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5694 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5695 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5696+
5697+#ifdef CONFIG_PAX_PAGEEXEC
5698+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5699+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5700+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5701+#else
5702+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5703+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5704+# define PAGE_COPY_NOEXEC PAGE_COPY
5705+#endif
5706+
5707 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5708 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5709 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5710diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5711index 45698cd..e8e2dbc 100644
5712--- a/arch/ia64/include/asm/spinlock.h
5713+++ b/arch/ia64/include/asm/spinlock.h
5714@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5715 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5716
5717 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5718- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5719+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5720 }
5721
5722 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5723diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5724index 449c8c0..3d4b1e9 100644
5725--- a/arch/ia64/include/asm/uaccess.h
5726+++ b/arch/ia64/include/asm/uaccess.h
5727@@ -70,6 +70,7 @@
5728 && ((segment).seg == KERNEL_DS.seg \
5729 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
5730 })
5731+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5732 #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
5733
5734 /*
5735@@ -240,12 +241,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5736 static inline unsigned long
5737 __copy_to_user (void __user *to, const void *from, unsigned long count)
5738 {
5739+ if (count > INT_MAX)
5740+ return count;
5741+
5742+ if (!__builtin_constant_p(count))
5743+ check_object_size(from, count, true);
5744+
5745 return __copy_user(to, (__force void __user *) from, count);
5746 }
5747
5748 static inline unsigned long
5749 __copy_from_user (void *to, const void __user *from, unsigned long count)
5750 {
5751+ if (count > INT_MAX)
5752+ return count;
5753+
5754+ if (!__builtin_constant_p(count))
5755+ check_object_size(to, count, false);
5756+
5757 return __copy_user((__force void __user *) to, from, count);
5758 }
5759
5760@@ -255,10 +268,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5761 ({ \
5762 void __user *__cu_to = (to); \
5763 const void *__cu_from = (from); \
5764- long __cu_len = (n); \
5765+ unsigned long __cu_len = (n); \
5766 \
5767- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5768+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5769+ if (!__builtin_constant_p(n)) \
5770+ check_object_size(__cu_from, __cu_len, true); \
5771 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5772+ } \
5773 __cu_len; \
5774 })
5775
5776@@ -266,11 +282,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5777 ({ \
5778 void *__cu_to = (to); \
5779 const void __user *__cu_from = (from); \
5780- long __cu_len = (n); \
5781+ unsigned long __cu_len = (n); \
5782 \
5783 __chk_user_ptr(__cu_from); \
5784- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5785+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5786+ if (!__builtin_constant_p(n)) \
5787+ check_object_size(__cu_to, __cu_len, false); \
5788 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5789+ } \
5790 __cu_len; \
5791 })
5792
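
Both copy helpers gain the same two defenses: a count above INT_MAX is rejected outright (the signature of an unsigned length underflow) and non-constant sizes are routed through check_object_size(), the USERCOPY bounds check. The shape of the hardened helper, sketched in userspace with a stub checker and memcpy standing in for __copy_user():

#include <limits.h>
#include <stdio.h>
#include <string.h>

static void check_object_size(const void *ptr, unsigned long n, int to_user)
{
	/* kernel: verify [ptr, ptr + n) stays inside one slab/stack object */
	(void)ptr; (void)to_user;
	printf("checking %lu byte(s)\n", n);
}

static unsigned long copy_to_user_sketch(void *to, const void *from,
					 unsigned long count)
{
	if (count > INT_MAX)        /* underflowed length: huge value */
		return count;       /* report everything as uncopied */
	check_object_size(from, count, 1);
	memcpy(to, from, count);
	return 0;
}

int main(void)
{
	char src[8] = "payload", dst[8];
	printf("left: %lu\n", copy_to_user_sketch(dst, src, sizeof(src)));
	printf("left: %lu\n", copy_to_user_sketch(dst, src, (unsigned long)-1));
	return 0;
}
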
5793diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5794index 24603be..948052d 100644
5795--- a/arch/ia64/kernel/module.c
5796+++ b/arch/ia64/kernel/module.c
5797@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
5798 void
5799 module_free (struct module *mod, void *module_region)
5800 {
5801- if (mod && mod->arch.init_unw_table &&
5802- module_region == mod->module_init) {
5803+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
5804 unw_remove_unwind_table(mod->arch.init_unw_table);
5805 mod->arch.init_unw_table = NULL;
5806 }
5807@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5808 }
5809
5810 static inline int
5811+in_init_rx (const struct module *mod, uint64_t addr)
5812+{
5813+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5814+}
5815+
5816+static inline int
5817+in_init_rw (const struct module *mod, uint64_t addr)
5818+{
5819+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5820+}
5821+
5822+static inline int
5823 in_init (const struct module *mod, uint64_t addr)
5824 {
5825- return addr - (uint64_t) mod->module_init < mod->init_size;
5826+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5827+}
5828+
5829+static inline int
5830+in_core_rx (const struct module *mod, uint64_t addr)
5831+{
5832+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5833+}
5834+
5835+static inline int
5836+in_core_rw (const struct module *mod, uint64_t addr)
5837+{
5838+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5839 }
5840
5841 static inline int
5842 in_core (const struct module *mod, uint64_t addr)
5843 {
5844- return addr - (uint64_t) mod->module_core < mod->core_size;
5845+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5846 }
5847
5848 static inline int
5849@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5850 break;
5851
5852 case RV_BDREL:
5853- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5854+ if (in_init_rx(mod, val))
5855+ val -= (uint64_t) mod->module_init_rx;
5856+ else if (in_init_rw(mod, val))
5857+ val -= (uint64_t) mod->module_init_rw;
5858+ else if (in_core_rx(mod, val))
5859+ val -= (uint64_t) mod->module_core_rx;
5860+ else if (in_core_rw(mod, val))
5861+ val -= (uint64_t) mod->module_core_rw;
5862 break;
5863
5864 case RV_LTV:
5865@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5866 * addresses have been selected...
5867 */
5868 uint64_t gp;
5869- if (mod->core_size > MAX_LTOFF)
5870+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5871 /*
5872 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5873 * at the end of the module.
5874 */
5875- gp = mod->core_size - MAX_LTOFF / 2;
5876+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5877 else
5878- gp = mod->core_size / 2;
5879- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5880+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5881+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5882 mod->arch.gp = gp;
5883 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5884 }
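
With KERNEXEC a module occupies two regions, an RX one for text and an RW one for data, so every place that classified an address against module_init/module_core must now test both halves; the gp placement above likewise sizes against the sum of the two. The unsigned-compare membership test used by in_init_rx() and friends, reduced to a standalone check (layout values invented):

#include <stdint.h>
#include <stdio.h>

struct module_model {
	uint64_t core_rx, core_size_rx;   /* text: read + execute */
	uint64_t core_rw, core_size_rw;   /* data: read + write   */
};

/* addr - base wraps to a huge value when addr < base, so one unsigned
 * compare covers both ends of the range. */
static int in_core_rx(const struct module_model *m, uint64_t a)
{
	return a - m->core_rx < m->core_size_rx;
}

static int in_core_rw(const struct module_model *m, uint64_t a)
{
	return a - m->core_rw < m->core_size_rw;
}

static int in_core(const struct module_model *m, uint64_t a)
{
	return in_core_rx(m, a) || in_core_rw(m, a);
}

int main(void)
{
	struct module_model m = { 0x1000, 0x2000, 0x8000, 0x1000 };
	printf("%d %d %d\n",
	       in_core(&m, 0x1800),   /* inside rx  */
	       in_core(&m, 0x8800),   /* inside rw  */
	       in_core(&m, 0x5000));  /* in the gap */
	return 0;
}
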
5885diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5886index c39c3cd..3c77738 100644
5887--- a/arch/ia64/kernel/palinfo.c
5888+++ b/arch/ia64/kernel/palinfo.c
5889@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5890 return NOTIFY_OK;
5891 }
5892
5893-static struct notifier_block __refdata palinfo_cpu_notifier =
5894+static struct notifier_block palinfo_cpu_notifier =
5895 {
5896 .notifier_call = palinfo_cpu_callback,
5897 .priority = 0,
5898diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5899index 41e33f8..65180b2a 100644
5900--- a/arch/ia64/kernel/sys_ia64.c
5901+++ b/arch/ia64/kernel/sys_ia64.c
5902@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5903 unsigned long align_mask = 0;
5904 struct mm_struct *mm = current->mm;
5905 struct vm_unmapped_area_info info;
5906+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5907
5908 if (len > RGN_MAP_LIMIT)
5909 return -ENOMEM;
5910@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5911 if (REGION_NUMBER(addr) == RGN_HPAGE)
5912 addr = 0;
5913 #endif
5914+
5915+#ifdef CONFIG_PAX_RANDMMAP
5916+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5917+ addr = mm->free_area_cache;
5918+ else
5919+#endif
5920+
5921 if (!addr)
5922 addr = TASK_UNMAPPED_BASE;
5923
5924@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5925 info.high_limit = TASK_SIZE;
5926 info.align_mask = align_mask;
5927 info.align_offset = 0;
5928+ info.threadstack_offset = offset;
5929 return vm_unmapped_area(&info);
5930 }
5931
5932diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5933index 84f8a52..7c76178 100644
5934--- a/arch/ia64/kernel/vmlinux.lds.S
5935+++ b/arch/ia64/kernel/vmlinux.lds.S
5936@@ -192,7 +192,7 @@ SECTIONS {
5937 /* Per-cpu data: */
5938 . = ALIGN(PERCPU_PAGE_SIZE);
5939 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5940- __phys_per_cpu_start = __per_cpu_load;
5941+ __phys_per_cpu_start = per_cpu_load;
5942 /*
5943 * ensure percpu data fits
5944 * into percpu page size
5945diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5946index 7225dad..2a7c8256 100644
5947--- a/arch/ia64/mm/fault.c
5948+++ b/arch/ia64/mm/fault.c
5949@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5950 return pte_present(pte);
5951 }
5952
5953+#ifdef CONFIG_PAX_PAGEEXEC
5954+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5955+{
5956+ unsigned long i;
5957+
5958+ printk(KERN_ERR "PAX: bytes at PC: ");
5959+ for (i = 0; i < 8; i++) {
5960+ unsigned int c;
5961+ if (get_user(c, (unsigned int *)pc+i))
5962+ printk(KERN_CONT "???????? ");
5963+ else
5964+ printk(KERN_CONT "%08x ", c);
5965+ }
5966+ printk("\n");
5967+}
5968+#endif
5969+
5970 # define VM_READ_BIT 0
5971 # define VM_WRITE_BIT 1
5972 # define VM_EXEC_BIT 2
5973@@ -151,8 +168,21 @@ retry:
5974 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5975 goto bad_area;
5976
5977- if ((vma->vm_flags & mask) != mask)
5978+ if ((vma->vm_flags & mask) != mask) {
5979+
5980+#ifdef CONFIG_PAX_PAGEEXEC
5981+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5982+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5983+ goto bad_area;
5984+
5985+ up_read(&mm->mmap_sem);
5986+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5987+ do_group_exit(SIGKILL);
5988+ }
5989+#endif
5990+
5991 goto bad_area;
5992+ }
5993
5994 /*
5995 * If for any reason at all we couldn't handle the fault, make
5996diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5997index 76069c1..c2aa816 100644
5998--- a/arch/ia64/mm/hugetlbpage.c
5999+++ b/arch/ia64/mm/hugetlbpage.c
6000@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
6001 unsigned long pgoff, unsigned long flags)
6002 {
6003 struct vm_unmapped_area_info info;
6004+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
6005
6006 if (len > RGN_MAP_LIMIT)
6007 return -ENOMEM;
6008@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
6009 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
6010 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
6011 info.align_offset = 0;
6012+ info.threadstack_offset = offset;
6013 return vm_unmapped_area(&info);
6014 }
6015
6016diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
6017index 25c3502..560dae7 100644
6018--- a/arch/ia64/mm/init.c
6019+++ b/arch/ia64/mm/init.c
6020@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
6021 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
6022 vma->vm_end = vma->vm_start + PAGE_SIZE;
6023 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
6024+
6025+#ifdef CONFIG_PAX_PAGEEXEC
6026+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
6027+ vma->vm_flags &= ~VM_EXEC;
6028+
6029+#ifdef CONFIG_PAX_MPROTECT
6030+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
6031+ vma->vm_flags &= ~VM_MAYEXEC;
6032+#endif
6033+
6034+ }
6035+#endif
6036+
6037 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6038 down_write(&current->mm->mmap_sem);
6039 if (insert_vm_struct(current->mm, vma)) {
6040diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
6041index 40b3ee9..8c2c112 100644
6042--- a/arch/m32r/include/asm/cache.h
6043+++ b/arch/m32r/include/asm/cache.h
6044@@ -1,8 +1,10 @@
6045 #ifndef _ASM_M32R_CACHE_H
6046 #define _ASM_M32R_CACHE_H
6047
6048+#include <linux/const.h>
6049+
6050 /* L1 cache line size */
6051 #define L1_CACHE_SHIFT 4
6052-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6053+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6054
6055 #endif /* _ASM_M32R_CACHE_H */
6056diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
6057index 82abd15..d95ae5d 100644
6058--- a/arch/m32r/lib/usercopy.c
6059+++ b/arch/m32r/lib/usercopy.c
6060@@ -14,6 +14,9 @@
6061 unsigned long
6062 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
6063 {
6064+ if ((long)n < 0)
6065+ return n;
6066+
6067 prefetch(from);
6068 if (access_ok(VERIFY_WRITE, to, n))
6069 __copy_user(to,from,n);
6070@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
6071 unsigned long
6072 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
6073 {
6074+ if ((long)n < 0)
6075+ return n;
6076+
6077 prefetchw(to);
6078 if (access_ok(VERIFY_READ, from, n))
6079 __copy_user_zeroing(to,from,n);
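
The (long)n < 0 guard rejects lengths whose sign bit is set, the typical product of a size_t underflow such as len - hdr with hdr > len; without it such a value proceeds to access_ok() and turns __copy_user() into an enormous copy. The failure mode in miniature:

#include <stdio.h>

int main(void)
{
	unsigned long len = 4, hdr = 16;
	unsigned long n = len - hdr;     /* wraps to a huge value */
	printf("n = %lu, (long)n < 0: %d\n", n, (long)n < 0);
	return 0;
}
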
6080diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
6081index 0395c51..5f26031 100644
6082--- a/arch/m68k/include/asm/cache.h
6083+++ b/arch/m68k/include/asm/cache.h
6084@@ -4,9 +4,11 @@
6085 #ifndef __ARCH_M68K_CACHE_H
6086 #define __ARCH_M68K_CACHE_H
6087
6088+#include <linux/const.h>
6089+
6090 /* bytes per L1 cache line */
6091 #define L1_CACHE_SHIFT 4
6092-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
6093+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6094
6095 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
6096
6097diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
6098index c7591e8..ecef036 100644
6099--- a/arch/metag/include/asm/barrier.h
6100+++ b/arch/metag/include/asm/barrier.h
6101@@ -89,7 +89,7 @@ static inline void fence(void)
6102 do { \
6103 compiletime_assert_atomic_type(*p); \
6104 smp_mb(); \
6105- ACCESS_ONCE(*p) = (v); \
6106+ ACCESS_ONCE_RW(*p) = (v); \
6107 } while (0)
6108
6109 #define smp_load_acquire(p) \
6110diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
6111index 3c52fa6..11b2ad8 100644
6112--- a/arch/metag/mm/hugetlbpage.c
6113+++ b/arch/metag/mm/hugetlbpage.c
6114@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
6115 info.high_limit = TASK_SIZE;
6116 info.align_mask = PAGE_MASK & HUGEPT_MASK;
6117 info.align_offset = 0;
6118+ info.threadstack_offset = 0;
6119 return vm_unmapped_area(&info);
6120 }
6121
6122diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
6123index 4efe96a..60e8699 100644
6124--- a/arch/microblaze/include/asm/cache.h
6125+++ b/arch/microblaze/include/asm/cache.h
6126@@ -13,11 +13,12 @@
6127 #ifndef _ASM_MICROBLAZE_CACHE_H
6128 #define _ASM_MICROBLAZE_CACHE_H
6129
6130+#include <linux/const.h>
6131 #include <asm/registers.h>
6132
6133 #define L1_CACHE_SHIFT 5
6134 /* word-granular cache in microblaze */
6135-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6136+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6137
6138 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6139
6140diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
6141index 4e238e6..7c9ed92 100644
6142--- a/arch/mips/Kconfig
6143+++ b/arch/mips/Kconfig
6144@@ -2392,6 +2392,7 @@ source "kernel/Kconfig.preempt"
6145
6146 config KEXEC
6147 bool "Kexec system call"
6148+ depends on !GRKERNSEC_KMEM
6149 help
6150 kexec is a system call that implements the ability to shutdown your
6151 current kernel, and to start another kernel. It is like a reboot
6152diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
6153index 02f2444..506969c 100644
6154--- a/arch/mips/cavium-octeon/dma-octeon.c
6155+++ b/arch/mips/cavium-octeon/dma-octeon.c
6156@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
6157 if (dma_release_from_coherent(dev, order, vaddr))
6158 return;
6159
6160- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
6161+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
6162 }
6163
6164 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
6165diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
6166index 37b2bef..02122b8 100644
6167--- a/arch/mips/include/asm/atomic.h
6168+++ b/arch/mips/include/asm/atomic.h
6169@@ -21,15 +21,39 @@
6170 #include <asm/cmpxchg.h>
6171 #include <asm/war.h>
6172
6173+#ifdef CONFIG_GENERIC_ATOMIC64
6174+#include <asm-generic/atomic64.h>
6175+#endif
6176+
6177 #define ATOMIC_INIT(i) { (i) }
6178
6179+#ifdef CONFIG_64BIT
6180+#define _ASM_EXTABLE(from, to) \
6181+" .section __ex_table,\"a\"\n" \
6182+" .dword " #from ", " #to"\n" \
6183+" .previous\n"
6184+#else
6185+#define _ASM_EXTABLE(from, to) \
6186+" .section __ex_table,\"a\"\n" \
6187+" .word " #from ", " #to"\n" \
6188+" .previous\n"
6189+#endif
6190+
6191 /*
6192 * atomic_read - read atomic variable
6193 * @v: pointer of type atomic_t
6194 *
6195 * Atomically reads the value of @v.
6196 */
6197-#define atomic_read(v) (*(volatile int *)&(v)->counter)
6198+static inline int atomic_read(const atomic_t *v)
6199+{
6200+ return (*(volatile const int *) &v->counter);
6201+}
6202+
6203+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6204+{
6205+ return (*(volatile const int *) &v->counter);
6206+}
6207
6208 /*
6209 * atomic_set - set atomic variable
6210@@ -38,7 +62,15 @@
6211 *
6212 * Atomically sets the value of @v to @i.
6213 */
6214-#define atomic_set(v, i) ((v)->counter = (i))
6215+static inline void atomic_set(atomic_t *v, int i)
6216+{
6217+ v->counter = i;
6218+}
6219+
6220+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6221+{
6222+ v->counter = i;
6223+}
6224
6225 /*
6226 * atomic_add - add integer to atomic variable
6227@@ -47,7 +79,67 @@
6228 *
6229 * Atomically adds @i to @v.
6230 */
6231-static __inline__ void atomic_add(int i, atomic_t * v)
6232+static __inline__ void atomic_add(int i, atomic_t *v)
6233+{
6234+ int temp;
6235+
6236+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6237+ __asm__ __volatile__(
6238+ " .set mips3 \n"
6239+ "1: ll %0, %1 # atomic_add \n"
6240+#ifdef CONFIG_PAX_REFCOUNT
6241+ /* Exception on overflow. */
6242+ "2: add %0, %2 \n"
6243+#else
6244+ " addu %0, %2 \n"
6245+#endif
6246+ " sc %0, %1 \n"
6247+ " beqzl %0, 1b \n"
6248+#ifdef CONFIG_PAX_REFCOUNT
6249+ "3: \n"
6250+ _ASM_EXTABLE(2b, 3b)
6251+#endif
6252+ " .set mips0 \n"
6253+ : "=&r" (temp), "+m" (v->counter)
6254+ : "Ir" (i));
6255+ } else if (kernel_uses_llsc) {
6256+ __asm__ __volatile__(
6257+ " .set mips3 \n"
6258+ "1: ll %0, %1 # atomic_add \n"
6259+#ifdef CONFIG_PAX_REFCOUNT
6260+ /* Exception on overflow. */
6261+ "2: add %0, %2 \n"
6262+#else
6263+ " addu %0, %2 \n"
6264+#endif
6265+ " sc %0, %1 \n"
6266+ " beqz %0, 1b \n"
6267+#ifdef CONFIG_PAX_REFCOUNT
6268+ "3: \n"
6269+ _ASM_EXTABLE(2b, 3b)
6270+#endif
6271+ " .set mips0 \n"
6272+ : "=&r" (temp), "+m" (v->counter)
6273+ : "Ir" (i));
6274+ } else {
6275+ unsigned long flags;
6276+
6277+ raw_local_irq_save(flags);
6278+ __asm__ __volatile__(
6279+#ifdef CONFIG_PAX_REFCOUNT
6280+ /* Exception on overflow. */
6281+ "1: add %0, %1 \n"
6282+ "2: \n"
6283+ _ASM_EXTABLE(1b, 2b)
6284+#else
6285+ " addu %0, %1 \n"
6286+#endif
6287+ : "+r" (v->counter) : "Ir" (i));
6288+ raw_local_irq_restore(flags);
6289+ }
6290+}
6291+
6292+static __inline__ void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6293 {
6294 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6295 int temp;
6296@@ -90,7 +182,67 @@ static __inline__ void atomic_add(int i, atomic_t * v)
6297 *
6298 * Atomically subtracts @i from @v.
6299 */
6300-static __inline__ void atomic_sub(int i, atomic_t * v)
6301+static __inline__ void atomic_sub(int i, atomic_t *v)
6302+{
6303+ int temp;
6304+
6305+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6306+ __asm__ __volatile__(
6307+ " .set mips3 \n"
6308+ "1: ll %0, %1 # atomic_sub \n"
6309+#ifdef CONFIG_PAX_REFCOUNT
6310+ /* Exception on overflow. */
6311+ "2: sub %0, %2 \n"
6312+#else
6313+ " subu %0, %2 \n"
6314+#endif
6315+ " sc %0, %1 \n"
6316+ " beqzl %0, 1b \n"
6317+#ifdef CONFIG_PAX_REFCOUNT
6318+ "3: \n"
6319+ _ASM_EXTABLE(2b, 3b)
6320+#endif
6321+ " .set mips0 \n"
6322+ : "=&r" (temp), "+m" (v->counter)
6323+ : "Ir" (i));
6324+ } else if (kernel_uses_llsc) {
6325+ __asm__ __volatile__(
6326+ " .set mips3 \n"
6327+ "1: ll %0, %1 # atomic_sub \n"
6328+#ifdef CONFIG_PAX_REFCOUNT
6329+ /* Exception on overflow. */
6330+ "2: sub %0, %2 \n"
6331+#else
6332+ " subu %0, %2 \n"
6333+#endif
6334+ " sc %0, %1 \n"
6335+ " beqz %0, 1b \n"
6336+#ifdef CONFIG_PAX_REFCOUNT
6337+ "3: \n"
6338+ _ASM_EXTABLE(2b, 3b)
6339+#endif
6340+ " .set mips0 \n"
6341+ : "=&r" (temp), "+m" (v->counter)
6342+ : "Ir" (i));
6343+ } else {
6344+ unsigned long flags;
6345+
6346+ raw_local_irq_save(flags);
6347+ __asm__ __volatile__(
6348+#ifdef CONFIG_PAX_REFCOUNT
6349+ /* Exception on overflow. */
6350+ "1: sub %0, %1 \n"
6351+ "2: \n"
6352+ _ASM_EXTABLE(1b, 2b)
6353+#else
6354+ " subu %0, %1 \n"
6355+#endif
6356+ : "+r" (v->counter) : "Ir" (i));
6357+ raw_local_irq_restore(flags);
6358+ }
6359+}
6360+
6361+static __inline__ void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6362 {
6363 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6364 int temp;
6365@@ -129,7 +281,93 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
6366 /*
6367 * Same as above, but return the result value
6368 */
6369-static __inline__ int atomic_add_return(int i, atomic_t * v)
6370+static __inline__ int atomic_add_return(int i, atomic_t *v)
6371+{
6372+ int result;
6373+ int temp;
6374+
6375+ smp_mb__before_llsc();
6376+
6377+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6378+ __asm__ __volatile__(
6379+ " .set mips3 \n"
6380+ "1: ll %1, %2 # atomic_add_return \n"
6381+#ifdef CONFIG_PAX_REFCOUNT
6382+ "2: add %0, %1, %3 \n"
6383+#else
6384+ " addu %0, %1, %3 \n"
6385+#endif
6386+ " sc %0, %2 \n"
6387+ " beqzl %0, 1b \n"
6388+#ifdef CONFIG_PAX_REFCOUNT
6389+ " b 4f \n"
6390+ " .set noreorder \n"
6391+ "3: b 5f \n"
6392+ " move %0, %1 \n"
6393+ " .set reorder \n"
6394+ _ASM_EXTABLE(2b, 3b)
6395+#endif
6396+ "4: addu %0, %1, %3 \n"
6397+#ifdef CONFIG_PAX_REFCOUNT
6398+ "5: \n"
6399+#endif
6400+ " .set mips0 \n"
6401+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6402+ : "Ir" (i));
6403+ } else if (kernel_uses_llsc) {
6404+ __asm__ __volatile__(
6405+ " .set mips3 \n"
6406+ "1: ll %1, %2 # atomic_add_return \n"
6407+#ifdef CONFIG_PAX_REFCOUNT
6408+ "2: add %0, %1, %3 \n"
6409+#else
6410+ " addu %0, %1, %3 \n"
6411+#endif
6412+ " sc %0, %2 \n"
6413+ " bnez %0, 4f \n"
6414+ " b 1b \n"
6415+#ifdef CONFIG_PAX_REFCOUNT
6416+ " .set noreorder \n"
6417+ "3: b 5f \n"
6418+ " move %0, %1 \n"
6419+ " .set reorder \n"
6420+ _ASM_EXTABLE(2b, 3b)
6421+#endif
6422+ "4: addu %0, %1, %3 \n"
6423+#ifdef CONFIG_PAX_REFCOUNT
6424+ "5: \n"
6425+#endif
6426+ " .set mips0 \n"
6427+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6428+ : "Ir" (i));
6429+ } else {
6430+ unsigned long flags;
6431+
6432+ raw_local_irq_save(flags);
6433+ __asm__ __volatile__(
6434+ " lw %0, %1 \n"
6435+#ifdef CONFIG_PAX_REFCOUNT
6436+ /* Exception on overflow. */
6437+ "1: add %0, %2 \n"
6438+#else
6439+ " addu %0, %2 \n"
6440+#endif
6441+ " sw %0, %1 \n"
6442+#ifdef CONFIG_PAX_REFCOUNT
6443+ /* Note: Dest reg is not modified on overflow */
6444+ "2: \n"
6445+ _ASM_EXTABLE(1b, 2b)
6446+#endif
6447+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6448+ raw_local_irq_restore(flags);
6449+ }
6450+
6451+ smp_llsc_mb();
6452+
6453+ return result;
6454+}
6455+
6456+static __inline__ int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6457 {
6458 int result;
6459
6460@@ -178,7 +416,93 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
6461 return result;
6462 }
6463
6464-static __inline__ int atomic_sub_return(int i, atomic_t * v)
6465+static __inline__ int atomic_sub_return(int i, atomic_t *v)
6466+{
6467+ int result;
6468+ int temp;
6469+
6470+ smp_mb__before_llsc();
6471+
6472+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6473+ __asm__ __volatile__(
6474+ " .set mips3 \n"
6475+ "1: ll %1, %2 # atomic_sub_return \n"
6476+#ifdef CONFIG_PAX_REFCOUNT
6477+ "2: sub %0, %1, %3 \n"
6478+#else
6479+ " subu %0, %1, %3 \n"
6480+#endif
6481+ " sc %0, %2 \n"
6482+ " beqzl %0, 1b \n"
6483+#ifdef CONFIG_PAX_REFCOUNT
6484+ " b 4f \n"
6485+ " .set noreorder \n"
6486+ "3: b 5f \n"
6487+ " move %0, %1 \n"
6488+ " .set reorder \n"
6489+ _ASM_EXTABLE(2b, 3b)
6490+#endif
6491+ "4: subu %0, %1, %3 \n"
6492+#ifdef CONFIG_PAX_REFCOUNT
6493+ "5: \n"
6494+#endif
6495+ " .set mips0 \n"
6496+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6497+ : "Ir" (i), "m" (v->counter)
6498+ : "memory");
6499+ } else if (kernel_uses_llsc) {
6500+ __asm__ __volatile__(
6501+ " .set mips3 \n"
6502+ "1: ll %1, %2 # atomic_sub_return \n"
6503+#ifdef CONFIG_PAX_REFCOUNT
6504+ "2: sub %0, %1, %3 \n"
6505+#else
6506+ " subu %0, %1, %3 \n"
6507+#endif
6508+ " sc %0, %2 \n"
6509+ " bnez %0, 4f \n"
6510+ " b 1b \n"
6511+#ifdef CONFIG_PAX_REFCOUNT
6512+ " .set noreorder \n"
6513+ "3: b 5f \n"
6514+ " move %0, %1 \n"
6515+ " .set reorder \n"
6516+ _ASM_EXTABLE(2b, 3b)
6517+#endif
6518+ "4: subu %0, %1, %3 \n"
6519+#ifdef CONFIG_PAX_REFCOUNT
6520+ "5: \n"
6521+#endif
6522+ " .set mips0 \n"
6523+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6524+ : "Ir" (i));
6525+ } else {
6526+ unsigned long flags;
6527+
6528+ raw_local_irq_save(flags);
6529+ __asm__ __volatile__(
6530+ " lw %0, %1 \n"
6531+#ifdef CONFIG_PAX_REFCOUNT
6532+ /* Exception on overflow. */
6533+ "1: sub %0, %2 \n"
6534+#else
6535+ " subu %0, %2 \n"
6536+#endif
6537+ " sw %0, %1 \n"
6538+#ifdef CONFIG_PAX_REFCOUNT
6539+ /* Note: Dest reg is not modified on overflow */
6540+ "2: \n"
6541+ _ASM_EXTABLE(1b, 2b)
6542+#endif
6543+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6544+ raw_local_irq_restore(flags);
6545+ }
6546+
6547+ smp_llsc_mb();
6548+
6549+ return result;
6550+}
6551+static __inline__ int atomic_sub_return_unchecked(int i, atomic_unchecked_t *v)
6552 {
6553 int result;
6554
6555@@ -238,7 +562,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
6556 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6557 * The function returns the old value of @v minus @i.
6558 */
6559-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
6560+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
6561 {
6562 int result;
6563
6564@@ -295,8 +619,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
6565 return result;
6566 }
6567
6568-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
6569-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
6570+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6571+{
6572+ return cmpxchg(&v->counter, old, new);
6573+}
6574+
6575+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
6576+ int new)
6577+{
6578+ return cmpxchg(&(v->counter), old, new);
6579+}
6580+
6581+static inline int atomic_xchg(atomic_t *v, int new)
6582+{
6583+ return xchg(&v->counter, new);
6584+}
6585+
6586+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6587+{
6588+ return xchg(&(v->counter), new);
6589+}
6590
6591 /**
6592 * __atomic_add_unless - add unless the number is a given value
6593@@ -324,6 +666,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6594
6595 #define atomic_dec_return(v) atomic_sub_return(1, (v))
6596 #define atomic_inc_return(v) atomic_add_return(1, (v))
6597+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6598+{
6599+ return atomic_add_return_unchecked(1, v);
6600+}
6601
6602 /*
6603 * atomic_sub_and_test - subtract value from variable and test result
6604@@ -345,6 +691,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6605 * other cases.
6606 */
6607 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
6608+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6609+{
6610+ return atomic_add_return_unchecked(1, v) == 0;
6611+}
6612
6613 /*
6614 * atomic_dec_and_test - decrement by 1 and test
6615@@ -369,6 +719,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6616 * Atomically increments @v by 1.
6617 */
6618 #define atomic_inc(v) atomic_add(1, (v))
6619+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
6620+{
6621+ atomic_add_unchecked(1, v);
6622+}
6623
6624 /*
6625 * atomic_dec - decrement and test
6626@@ -377,6 +731,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6627 * Atomically decrements @v by 1.
6628 */
6629 #define atomic_dec(v) atomic_sub(1, (v))
6630+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
6631+{
6632+ atomic_sub_unchecked(1, v);
6633+}
6634
6635 /*
6636 * atomic_add_negative - add and test if negative
6637@@ -398,14 +756,30 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6638 * @v: pointer of type atomic64_t
6639 *
6640 */
6641-#define atomic64_read(v) (*(volatile long *)&(v)->counter)
6642+static inline long atomic64_read(const atomic64_t *v)
6643+{
6644+ return (*(volatile const long *) &v->counter);
6645+}
6646+
6647+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6648+{
6649+ return (*(volatile const long *) &v->counter);
6650+}
6651
6652 /*
6653 * atomic64_set - set atomic variable
6654 * @v: pointer of type atomic64_t
6655 * @i: required value
6656 */
6657-#define atomic64_set(v, i) ((v)->counter = (i))
6658+static inline void atomic64_set(atomic64_t *v, long i)
6659+{
6660+ v->counter = i;
6661+}
6662+
6663+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6664+{
6665+ v->counter = i;
6666+}
6667
6668 /*
6669 * atomic64_add - add integer to atomic variable
6670@@ -414,7 +788,66 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6671 *
6672 * Atomically adds @i to @v.
6673 */
6674-static __inline__ void atomic64_add(long i, atomic64_t * v)
6675+static __inline__ void atomic64_add(long i, atomic64_t *v)
6676+{
6677+ long temp;
6678+
6679+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6680+ __asm__ __volatile__(
6681+ " .set mips3 \n"
6682+ "1: lld %0, %1 # atomic64_add \n"
6683+#ifdef CONFIG_PAX_REFCOUNT
6684+ /* Exception on overflow. */
6685+ "2: dadd %0, %2 \n"
6686+#else
6687+ " daddu %0, %2 \n"
6688+#endif
6689+ " scd %0, %1 \n"
6690+ " beqzl %0, 1b \n"
6691+#ifdef CONFIG_PAX_REFCOUNT
6692+ "3: \n"
6693+ _ASM_EXTABLE(2b, 3b)
6694+#endif
6695+ " .set mips0 \n"
6696+ : "=&r" (temp), "+m" (v->counter)
6697+ : "Ir" (i));
6698+ } else if (kernel_uses_llsc) {
6699+ __asm__ __volatile__(
6700+ " .set mips3 \n"
6701+ "1: lld %0, %1 # atomic64_add \n"
6702+#ifdef CONFIG_PAX_REFCOUNT
6703+ /* Exception on overflow. */
6704+ "2: dadd %0, %2 \n"
6705+#else
6706+ " daddu %0, %2 \n"
6707+#endif
6708+ " scd %0, %1 \n"
6709+ " beqz %0, 1b \n"
6710+#ifdef CONFIG_PAX_REFCOUNT
6711+ "3: \n"
6712+ _ASM_EXTABLE(2b, 3b)
6713+#endif
6714+ " .set mips0 \n"
6715+ : "=&r" (temp), "+m" (v->counter)
6716+ : "Ir" (i));
6717+ } else {
6718+ unsigned long flags;
6719+
6720+ raw_local_irq_save(flags);
6721+ __asm__ __volatile__(
6722+#ifdef CONFIG_PAX_REFCOUNT
6723+ /* Exception on overflow. */
6724+ "1: dadd %0, %1 \n"
6725+ "2: \n"
6726+ _ASM_EXTABLE(1b, 2b)
6727+#else
6728+ " daddu %0, %1 \n"
6729+#endif
6730+ : "+r" (v->counter) : "Ir" (i));
6731+ raw_local_irq_restore(flags);
6732+ }
6733+}
6734+static __inline__ void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6735 {
6736 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6737 long temp;
6738@@ -457,7 +890,67 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
6739 *
6740 * Atomically subtracts @i from @v.
6741 */
6742-static __inline__ void atomic64_sub(long i, atomic64_t * v)
6743+static __inline__ void atomic64_sub(long i, atomic64_t *v)
6744+{
6745+ long temp;
6746+
6747+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6748+ __asm__ __volatile__(
6749+ " .set mips3 \n"
6750+ "1: lld %0, %1 # atomic64_sub \n"
6751+#ifdef CONFIG_PAX_REFCOUNT
6752+ /* Exception on overflow. */
6753+ "2: dsub %0, %2 \n"
6754+#else
6755+ " dsubu %0, %2 \n"
6756+#endif
6757+ " scd %0, %1 \n"
6758+ " beqzl %0, 1b \n"
6759+#ifdef CONFIG_PAX_REFCOUNT
6760+ "3: \n"
6761+ _ASM_EXTABLE(2b, 3b)
6762+#endif
6763+ " .set mips0 \n"
6764+ : "=&r" (temp), "+m" (v->counter)
6765+ : "Ir" (i));
6766+ } else if (kernel_uses_llsc) {
6767+ __asm__ __volatile__(
6768+ " .set mips3 \n"
6769+ "1: lld %0, %1 # atomic64_sub \n"
6770+#ifdef CONFIG_PAX_REFCOUNT
6771+ /* Exception on overflow. */
6772+ "2: dsub %0, %2 \n"
6773+#else
6774+ " dsubu %0, %2 \n"
6775+#endif
6776+ " scd %0, %1 \n"
6777+ " beqz %0, 1b \n"
6778+#ifdef CONFIG_PAX_REFCOUNT
6779+ "3: \n"
6780+ _ASM_EXTABLE(2b, 3b)
6781+#endif
6782+ " .set mips0 \n"
6783+ : "=&r" (temp), "+m" (v->counter)
6784+ : "Ir" (i));
6785+ } else {
6786+ unsigned long flags;
6787+
6788+ raw_local_irq_save(flags);
6789+ __asm__ __volatile__(
6790+#ifdef CONFIG_PAX_REFCOUNT
6791+ /* Exception on overflow. */
6792+ "1: dsub %0, %1 \n"
6793+ "2: \n"
6794+ _ASM_EXTABLE(1b, 2b)
6795+#else
6796+ " dsubu %0, %1 \n"
6797+#endif
6798+ : "+r" (v->counter) : "Ir" (i));
6799+ raw_local_irq_restore(flags);
6800+ }
6801+}
6802+
6803+static __inline__ void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6804 {
6805 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6806 long temp;
6807@@ -496,7 +989,93 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
6808 /*
6809 * Same as above, but return the result value
6810 */
6811-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6812+static __inline__ long atomic64_add_return(long i, atomic64_t *v)
6813+{
6814+ long result;
6815+ long temp;
6816+
6817+ smp_mb__before_llsc();
6818+
6819+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6820+ __asm__ __volatile__(
6821+ " .set mips3 \n"
6822+ "1: lld %1, %2 # atomic64_add_return \n"
6823+#ifdef CONFIG_PAX_REFCOUNT
6824+ "2: dadd %0, %1, %3 \n"
6825+#else
6826+ " daddu %0, %1, %3 \n"
6827+#endif
6828+ " scd %0, %2 \n"
6829+ " beqzl %0, 1b \n"
6830+#ifdef CONFIG_PAX_REFCOUNT
6831+ " b 4f \n"
6832+ " .set noreorder \n"
6833+ "3: b 5f \n"
6834+ " move %0, %1 \n"
6835+ " .set reorder \n"
6836+ _ASM_EXTABLE(2b, 3b)
6837+#endif
6838+ "4: daddu %0, %1, %3 \n"
6839+#ifdef CONFIG_PAX_REFCOUNT
6840+ "5: \n"
6841+#endif
6842+ " .set mips0 \n"
6843+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6844+ : "Ir" (i));
6845+ } else if (kernel_uses_llsc) {
6846+ __asm__ __volatile__(
6847+ " .set mips3 \n"
6848+ "1: lld %1, %2 # atomic64_add_return \n"
6849+#ifdef CONFIG_PAX_REFCOUNT
6850+ "2: dadd %0, %1, %3 \n"
6851+#else
6852+ " daddu %0, %1, %3 \n"
6853+#endif
6854+ " scd %0, %2 \n"
6855+ " bnez %0, 4f \n"
6856+ " b 1b \n"
6857+#ifdef CONFIG_PAX_REFCOUNT
6858+ " .set noreorder \n"
6859+ "3: b 5f \n"
6860+ " move %0, %1 \n"
6861+ " .set reorder \n"
6862+ _ASM_EXTABLE(2b, 3b)
6863+#endif
6864+ "4: daddu %0, %1, %3 \n"
6865+#ifdef CONFIG_PAX_REFCOUNT
6866+ "5: \n"
6867+#endif
6868+ " .set mips0 \n"
6869+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6870+ : "Ir" (i), "m" (v->counter)
6871+ : "memory");
6872+ } else {
6873+ unsigned long flags;
6874+
6875+ raw_local_irq_save(flags);
6876+ __asm__ __volatile__(
6877+ " ld %0, %1 \n"
6878+#ifdef CONFIG_PAX_REFCOUNT
6879+ /* Exception on overflow. */
6880+ "1: dadd %0, %2 \n"
6881+#else
6882+ " daddu %0, %2 \n"
6883+#endif
6884+ " sd %0, %1 \n"
6885+#ifdef CONFIG_PAX_REFCOUNT
6886+ /* Note: Dest reg is not modified on overflow */
6887+ "2: \n"
6888+ _ASM_EXTABLE(1b, 2b)
6889+#endif
6890+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6891+ raw_local_irq_restore(flags);
6892+ }
6893+
6894+ smp_llsc_mb();
6895+
6896+ return result;
6897+}
6898+static __inline__ long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6899 {
6900 long result;
6901
6902@@ -546,7 +1125,95 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6903 return result;
6904 }
6905
6906-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6907+static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
6908+{
6909+ long result;
6910+ long temp;
6911+
6912+ smp_mb__before_llsc();
6913+
6914+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6917+ __asm__ __volatile__(
6918+ " .set mips3 \n"
6919+ "1: lld %1, %2 # atomic64_sub_return \n"
6920+#ifdef CONFIG_PAX_REFCOUNT
6921+ "2: dsub %0, %1, %3 \n"
6922+#else
6923+ " dsubu %0, %1, %3 \n"
6924+#endif
6925+ " scd %0, %2 \n"
6926+ " beqzl %0, 1b \n"
6927+#ifdef CONFIG_PAX_REFCOUNT
6928+ " b 4f \n"
6929+ " .set noreorder \n"
6930+ "3: b 5f \n"
6931+ " move %0, %1 \n"
6932+ " .set reorder \n"
6933+ _ASM_EXTABLE(2b, 3b)
6934+#endif
6935+ "4: dsubu %0, %1, %3 \n"
6936+#ifdef CONFIG_PAX_REFCOUNT
6937+ "5: \n"
6938+#endif
6939+ " .set mips0 \n"
6940+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6941+ : "Ir" (i), "m" (v->counter)
6942+ : "memory");
6943+ } else if (kernel_uses_llsc) {
6944+ __asm__ __volatile__(
6945+ " .set mips3 \n"
6946+ "1: lld %1, %2 # atomic64_sub_return \n"
6947+#ifdef CONFIG_PAX_REFCOUNT
6948+ "2: dsub %0, %1, %3 \n"
6949+#else
6950+ " dsubu %0, %1, %3 \n"
6951+#endif
6952+ " scd %0, %2 \n"
6953+ " bnez %0, 4f \n"
6954+ " b 1b \n"
6955+#ifdef CONFIG_PAX_REFCOUNT
6956+ " .set noreorder \n"
6957+ "3: b 5f \n"
6958+ " move %0, %1 \n"
6959+ " .set reorder \n"
6960+ _ASM_EXTABLE(2b, 3b)
6961+#endif
6962+ "4: dsubu %0, %1, %3 \n"
6963+#ifdef CONFIG_PAX_REFCOUNT
6964+ "5: \n"
6965+#endif
6966+ " .set mips0 \n"
6967+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6968+ : "Ir" (i), "m" (v->counter)
6969+ : "memory");
6970+ } else {
6971+ unsigned long flags;
6972+
6973+ raw_local_irq_save(flags);
6974+ __asm__ __volatile__(
6975+ " ld %0, %1 \n"
6976+#ifdef CONFIG_PAX_REFCOUNT
6977+ /* Exception on overflow. */
6978+ "1: dsub %0, %2 \n"
6979+#else
6980+ " dsubu %0, %2 \n"
6981+#endif
6982+ " sd %0, %1 \n"
6983+#ifdef CONFIG_PAX_REFCOUNT
6984+ /* Note: Dest reg is not modified on overflow */
6985+ "2: \n"
6986+ _ASM_EXTABLE(1b, 2b)
6987+#endif
6988+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6989+ raw_local_irq_restore(flags);
6990+ }
6991+
6992+ smp_llsc_mb();
6993+
6994+ return result;
6995+}
6996+
6997+static __inline__ long atomic64_sub_return_unchecked(long i, atomic64_unchecked_t *v)
6998 {
6999 long result;
7000
7001@@ -605,7 +1274,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
7002 * Atomically test @v and subtract @i if @v is greater or equal than @i.
7003 * The function returns the old value of @v minus @i.
7004 */
7005-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
7006+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
7007 {
7008 long result;
7009
7010@@ -662,9 +1331,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
7011 return result;
7012 }
7013
7014-#define atomic64_cmpxchg(v, o, n) \
7015- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
7016-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
7017+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7018+{
7019+ return cmpxchg(&v->counter, old, new);
7020+}
7021+
7022+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
7023+ long new)
7024+{
7025+ return cmpxchg(&(v->counter), old, new);
7026+}
7027+
7028+static inline long atomic64_xchg(atomic64_t *v, long new)
7029+{
7030+ return xchg(&v->counter, new);
7031+}
7032+
7033+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
7034+{
7035+ return xchg(&(v->counter), new);
7036+}
7037
7038 /**
7039 * atomic64_add_unless - add unless the number is a given value
7040@@ -694,6 +1380,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
7041
7042 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
7043 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
7044+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
7045
7046 /*
7047 * atomic64_sub_and_test - subtract value from variable and test result
7048@@ -715,6 +1402,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
7049 * other cases.
7050 */
7051 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
7052+#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
7053
7054 /*
7055 * atomic64_dec_and_test - decrement by 1 and test
7056@@ -739,6 +1427,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
7057 * Atomically increments @v by 1.
7058 */
7059 #define atomic64_inc(v) atomic64_add(1, (v))
7060+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
7061
7062 /*
7063 * atomic64_dec - decrement and test
7064@@ -747,6 +1436,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
7065 * Atomically decrements @v by 1.
7066 */
7067 #define atomic64_dec(v) atomic64_sub(1, (v))
7068+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
7069
7070 /*
7071 * atomic64_add_negative - add and test if negative
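
The pattern running through all of the MIPS atomic changes above: checked operations on atomic_t use the trapping dadd/dsub instructions plus an _ASM_EXTABLE fixup entry, so a signed overflow raises an exception that the trap handler can turn into a PaX refcount report, while the new *_unchecked variants on atomic_unchecked_t keep the wrapping daddu/dsubu for counters that may legitimately wrap (statistics, error counts). Below is a minimal single-threaded user-space sketch of that split, with GCC/Clang's __builtin_add_overflow standing in for the trapping instruction and the LL/SC atomicity deliberately elided; the type names mirror the patch, everything else is illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

typedef struct { int counter; } atomic_t;                     /* overflow-checked */
typedef struct { unsigned int counter; } atomic_unchecked_t;  /* wraps silently; unsigned
                                                                 here so the wrap stays
                                                                 defined in ISO C */

/* Stand-in for the trapping dadd + _ASM_EXTABLE fixup. */
static void atomic_inc(atomic_t *v)
{
    int new;

    if (__builtin_add_overflow(v->counter, 1, &new)) {
        fprintf(stderr, "refcount overflow detected\n");
        abort();                /* the kernel reports and kills instead */
    }
    v->counter = new;
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
    v->counter++;               /* daddu: wraps around, never traps */
}

int main(void)
{
    atomic_unchecked_t stat = { 0 };
    atomic_t ref = { INT_MAX };

    atomic_inc_unchecked(&stat);
    atomic_inc(&ref);           /* aborts instead of wrapping negative */
    return 0;
}
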
7072diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
7073index d0101dd..266982c 100644
7074--- a/arch/mips/include/asm/barrier.h
7075+++ b/arch/mips/include/asm/barrier.h
7076@@ -184,7 +184,7 @@
7077 do { \
7078 compiletime_assert_atomic_type(*p); \
7079 smp_mb(); \
7080- ACCESS_ONCE(*p) = (v); \
7081+ ACCESS_ONCE_RW(*p) = (v); \
7082 } while (0)
7083
7084 #define smp_load_acquire(p) \
7085diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
7086index b4db69f..8f3b093 100644
7087--- a/arch/mips/include/asm/cache.h
7088+++ b/arch/mips/include/asm/cache.h
7089@@ -9,10 +9,11 @@
7090 #ifndef _ASM_CACHE_H
7091 #define _ASM_CACHE_H
7092
7093+#include <linux/const.h>
7094 #include <kmalloc.h>
7095
7096 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
7097-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7098+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7099
7100 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
7101 #define SMP_CACHE_BYTES L1_CACHE_BYTES
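
Rewriting L1_CACHE_BYTES as (_AC(1,UL) << L1_CACHE_SHIFT) gives the constant an unsigned long type in C while keeping it usable from assembly, where a UL suffix would not assemble; that is why <linux/const.h> is now included. A condensed sketch of the const.h idiom for reference:

#include <stdio.h>

/* Condensed sketch of the <linux/const.h> idiom behind _AC(). */
#ifdef __ASSEMBLY__
#define _AC(X, Y)  X            /* assembler: bare constant, no suffix */
#else
#define __AC(X, Y) (X##Y)       /* C: paste the type suffix on */
#define _AC(X, Y)  __AC(X, Y)
#endif

#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES (_AC(1, UL) << L1_CACHE_SHIFT)

int main(void)
{
    /* 32 as an unsigned long, safe in expressions like (~(x) & mask) */
    printf("%lu %zu\n", L1_CACHE_BYTES, sizeof(L1_CACHE_BYTES));
    return 0;
}
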
7102diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
7103index d414405..6bb4ba2 100644
7104--- a/arch/mips/include/asm/elf.h
7105+++ b/arch/mips/include/asm/elf.h
7106@@ -398,13 +398,16 @@ extern const char *__elf_platform;
7107 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7108 #endif
7109
7110+#ifdef CONFIG_PAX_ASLR
7111+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
7112+
7113+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7114+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7115+#endif
7116+
7117 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
7118 struct linux_binprm;
7119 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
7120 int uses_interp);
7121
7122-struct mm_struct;
7123-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7124-#define arch_randomize_brk arch_randomize_brk
7125-
7126 #endif /* _ASM_ELF_H */
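
The PAX_DELTA_*_LEN values are bit counts of randomization applied above PAGE_SHIFT: with 4 KB pages, 27 - PAGE_SHIFT gives 15 random bits (a 128 MB range) for 32-bit tasks, and 36 - PAGE_SHIFT gives 24 bits (a 64 GB range) for 64-bit ones. A hedged sketch of how such a delta is typically derived (random() is only a stand-in for the kernel's entropy source):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT          12
#define PAX_DELTA_MMAP_LEN  (27 - PAGE_SHIFT)  /* 15 bits on 32-bit */

int main(void)
{
    unsigned long rnd = (unsigned long)random();  /* stand-in entropy */

    /* Keep PAX_DELTA_MMAP_LEN random bits, then scale to pages. */
    unsigned long delta_mmap =
        (rnd & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;

    printf("mmap delta: %#lx (range %#lx)\n",
           delta_mmap, 1UL << (PAX_DELTA_MMAP_LEN + PAGE_SHIFT));
    return 0;
}
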
7127diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
7128index c1f6afa..38cc6e9 100644
7129--- a/arch/mips/include/asm/exec.h
7130+++ b/arch/mips/include/asm/exec.h
7131@@ -12,6 +12,6 @@
7132 #ifndef _ASM_EXEC_H
7133 #define _ASM_EXEC_H
7134
7135-extern unsigned long arch_align_stack(unsigned long sp);
7136+#define arch_align_stack(x) ((x) & ~0xfUL)
7137
7138 #endif /* _ASM_EXEC_H */
7139diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
7140index 9e8ef59..1139d6b 100644
7141--- a/arch/mips/include/asm/hw_irq.h
7142+++ b/arch/mips/include/asm/hw_irq.h
7143@@ -10,7 +10,7 @@
7144
7145 #include <linux/atomic.h>
7146
7147-extern atomic_t irq_err_count;
7148+extern atomic_unchecked_t irq_err_count;
7149
7150 /*
7151 * interrupt-retrigger: NOP for now. This may not be appropriate for all
7152diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
7153index 46dfc3c..a16b13a 100644
7154--- a/arch/mips/include/asm/local.h
7155+++ b/arch/mips/include/asm/local.h
7156@@ -12,15 +12,25 @@ typedef struct
7157 atomic_long_t a;
7158 } local_t;
7159
7160+typedef struct {
7161+ atomic_long_unchecked_t a;
7162+} local_unchecked_t;
7163+
7164 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
7165
7166 #define local_read(l) atomic_long_read(&(l)->a)
7167+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
7168 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
7169+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
7170
7171 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
7172+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
7173 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
7174+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
7175 #define local_inc(l) atomic_long_inc(&(l)->a)
7176+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
7177 #define local_dec(l) atomic_long_dec(&(l)->a)
7178+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
7179
7180 /*
7181 * Same as above, but return the result value
7182@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
7183 return result;
7184 }
7185
7186+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
7187+{
7188+ unsigned long result;
7189+
7190+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
7191+ unsigned long temp;
7192+
7193+ __asm__ __volatile__(
7194+ " .set mips3 \n"
7195+ "1:" __LL "%1, %2 # local_add_return \n"
7196+ " addu %0, %1, %3 \n"
7197+ __SC "%0, %2 \n"
7198+ " beqzl %0, 1b \n"
7199+ " addu %0, %1, %3 \n"
7200+ " .set mips0 \n"
7201+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
7202+ : "Ir" (i), "m" (l->a.counter)
7203+ : "memory");
7204+ } else if (kernel_uses_llsc) {
7205+ unsigned long temp;
7206+
7207+ __asm__ __volatile__(
7208+ " .set mips3 \n"
7209+ "1:" __LL "%1, %2 # local_add_return \n"
7210+ " addu %0, %1, %3 \n"
7211+ __SC "%0, %2 \n"
7212+ " beqz %0, 1b \n"
7213+ " addu %0, %1, %3 \n"
7214+ " .set mips0 \n"
7215+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
7216+ : "Ir" (i), "m" (l->a.counter)
7217+ : "memory");
7218+ } else {
7219+ unsigned long flags;
7220+
7221+ local_irq_save(flags);
7222+ result = l->a.counter;
7223+ result += i;
7224+ l->a.counter = result;
7225+ local_irq_restore(flags);
7226+ }
7227+
7228+ return result;
7229+}
7230+
7231 static __inline__ long local_sub_return(long i, local_t * l)
7232 {
7233 unsigned long result;
7234@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
7235
7236 #define local_cmpxchg(l, o, n) \
7237 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
7238+#define local_cmpxchg_unchecked(l, o, n) \
7239+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
7240 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
7241
7242 /**
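
local_add_return_unchecked above mirrors the checked version's LL/SC shape: load-linked the counter, add, store-conditional, retry until the store sticks. On architectures without LL/SC the same loop is conventionally written with compare-and-swap; a C11 sketch of that equivalent (illustrative, not the patch's code):

#include <stdatomic.h>
#include <stdio.h>

/* CAS-loop equivalent of the MIPS LL/SC add_return pattern. */
static long local_add_return(long i, _Atomic long *counter)
{
    long old = atomic_load_explicit(counter, memory_order_relaxed);

    /* "ll" = read old value; "sc" = publish old+i only if nobody
     * raced in between; on failure 'old' is reloaded and we retry. */
    while (!atomic_compare_exchange_weak(counter, &old, old + i))
        ;
    return old + i;
}

int main(void)
{
    _Atomic long l = 40;

    printf("%ld\n", local_add_return(2, &l));   /* 42 */
    return 0;
}
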
7243diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
7244index 5699ec3..95def83 100644
7245--- a/arch/mips/include/asm/page.h
7246+++ b/arch/mips/include/asm/page.h
7247@@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
7248 #ifdef CONFIG_CPU_MIPS32
7249 typedef struct { unsigned long pte_low, pte_high; } pte_t;
7250 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
7251- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
7252+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
7253 #else
7254 typedef struct { unsigned long long pte; } pte_t;
7255 #define pte_val(x) ((x).pte)
7256diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
7257index b336037..5b874cc 100644
7258--- a/arch/mips/include/asm/pgalloc.h
7259+++ b/arch/mips/include/asm/pgalloc.h
7260@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
7261 {
7262 set_pud(pud, __pud((unsigned long)pmd));
7263 }
7264+
7265+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
7266+{
7267+ pud_populate(mm, pud, pmd);
7268+}
7269 #endif
7270
7271 /*
7272diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
7273index 539ddd1..8783f9a 100644
7274--- a/arch/mips/include/asm/pgtable.h
7275+++ b/arch/mips/include/asm/pgtable.h
7276@@ -20,6 +20,9 @@
7277 #include <asm/io.h>
7278 #include <asm/pgtable-bits.h>
7279
7280+#define ktla_ktva(addr) (addr)
7281+#define ktva_ktla(addr) (addr)
7282+
7283 struct mm_struct;
7284 struct vm_area_struct;
7285
7286diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
7287index 7de8658..c109224 100644
7288--- a/arch/mips/include/asm/thread_info.h
7289+++ b/arch/mips/include/asm/thread_info.h
7290@@ -105,6 +105,9 @@ static inline struct thread_info *current_thread_info(void)
7291 #define TIF_SECCOMP 4 /* secure computing */
7292 #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
7293 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
7294+/* li takes a 32bit immediate */
7295+#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
7296+
7297 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
7298 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
7299 #define TIF_NOHZ 19 /* in adaptive nohz mode */
7300@@ -138,14 +141,16 @@ static inline struct thread_info *current_thread_info(void)
7301 #define _TIF_USEDMSA (1<<TIF_USEDMSA)
7302 #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
7303 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
7304+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
7305
7306 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
7307 _TIF_SYSCALL_AUDIT | \
7308- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
7309+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
7310+ _TIF_GRSEC_SETXID)
7311
7312 /* work to do in syscall_trace_leave() */
7313 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
7314- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
7315+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
7316
7317 /* work to do on interrupt/exception return */
7318 #define _TIF_WORK_MASK \
7319@@ -153,7 +158,7 @@ static inline struct thread_info *current_thread_info(void)
7320 /* work to do on any return to u-space */
7321 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
7322 _TIF_WORK_SYSCALL_EXIT | \
7323- _TIF_SYSCALL_TRACEPOINT)
7324+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
7325
7326 /*
7327 * We stash processor id into a COP0 register to retrieve it fast
7328diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
7329index a109510..94ee3f6 100644
7330--- a/arch/mips/include/asm/uaccess.h
7331+++ b/arch/mips/include/asm/uaccess.h
7332@@ -130,6 +130,7 @@ extern u64 __ua_limit;
7333 __ok == 0; \
7334 })
7335
7336+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
7337 #define access_ok(type, addr, size) \
7338 likely(__access_ok((addr), (size), __access_mask))
7339
7340diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
7341index 1188e00..41cf144 100644
7342--- a/arch/mips/kernel/binfmt_elfn32.c
7343+++ b/arch/mips/kernel/binfmt_elfn32.c
7344@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
7345 #undef ELF_ET_DYN_BASE
7346 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
7347
7348+#ifdef CONFIG_PAX_ASLR
7349+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
7350+
7351+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7352+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7353+#endif
7354+
7355 #include <asm/processor.h>
7356 #include <linux/module.h>
7357 #include <linux/elfcore.h>
7358diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
7359index 7faf5f2..f3d3cf4 100644
7360--- a/arch/mips/kernel/binfmt_elfo32.c
7361+++ b/arch/mips/kernel/binfmt_elfo32.c
7362@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
7363 #undef ELF_ET_DYN_BASE
7364 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
7365
7366+#ifdef CONFIG_PAX_ASLR
7367+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
7368+
7369+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7370+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7371+#endif
7372+
7373 #include <asm/processor.h>
7374
7375 /*
7376diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
7377index 50b3648..c2f3cec 100644
7378--- a/arch/mips/kernel/i8259.c
7379+++ b/arch/mips/kernel/i8259.c
7380@@ -201,7 +201,7 @@ spurious_8259A_irq:
7381 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
7382 spurious_irq_mask |= irqmask;
7383 }
7384- atomic_inc(&irq_err_count);
7385+ atomic_inc_unchecked(&irq_err_count);
7386 /*
7387 * Theoretically we do not have to handle this IRQ,
7388 * but in Linux this does not cause problems and is
7389diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
7390index 44a1f79..2bd6aa3 100644
7391--- a/arch/mips/kernel/irq-gt641xx.c
7392+++ b/arch/mips/kernel/irq-gt641xx.c
7393@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
7394 }
7395 }
7396
7397- atomic_inc(&irq_err_count);
7398+ atomic_inc_unchecked(&irq_err_count);
7399 }
7400
7401 void __init gt641xx_irq_init(void)
7402diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
7403index d2bfbc2..a8eacd2 100644
7404--- a/arch/mips/kernel/irq.c
7405+++ b/arch/mips/kernel/irq.c
7406@@ -76,17 +76,17 @@ void ack_bad_irq(unsigned int irq)
7407 printk("unexpected IRQ # %d\n", irq);
7408 }
7409
7410-atomic_t irq_err_count;
7411+atomic_unchecked_t irq_err_count;
7412
7413 int arch_show_interrupts(struct seq_file *p, int prec)
7414 {
7415- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
7416+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
7417 return 0;
7418 }
7419
7420 asmlinkage void spurious_interrupt(void)
7421 {
7422- atomic_inc(&irq_err_count);
7423+ atomic_inc_unchecked(&irq_err_count);
7424 }
7425
7426 void __init init_IRQ(void)
7427@@ -109,7 +109,10 @@ void __init init_IRQ(void)
7428 #endif
7429 }
7430
7431+
7432 #ifdef DEBUG_STACKOVERFLOW
7433+extern void gr_handle_kernel_exploit(void);
7434+
7435 static inline void check_stack_overflow(void)
7436 {
7437 unsigned long sp;
7438@@ -125,6 +128,7 @@ static inline void check_stack_overflow(void)
7439 printk("do_IRQ: stack overflow: %ld\n",
7440 sp - sizeof(struct thread_info));
7441 dump_stack();
7442+ gr_handle_kernel_exploit();
7443 }
7444 }
7445 #else
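
check_stack_overflow() masks the stack pointer down to its offset within the THREAD_SIZE-aligned kernel stack; when fewer bytes remain than the thread_info occupying the stack base, the stack has overflowed, and the patch now also calls gr_handle_kernel_exploit() to treat that condition as an attack. The mask trick in isolation, with illustrative constants:

#include <stdio.h>

#define THREAD_SIZE     0x2000UL    /* illustrative: 8 KB kernel stack */
#define THREAD_INFO_SZ  0x30UL      /* illustrative thread_info size */

/* Offset of sp within its THREAD_SIZE-aligned stack; overflow when the
 * remaining room dips into the thread_info stored at the stack base. */
static int stack_overflowed(unsigned long sp)
{
    return (sp & (THREAD_SIZE - 1)) < THREAD_INFO_SZ;
}

int main(void)
{
    printf("%d %d\n",
           stack_overflowed(0xc0004100UL),   /* plenty of room: 0 */
           stack_overflowed(0xc0004010UL));  /* inside thread_info: 1 */
    return 0;
}
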
7446diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
7447index c4c2069..bde8051 100644
7448--- a/arch/mips/kernel/pm-cps.c
7449+++ b/arch/mips/kernel/pm-cps.c
7450@@ -168,7 +168,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
7451 nc_core_ready_count = nc_addr;
7452
7453 /* Ensure ready_count is zero-initialised before the assembly runs */
7454- ACCESS_ONCE(*nc_core_ready_count) = 0;
7455+ ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
7456 coupled_barrier(&per_cpu(pm_barrier, core), online);
7457
7458 /* Run the generated entry code */
7459diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
7460index 0a1ec0f..d9e93b6 100644
7461--- a/arch/mips/kernel/process.c
7462+++ b/arch/mips/kernel/process.c
7463@@ -572,15 +572,3 @@ unsigned long get_wchan(struct task_struct *task)
7464 out:
7465 return pc;
7466 }
7467-
7468-/*
7469- * Don't forget that the stack pointer must be aligned on a 8 bytes
7470- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
7471- */
7472-unsigned long arch_align_stack(unsigned long sp)
7473-{
7474- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7475- sp -= get_random_int() & ~PAGE_MASK;
7476-
7477- return sp & ALMASK;
7478-}
7479diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
7480index f639ccd..e4b110d 100644
7481--- a/arch/mips/kernel/ptrace.c
7482+++ b/arch/mips/kernel/ptrace.c
7483@@ -630,6 +630,10 @@ long arch_ptrace(struct task_struct *child, long request,
7484 return ret;
7485 }
7486
7487+#ifdef CONFIG_GRKERNSEC_SETXID
7488+extern void gr_delayed_cred_worker(void);
7489+#endif
7490+
7491 /*
7492 * Notification of system call entry/exit
7493 * - triggered by current->work.syscall_trace
7494@@ -646,6 +650,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
7495 tracehook_report_syscall_entry(regs))
7496 ret = -1;
7497
7498+#ifdef CONFIG_GRKERNSEC_SETXID
7499+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7500+ gr_delayed_cred_worker();
7501+#endif
7502+
7503 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
7504 trace_sys_enter(regs, regs->regs[2]);
7505
7506diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
7507index 07fc524..b9d7f28 100644
7508--- a/arch/mips/kernel/reset.c
7509+++ b/arch/mips/kernel/reset.c
7510@@ -13,6 +13,7 @@
7511 #include <linux/reboot.h>
7512
7513 #include <asm/reboot.h>
7514+#include <asm/bug.h>
7515
7516 /*
7517 * Urgs ... Too many MIPS machines to handle this in a generic way.
7518@@ -29,16 +30,19 @@ void machine_restart(char *command)
7519 {
7520 if (_machine_restart)
7521 _machine_restart(command);
7522+ BUG();
7523 }
7524
7525 void machine_halt(void)
7526 {
7527 if (_machine_halt)
7528 _machine_halt();
7529+ BUG();
7530 }
7531
7532 void machine_power_off(void)
7533 {
7534 if (pm_power_off)
7535 pm_power_off();
7536+ BUG();
7537 }
7538diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
7539index 2242bdd..b284048 100644
7540--- a/arch/mips/kernel/sync-r4k.c
7541+++ b/arch/mips/kernel/sync-r4k.c
7542@@ -18,8 +18,8 @@
7543 #include <asm/mipsregs.h>
7544
7545 static atomic_t count_start_flag = ATOMIC_INIT(0);
7546-static atomic_t count_count_start = ATOMIC_INIT(0);
7547-static atomic_t count_count_stop = ATOMIC_INIT(0);
7548+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
7549+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
7550 static atomic_t count_reference = ATOMIC_INIT(0);
7551
7552 #define COUNTON 100
7553@@ -58,13 +58,13 @@ void synchronise_count_master(int cpu)
7554
7555 for (i = 0; i < NR_LOOPS; i++) {
7556 /* slaves loop on '!= 2' */
7557- while (atomic_read(&count_count_start) != 1)
7558+ while (atomic_read_unchecked(&count_count_start) != 1)
7559 mb();
7560- atomic_set(&count_count_stop, 0);
7561+ atomic_set_unchecked(&count_count_stop, 0);
7562 smp_wmb();
7563
7564 /* this lets the slaves write their count register */
7565- atomic_inc(&count_count_start);
7566+ atomic_inc_unchecked(&count_count_start);
7567
7568 /*
7569 * Everyone initialises count in the last loop:
7570@@ -75,11 +75,11 @@ void synchronise_count_master(int cpu)
7571 /*
7572 * Wait for all slaves to leave the synchronization point:
7573 */
7574- while (atomic_read(&count_count_stop) != 1)
7575+ while (atomic_read_unchecked(&count_count_stop) != 1)
7576 mb();
7577- atomic_set(&count_count_start, 0);
7578+ atomic_set_unchecked(&count_count_start, 0);
7579 smp_wmb();
7580- atomic_inc(&count_count_stop);
7581+ atomic_inc_unchecked(&count_count_stop);
7582 }
7583 /* Arrange for an interrupt in a short while */
7584 write_c0_compare(read_c0_count() + COUNTON);
7585@@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu)
7586 initcount = atomic_read(&count_reference);
7587
7588 for (i = 0; i < NR_LOOPS; i++) {
7589- atomic_inc(&count_count_start);
7590- while (atomic_read(&count_count_start) != 2)
7591+ atomic_inc_unchecked(&count_count_start);
7592+ while (atomic_read_unchecked(&count_count_start) != 2)
7593 mb();
7594
7595 /*
7596@@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu)
7597 if (i == NR_LOOPS-1)
7598 write_c0_count(initcount);
7599
7600- atomic_inc(&count_count_stop);
7601- while (atomic_read(&count_count_stop) != 2)
7602+ atomic_inc_unchecked(&count_count_stop);
7603+ while (atomic_read_unchecked(&count_count_stop) != 2)
7604 mb();
7605 }
7606 /* Arrange for an interrupt in a short while */
7607diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
7608index 51706d6..ec1178c 100644
7609--- a/arch/mips/kernel/traps.c
7610+++ b/arch/mips/kernel/traps.c
7611@@ -687,7 +687,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
7612 siginfo_t info;
7613
7614 prev_state = exception_enter();
7615- die_if_kernel("Integer overflow", regs);
7616+ if (unlikely(!user_mode(regs))) {
7617+
7618+#ifdef CONFIG_PAX_REFCOUNT
7619+ if (fixup_exception(regs)) {
7620+ pax_report_refcount_overflow(regs);
7621+ exception_exit(prev_state);
7622+ return;
7623+ }
7624+#endif
7625+
7626+ die("Integer overflow", regs);
7627+ }
7628
7629 info.si_code = FPE_INTOVF;
7630 info.si_signo = SIGFPE;
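
The do_ov() change closes the loop on the _ASM_EXTABLE(2b, 3b) entries emitted in the atomic ops: when the trapping dadd overflows in kernel mode, fixup_exception() looks the faulting PC up in the exception table, rewrites the saved PC so execution resumes at the recorded fixup, and the overflow is reported via pax_report_refcount_overflow() instead of an oops. A reduced model of that lookup (the real kernel keeps the table sorted and binary-searches it):

#include <stdio.h>
#include <stddef.h>

struct exception_table_entry {
    unsigned long insn;   /* address of the instruction that may fault */
    unsigned long fixup;  /* address execution resumes at after a trap */
};

/* Linear-scan model of the kernel's sorted, bsearched lookup. */
static unsigned long search_extable(const struct exception_table_entry *tbl,
                                    size_t n, unsigned long faulting_pc)
{
    size_t i;

    for (i = 0; i < n; i++)
        if (tbl[i].insn == faulting_pc)
            return tbl[i].fixup;
    return 0;   /* no fixup registered: the fault is genuinely fatal */
}

int main(void)
{
    /* One entry, as _ASM_EXTABLE(2b, 3b) would emit for the dadd. */
    struct exception_table_entry tbl[] = { { 0x80001008UL, 0x80001020UL } };

    printf("%#lx\n", search_extable(tbl, 1, 0x80001008UL)); /* 0x80001020 */
    return 0;
}
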
7631diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
7632index f3c56a1..6a2f01c 100644
7633--- a/arch/mips/kvm/kvm_mips.c
7634+++ b/arch/mips/kvm/kvm_mips.c
7635@@ -841,7 +841,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
7636 return r;
7637 }
7638
7639-int kvm_arch_init(void *opaque)
7640+int kvm_arch_init(const void *opaque)
7641 {
7642 int ret;
7643
7644diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
7645index becc42b..9e43d4b 100644
7646--- a/arch/mips/mm/fault.c
7647+++ b/arch/mips/mm/fault.c
7648@@ -28,6 +28,23 @@
7649 #include <asm/highmem.h> /* For VMALLOC_END */
7650 #include <linux/kdebug.h>
7651
7652+#ifdef CONFIG_PAX_PAGEEXEC
7653+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7654+{
7655+ unsigned long i;
7656+
7657+ printk(KERN_ERR "PAX: bytes at PC: ");
7658+ for (i = 0; i < 5; i++) {
7659+ unsigned int c;
7660+ if (get_user(c, (unsigned int *)pc+i))
7661+ printk(KERN_CONT "???????? ");
7662+ else
7663+ printk(KERN_CONT "%08x ", c);
7664+ }
7665+ printk("\n");
7666+}
7667+#endif
7668+
7669 /*
7670 * This routine handles page faults. It determines the address,
7671 * and the problem, and then passes it off to one of the appropriate
7672@@ -199,6 +216,14 @@ bad_area:
7673 bad_area_nosemaphore:
7674 /* User mode accesses just cause a SIGSEGV */
7675 if (user_mode(regs)) {
7676+
7677+#ifdef CONFIG_PAX_PAGEEXEC
7678+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
7679+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
7680+ do_group_exit(SIGKILL);
7681+ }
7682+#endif
7683+
7684 tsk->thread.cp0_badvaddr = address;
7685 tsk->thread.error_code = write;
7686 #if 0
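
pax_report_insns() above logs the five instruction words at the faulting PC through get_user(), printing ???????? for unreadable words, which is often enough to recognize a known payload in the logs. A portable user-space analogue of the formatting (the fault-tolerant fetch is what get_user() adds in the kernel; the MIPS words below are illustrative):

#include <stdio.h>

/* Dump five words the way pax_report_insns does, minus the
 * get_user() fault handling (a real faulting PC may be unmapped). */
static void report_insns(const unsigned int *pc)
{
    int i;

    printf("PAX: bytes at PC: ");
    for (i = 0; i < 5; i++)
        printf("%08x ", pc[i]);
    printf("\n");
}

int main(void)
{
    /* jr ra / nop / li v0,0xfa1 / syscall / nop */
    unsigned int payload[5] = { 0x03e00008, 0x00000000, 0x24020fa1,
                                0x0000000c, 0x00000000 };

    report_insns(payload);
    return 0;
}
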
7687diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
7688index f1baadd..5472dca 100644
7689--- a/arch/mips/mm/mmap.c
7690+++ b/arch/mips/mm/mmap.c
7691@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7692 struct vm_area_struct *vma;
7693 unsigned long addr = addr0;
7694 int do_color_align;
7695+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7696 struct vm_unmapped_area_info info;
7697
7698 if (unlikely(len > TASK_SIZE))
7699@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7700 do_color_align = 1;
7701
7702 /* requesting a specific address */
7703+
7704+#ifdef CONFIG_PAX_RANDMMAP
7705+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
7706+#endif
7707+
7708 if (addr) {
7709 if (do_color_align)
7710 addr = COLOUR_ALIGN(addr, pgoff);
7711@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7712 addr = PAGE_ALIGN(addr);
7713
7714 vma = find_vma(mm, addr);
7715- if (TASK_SIZE - len >= addr &&
7716- (!vma || addr + len <= vma->vm_start))
7717+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7718 return addr;
7719 }
7720
7721 info.length = len;
7722 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
7723 info.align_offset = pgoff << PAGE_SHIFT;
7724+ info.threadstack_offset = offset;
7725
7726 if (dir == DOWN) {
7727 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
7728@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7729 {
7730 unsigned long random_factor = 0UL;
7731
7732+#ifdef CONFIG_PAX_RANDMMAP
7733+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7734+#endif
7735+
7736 if (current->flags & PF_RANDOMIZE) {
7737 random_factor = get_random_int();
7738 random_factor = random_factor << PAGE_SHIFT;
7739@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7740
7741 if (mmap_is_legacy()) {
7742 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
7743+
7744+#ifdef CONFIG_PAX_RANDMMAP
7745+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7746+ mm->mmap_base += mm->delta_mmap;
7747+#endif
7748+
7749 mm->get_unmapped_area = arch_get_unmapped_area;
7750 } else {
7751 mm->mmap_base = mmap_base(random_factor);
7752+
7753+#ifdef CONFIG_PAX_RANDMMAP
7754+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7755+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7756+#endif
7757+
7758 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7759 }
7760 }
7761
7762-static inline unsigned long brk_rnd(void)
7763-{
7764- unsigned long rnd = get_random_int();
7765-
7766- rnd = rnd << PAGE_SHIFT;
7767- /* 8MB for 32bit, 256MB for 64bit */
7768- if (TASK_IS_32BIT_ADDR)
7769- rnd = rnd & 0x7ffffful;
7770- else
7771- rnd = rnd & 0xffffffful;
7772-
7773- return rnd;
7774-}
7775-
7776-unsigned long arch_randomize_brk(struct mm_struct *mm)
7777-{
7778- unsigned long base = mm->brk;
7779- unsigned long ret;
7780-
7781- ret = PAGE_ALIGN(base + brk_rnd());
7782-
7783- if (ret < mm->brk)
7784- return mm->brk;
7785-
7786- return ret;
7787-}
7788-
7789 int __virt_addr_valid(const volatile void *kaddr)
7790 {
7791 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
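
Under MF_PAX_RANDMMAP, arch_pick_mmap_layout() shifts the bottom-up base upward by delta_mmap and the top-down base downward by delta_mmap + delta_stack, keeping randomized mappings clear of the (separately randomized) stack gap. The arithmetic in isolation, with illustrative base and delta values:

#include <stdio.h>

int main(void)
{
    unsigned long task_unmapped_base = 0x40000000UL;  /* illustrative */
    unsigned long topdown_base       = 0x7f000000UL;  /* illustrative */
    unsigned long delta_mmap         = 0x000a5000UL;  /* randomized   */
    unsigned long delta_stack        = 0x00031000UL;  /* randomized   */

    /* Legacy layout: push the bottom-up search base up. */
    printf("legacy  base: %#lx\n", task_unmapped_base + delta_mmap);

    /* Top-down layout: pull the base down past both deltas. */
    printf("topdown base: %#lx\n",
           topdown_base - (delta_mmap + delta_stack));
    return 0;
}
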
7792diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
7793index 59cccd9..f39ac2f 100644
7794--- a/arch/mips/pci/pci-octeon.c
7795+++ b/arch/mips/pci/pci-octeon.c
7796@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
7797
7798
7799 static struct pci_ops octeon_pci_ops = {
7800- octeon_read_config,
7801- octeon_write_config,
7802+ .read = octeon_read_config,
7803+ .write = octeon_write_config,
7804 };
7805
7806 static struct resource octeon_pci_mem_resource = {
7807diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
7808index 5e36c33..eb4a17b 100644
7809--- a/arch/mips/pci/pcie-octeon.c
7810+++ b/arch/mips/pci/pcie-octeon.c
7811@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
7812 }
7813
7814 static struct pci_ops octeon_pcie0_ops = {
7815- octeon_pcie0_read_config,
7816- octeon_pcie0_write_config,
7817+ .read = octeon_pcie0_read_config,
7818+ .write = octeon_pcie0_write_config,
7819 };
7820
7821 static struct resource octeon_pcie0_mem_resource = {
7822@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = {
7823 };
7824
7825 static struct pci_ops octeon_pcie1_ops = {
7826- octeon_pcie1_read_config,
7827- octeon_pcie1_write_config,
7828+ .read = octeon_pcie1_read_config,
7829+ .write = octeon_pcie1_write_config,
7830 };
7831
7832 static struct resource octeon_pcie1_mem_resource = {
7833@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = {
7834 };
7835
7836 static struct pci_ops octeon_dummy_ops = {
7837- octeon_dummy_read_config,
7838- octeon_dummy_write_config,
7839+ .read = octeon_dummy_read_config,
7840+ .write = octeon_dummy_write_config,
7841 };
7842
7843 static struct resource octeon_dummy_mem_resource = {
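
The pci_ops conversions in these two files replace positional initializers with designated ones: positional initialization silently rebinds every member if struct pci_ops is ever reordered or grows a field, while .read =/.write = binds by name. Illustrated on a stripped-down ops struct (hypothetical struct, same pattern):

#include <stdio.h>

struct ops {
    int (*read)(int reg);
    int (*write)(int reg, int val);
};

static int my_read(int reg)           { return reg; }
static int my_write(int reg, int val) { return reg + val; }

/* Fragile: meaning silently changes if struct ops is ever reordered
 * or grows a new first member. */
static struct ops positional = { my_read, my_write };

/* Robust: binds by member name, which is why the patch converts the
 * octeon pci_ops initializers to this form. */
static struct ops designated = {
    .read  = my_read,
    .write = my_write,
};

int main(void)
{
    printf("%d %d\n", positional.read(1), designated.write(1, 2));
    return 0;
}
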
7844diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
7845index a2358b4..7cead4f 100644
7846--- a/arch/mips/sgi-ip27/ip27-nmi.c
7847+++ b/arch/mips/sgi-ip27/ip27-nmi.c
7848@@ -187,9 +187,9 @@ void
7849 cont_nmi_dump(void)
7850 {
7851 #ifndef REAL_NMI_SIGNAL
7852- static atomic_t nmied_cpus = ATOMIC_INIT(0);
7853+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
7854
7855- atomic_inc(&nmied_cpus);
7856+ atomic_inc_unchecked(&nmied_cpus);
7857 #endif
7858 /*
7859 * Only allow 1 cpu to proceed
7860@@ -233,7 +233,7 @@ cont_nmi_dump(void)
7861 udelay(10000);
7862 }
7863 #else
7864- while (atomic_read(&nmied_cpus) != num_online_cpus());
7865+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
7866 #endif
7867
7868 /*
7869diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
7870index a046b30..6799527 100644
7871--- a/arch/mips/sni/rm200.c
7872+++ b/arch/mips/sni/rm200.c
7873@@ -270,7 +270,7 @@ spurious_8259A_irq:
7874 "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
7875 spurious_irq_mask |= irqmask;
7876 }
7877- atomic_inc(&irq_err_count);
7878+ atomic_inc_unchecked(&irq_err_count);
7879 /*
7880 * Theoretically we do not have to handle this IRQ,
7881 * but in Linux this does not cause problems and is
7882diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
7883index 41e873b..34d33a7 100644
7884--- a/arch/mips/vr41xx/common/icu.c
7885+++ b/arch/mips/vr41xx/common/icu.c
7886@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
7887
7888 printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
7889
7890- atomic_inc(&irq_err_count);
7891+ atomic_inc_unchecked(&irq_err_count);
7892
7893 return -1;
7894 }
7895diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
7896index ae0e4ee..e8f0692 100644
7897--- a/arch/mips/vr41xx/common/irq.c
7898+++ b/arch/mips/vr41xx/common/irq.c
7899@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
7900 irq_cascade_t *cascade;
7901
7902 if (irq >= NR_IRQS) {
7903- atomic_inc(&irq_err_count);
7904+ atomic_inc_unchecked(&irq_err_count);
7905 return;
7906 }
7907
7908@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
7909 ret = cascade->get_irq(irq);
7910 irq = ret;
7911 if (ret < 0)
7912- atomic_inc(&irq_err_count);
7913+ atomic_inc_unchecked(&irq_err_count);
7914 else
7915 irq_dispatch(irq);
7916 if (!irqd_irq_disabled(idata) && chip->irq_unmask)
7917diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7918index 967d144..db12197 100644
7919--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
7920+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7921@@ -11,12 +11,14 @@
7922 #ifndef _ASM_PROC_CACHE_H
7923 #define _ASM_PROC_CACHE_H
7924
7925+#include <linux/const.h>
7926+
7927 /* L1 cache */
7928
7929 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7930 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
7931-#define L1_CACHE_BYTES 16 /* bytes per entry */
7932 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
7933+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7934 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
7935
7936 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7937diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7938index bcb5df2..84fabd2 100644
7939--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7940+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7941@@ -16,13 +16,15 @@
7942 #ifndef _ASM_PROC_CACHE_H
7943 #define _ASM_PROC_CACHE_H
7944
7945+#include <linux/const.h>
7946+
7947 /*
7948 * L1 cache
7949 */
7950 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7951 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
7952-#define L1_CACHE_BYTES 32 /* bytes per entry */
7953 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7954+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7955 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7956
7957 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7958diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7959index 4ce7a01..449202a 100644
7960--- a/arch/openrisc/include/asm/cache.h
7961+++ b/arch/openrisc/include/asm/cache.h
7962@@ -19,11 +19,13 @@
7963 #ifndef __ASM_OPENRISC_CACHE_H
7964 #define __ASM_OPENRISC_CACHE_H
7965
7966+#include <linux/const.h>
7967+
7968 /* FIXME: How can we replace these with values from the CPU...
7969 * they shouldn't be hard-coded!
7970 */
7971
7972-#define L1_CACHE_BYTES 16
7973 #define L1_CACHE_SHIFT 4
7974+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7975
7976 #endif /* __ASM_OPENRISC_CACHE_H */
7977diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7978index 0be2db2..1b0f26d 100644
7979--- a/arch/parisc/include/asm/atomic.h
7980+++ b/arch/parisc/include/asm/atomic.h
7981@@ -248,6 +248,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7982 return dec;
7983 }
7984
7985+#define atomic64_read_unchecked(v) atomic64_read(v)
7986+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7987+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7988+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7989+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7990+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7991+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7992+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7993+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7994+
7995 #endif /* !CONFIG_64BIT */
7996
7997
7998diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7999index 47f11c7..3420df2 100644
8000--- a/arch/parisc/include/asm/cache.h
8001+++ b/arch/parisc/include/asm/cache.h
8002@@ -5,6 +5,7 @@
8003 #ifndef __ARCH_PARISC_CACHE_H
8004 #define __ARCH_PARISC_CACHE_H
8005
8006+#include <linux/const.h>
8007
8008 /*
8009 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
8010@@ -15,13 +16,13 @@
8011 * just ruin performance.
8012 */
8013 #ifdef CONFIG_PA20
8014-#define L1_CACHE_BYTES 64
8015 #define L1_CACHE_SHIFT 6
8016 #else
8017-#define L1_CACHE_BYTES 32
8018 #define L1_CACHE_SHIFT 5
8019 #endif
8020
8021+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8022+
8023 #ifndef __ASSEMBLY__
8024
8025 #define SMP_CACHE_BYTES L1_CACHE_BYTES
8026diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
8027index 3391d06..c23a2cc 100644
8028--- a/arch/parisc/include/asm/elf.h
8029+++ b/arch/parisc/include/asm/elf.h
8030@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
8031
8032 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
8033
8034+#ifdef CONFIG_PAX_ASLR
8035+#define PAX_ELF_ET_DYN_BASE 0x10000UL
8036+
8037+#define PAX_DELTA_MMAP_LEN 16
8038+#define PAX_DELTA_STACK_LEN 16
8039+#endif
8040+
8041 /* This yields a mask that user programs can use to figure out what
8042 instruction set this CPU supports. This could be done in user space,
8043 but it's not easy, and we've already done it here. */
8044diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
8045index f213f5b..0af3e8e 100644
8046--- a/arch/parisc/include/asm/pgalloc.h
8047+++ b/arch/parisc/include/asm/pgalloc.h
8048@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
8049 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
8050 }
8051
8052+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
8053+{
8054+ pgd_populate(mm, pgd, pmd);
8055+}
8056+
8057 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
8058 {
8059 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
8060@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
8061 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
8062 #define pmd_free(mm, x) do { } while (0)
8063 #define pgd_populate(mm, pmd, pte) BUG()
8064+#define pgd_populate_kernel(mm, pmd, pte) BUG()
8065
8066 #endif
8067
8068diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
8069index 22b89d1..ce34230 100644
8070--- a/arch/parisc/include/asm/pgtable.h
8071+++ b/arch/parisc/include/asm/pgtable.h
8072@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
8073 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
8074 #define PAGE_COPY PAGE_EXECREAD
8075 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
8076+
8077+#ifdef CONFIG_PAX_PAGEEXEC
8078+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
8079+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
8080+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
8081+#else
8082+# define PAGE_SHARED_NOEXEC PAGE_SHARED
8083+# define PAGE_COPY_NOEXEC PAGE_COPY
8084+# define PAGE_READONLY_NOEXEC PAGE_READONLY
8085+#endif
8086+
8087 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
8088 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
8089 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
8090diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
8091index 4006964..fcb3cc2 100644
8092--- a/arch/parisc/include/asm/uaccess.h
8093+++ b/arch/parisc/include/asm/uaccess.h
8094@@ -246,10 +246,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
8095 const void __user *from,
8096 unsigned long n)
8097 {
8098- int sz = __compiletime_object_size(to);
8099+ size_t sz = __compiletime_object_size(to);
8100 int ret = -EFAULT;
8101
8102- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
8103+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
8104 ret = __copy_from_user(to, from, n);
8105 else
8106 copy_from_user_overflow();
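
The copy_from_user() fix above is a signedness repair: __compiletime_object_size() reports "size unknown" as (size_t)-1, and holding that in an int both truncates large sizes and mixes signed/unsigned semantics in the sz >= n test. A compact demonstration of the divergence on a 64-bit host:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    size_t objsize = 0x100000000UL;  /* 4 GB object on a 64-bit host */
    unsigned long n = 64;

    int sz_bad = (int)objsize;       /* implementation-defined: truncates to 0 */
    size_t sz_ok = objsize;

    /* With an int, the "size unknown or big enough" test diverges
     * from reality; with size_t it stays correct. */
    printf("int sz:    %s\n",
           (sz_bad == -1 || (size_t)sz_bad >= n) ? "copy allowed" : "copy flagged");
    printf("size_t sz: %s\n",
           (sz_ok == (size_t)-1 || sz_ok >= n) ? "copy allowed" : "copy flagged");
    return 0;
}
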
8107diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
8108index 50dfafc..b9fc230 100644
8109--- a/arch/parisc/kernel/module.c
8110+++ b/arch/parisc/kernel/module.c
8111@@ -98,16 +98,38 @@
8112
8113 /* three functions to determine where in the module core
8114 * or init pieces the location is */
8115+static inline int in_init_rx(struct module *me, void *loc)
8116+{
8117+ return (loc >= me->module_init_rx &&
8118+ loc < (me->module_init_rx + me->init_size_rx));
8119+}
8120+
8121+static inline int in_init_rw(struct module *me, void *loc)
8122+{
8123+ return (loc >= me->module_init_rw &&
8124+ loc < (me->module_init_rw + me->init_size_rw));
8125+}
8126+
8127 static inline int in_init(struct module *me, void *loc)
8128 {
8129- return (loc >= me->module_init &&
8130- loc <= (me->module_init + me->init_size));
8131+ return in_init_rx(me, loc) || in_init_rw(me, loc);
8132+}
8133+
8134+static inline int in_core_rx(struct module *me, void *loc)
8135+{
8136+ return (loc >= me->module_core_rx &&
8137+ loc < (me->module_core_rx + me->core_size_rx));
8138+}
8139+
8140+static inline int in_core_rw(struct module *me, void *loc)
8141+{
8142+ return (loc >= me->module_core_rw &&
8143+ loc < (me->module_core_rw + me->core_size_rw));
8144 }
8145
8146 static inline int in_core(struct module *me, void *loc)
8147 {
8148- return (loc >= me->module_core &&
8149- loc <= (me->module_core + me->core_size));
8150+ return in_core_rx(me, loc) || in_core_rw(me, loc);
8151 }
8152
8153 static inline int in_local(struct module *me, void *loc)
8154@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
8155 }
8156
8157 /* align things a bit */
8158- me->core_size = ALIGN(me->core_size, 16);
8159- me->arch.got_offset = me->core_size;
8160- me->core_size += gots * sizeof(struct got_entry);
8161+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
8162+ me->arch.got_offset = me->core_size_rw;
8163+ me->core_size_rw += gots * sizeof(struct got_entry);
8164
8165- me->core_size = ALIGN(me->core_size, 16);
8166- me->arch.fdesc_offset = me->core_size;
8167- me->core_size += fdescs * sizeof(Elf_Fdesc);
8168+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
8169+ me->arch.fdesc_offset = me->core_size_rw;
8170+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
8171
8172 me->arch.got_max = gots;
8173 me->arch.fdesc_max = fdescs;
8174@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
8175
8176 BUG_ON(value == 0);
8177
8178- got = me->module_core + me->arch.got_offset;
8179+ got = me->module_core_rw + me->arch.got_offset;
8180 for (i = 0; got[i].addr; i++)
8181 if (got[i].addr == value)
8182 goto out;
8183@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
8184 #ifdef CONFIG_64BIT
8185 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
8186 {
8187- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
8188+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
8189
8190 if (!value) {
8191 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
8192@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
8193
8194 /* Create new one */
8195 fdesc->addr = value;
8196- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
8197+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
8198 return (Elf_Addr)fdesc;
8199 }
8200 #endif /* CONFIG_64BIT */
8201@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
8202
8203 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
8204 end = table + sechdrs[me->arch.unwind_section].sh_size;
8205- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
8206+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
8207
8208 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
8209 me->arch.unwind_section, table, end, gp);
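
The module.c changes assume the RX/RW module split grsecurity applies elsewhere in this patch: code lives in module_core_rx (mapped read-execute), while the GOT and function descriptors move into module_core_rw (read-write, never executable), so the got/fdesc reservations switch from core_size to core_size_rw. A schematic of that bookkeeping, reduced to the fields the hunks touch:

#include <stdio.h>

/* Reduced model of the split module layout used by the patch. */
struct module_layout {
    unsigned long core_size_rx;   /* text: R-X */
    unsigned long core_size_rw;   /* data/GOT/fdescs: RW- */
    unsigned long got_offset;     /* inside the RW half */
    unsigned long fdesc_offset;   /* inside the RW half */
};

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

static void reserve_got_fdesc(struct module_layout *m,
                              unsigned long gots, unsigned long fdescs,
                              unsigned long got_entry, unsigned long fdesc_entry)
{
    m->core_size_rw  = ALIGN_UP(m->core_size_rw, 16);
    m->got_offset    = m->core_size_rw;
    m->core_size_rw += gots * got_entry;

    m->core_size_rw  = ALIGN_UP(m->core_size_rw, 16);
    m->fdesc_offset  = m->core_size_rw;
    m->core_size_rw += fdescs * fdesc_entry;
}

int main(void)
{
    struct module_layout m = { .core_size_rx = 0x3000, .core_size_rw = 0x123 };

    reserve_got_fdesc(&m, 10, 4, 8, 16);
    printf("got@%#lx fdesc@%#lx rw=%#lx\n",
           m.got_offset, m.fdesc_offset, m.core_size_rw);
    return 0;
}
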
8210diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
8211index e1ffea2..46ed66e 100644
8212--- a/arch/parisc/kernel/sys_parisc.c
8213+++ b/arch/parisc/kernel/sys_parisc.c
8214@@ -89,6 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8215 unsigned long task_size = TASK_SIZE;
8216 int do_color_align, last_mmap;
8217 struct vm_unmapped_area_info info;
8218+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
8219
8220 if (len > task_size)
8221 return -ENOMEM;
8222@@ -106,6 +107,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8223 goto found_addr;
8224 }
8225
8226+#ifdef CONFIG_PAX_RANDMMAP
8227+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8228+#endif
8229+
8230 if (addr) {
8231 if (do_color_align && last_mmap)
8232 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
8233@@ -124,6 +129,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8234 info.high_limit = mmap_upper_limit();
8235 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
8236 info.align_offset = shared_align_offset(last_mmap, pgoff);
8237+ info.threadstack_offset = offset;
8238 addr = vm_unmapped_area(&info);
8239
8240 found_addr:
8241@@ -143,6 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8242 unsigned long addr = addr0;
8243 int do_color_align, last_mmap;
8244 struct vm_unmapped_area_info info;
8245+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
8246
8247 #ifdef CONFIG_64BIT
8248 /* This should only ever run for 32-bit processes. */
8249@@ -167,6 +174,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8250 }
8251
8252 /* requesting a specific address */
8253+#ifdef CONFIG_PAX_RANDMMAP
8254+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8255+#endif
8256+
8257 if (addr) {
8258 if (do_color_align && last_mmap)
8259 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
8260@@ -184,6 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8261 info.high_limit = mm->mmap_base;
8262 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
8263 info.align_offset = shared_align_offset(last_mmap, pgoff);
8264+ info.threadstack_offset = offset;
8265 addr = vm_unmapped_area(&info);
8266 if (!(addr & ~PAGE_MASK))
8267 goto found_addr;
8268@@ -249,6 +261,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8269 mm->mmap_legacy_base = mmap_legacy_base();
8270 mm->mmap_base = mmap_upper_limit();
8271
8272+#ifdef CONFIG_PAX_RANDMMAP
8273+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
8274+ mm->mmap_legacy_base += mm->delta_mmap;
8275+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8276+ }
8277+#endif
8278+
8279 if (mmap_is_legacy()) {
8280 mm->mmap_base = mm->mmap_legacy_base;
8281 mm->get_unmapped_area = arch_get_unmapped_area;
8282diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
8283index 47ee620..1107387 100644
8284--- a/arch/parisc/kernel/traps.c
8285+++ b/arch/parisc/kernel/traps.c
8286@@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
8287
8288 down_read(&current->mm->mmap_sem);
8289 vma = find_vma(current->mm,regs->iaoq[0]);
8290- if (vma && (regs->iaoq[0] >= vma->vm_start)
8291- && (vma->vm_flags & VM_EXEC)) {
8292-
8293+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
8294 fault_address = regs->iaoq[0];
8295 fault_space = regs->iasq[0];
8296
8297diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
8298index 3ca9c11..d163ef7 100644
8299--- a/arch/parisc/mm/fault.c
8300+++ b/arch/parisc/mm/fault.c
8301@@ -15,6 +15,7 @@
8302 #include <linux/sched.h>
8303 #include <linux/interrupt.h>
8304 #include <linux/module.h>
8305+#include <linux/unistd.h>
8306
8307 #include <asm/uaccess.h>
8308 #include <asm/traps.h>
8309@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
8310 static unsigned long
8311 parisc_acctyp(unsigned long code, unsigned int inst)
8312 {
8313- if (code == 6 || code == 16)
8314+ if (code == 6 || code == 7 || code == 16)
8315 return VM_EXEC;
8316
8317 switch (inst & 0xf0000000) {
8318@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
8319 }
8320 #endif
8321
8322+#ifdef CONFIG_PAX_PAGEEXEC
8323+/*
8324+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
8325+ *
8326+ * returns 1 when task should be killed
8327+ * 2 when rt_sigreturn trampoline was detected
8328+ * 3 when unpatched PLT trampoline was detected
8329+ */
8330+static int pax_handle_fetch_fault(struct pt_regs *regs)
8331+{
8332+ int err;
8333+
8334+#ifdef CONFIG_PAX_EMUPLT
8335+
8336+ do { /* PaX: unpatched PLT emulation */
8337+ unsigned int bl, depwi;
8338+
8339+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
8340+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
8341+
8342+ if (err)
8343+ break;
8344+
8345+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
8346+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
8347+
8348+ err = get_user(ldw, (unsigned int *)addr);
8349+ err |= get_user(bv, (unsigned int *)(addr+4));
8350+ err |= get_user(ldw2, (unsigned int *)(addr+8));
8351+
8352+ if (err)
8353+ break;
8354+
8355+ if (ldw == 0x0E801096U &&
8356+ bv == 0xEAC0C000U &&
8357+ ldw2 == 0x0E881095U)
8358+ {
8359+ unsigned int resolver, map;
8360+
8361+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
8362+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
8363+ if (err)
8364+ break;
8365+
8366+ regs->gr[20] = instruction_pointer(regs)+8;
8367+ regs->gr[21] = map;
8368+ regs->gr[22] = resolver;
8369+ regs->iaoq[0] = resolver | 3UL;
8370+ regs->iaoq[1] = regs->iaoq[0] + 4;
8371+ return 3;
8372+ }
8373+ }
8374+ } while (0);
8375+#endif
8376+
8377+#ifdef CONFIG_PAX_EMUTRAMP
8378+
8379+#ifndef CONFIG_PAX_EMUSIGRT
8380+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
8381+ return 1;
8382+#endif
8383+
8384+ do { /* PaX: rt_sigreturn emulation */
8385+ unsigned int ldi1, ldi2, bel, nop;
8386+
8387+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
8388+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
8389+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
8390+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
8391+
8392+ if (err)
8393+ break;
8394+
8395+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
8396+ ldi2 == 0x3414015AU &&
8397+ bel == 0xE4008200U &&
8398+ nop == 0x08000240U)
8399+ {
8400+ regs->gr[25] = (ldi1 & 2) >> 1;
8401+ regs->gr[20] = __NR_rt_sigreturn;
8402+ regs->gr[31] = regs->iaoq[1] + 16;
8403+ regs->sr[0] = regs->iasq[1];
8404+ regs->iaoq[0] = 0x100UL;
8405+ regs->iaoq[1] = regs->iaoq[0] + 4;
8406+ regs->iasq[0] = regs->sr[2];
8407+ regs->iasq[1] = regs->sr[2];
8408+ return 2;
8409+ }
8410+ } while (0);
8411+#endif
8412+
8413+ return 1;
8414+}
8415+
8416+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
8417+{
8418+ unsigned long i;
8419+
8420+ printk(KERN_ERR "PAX: bytes at PC: ");
8421+ for (i = 0; i < 5; i++) {
8422+ unsigned int c;
8423+ if (get_user(c, (unsigned int __user *)pc+i))
8424+ printk(KERN_CONT "???????? ");
8425+ else
8426+ printk(KERN_CONT "%08x ", c);
8427+ }
8428+ printk("\n");
8429+}
8430+#endif
8431+
8432 int fixup_exception(struct pt_regs *regs)
8433 {
8434 const struct exception_table_entry *fix;
8435@@ -234,8 +345,33 @@ retry:
8436
8437 good_area:
8438
8439- if ((vma->vm_flags & acc_type) != acc_type)
8440+ if ((vma->vm_flags & acc_type) != acc_type) {
8441+
8442+#ifdef CONFIG_PAX_PAGEEXEC
8443+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
8444+ (address & ~3UL) == instruction_pointer(regs))
8445+ {
8446+ up_read(&mm->mmap_sem);
8447+ switch (pax_handle_fetch_fault(regs)) {
8448+
8449+#ifdef CONFIG_PAX_EMUPLT
8450+ case 3:
8451+ return;
8452+#endif
8453+
8454+#ifdef CONFIG_PAX_EMUTRAMP
8455+ case 2:
8456+ return;
8457+#endif
8458+
8459+ }
8460+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
8461+ do_group_exit(SIGKILL);
8462+ }
8463+#endif
8464+
8465 goto bad_area;
8466+ }
8467
8468 /*
8469 * If for any reason at all we couldn't handle the fault, make
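
pax_handle_fetch_fault() above recognizes an unpatched PLT stub purely by matching fixed instruction words around the faulting PC; only if the signature matches does it synthesize the resolver call. Detached from the parisc specifics, the shape of the test is a sketch like the following, reusing the first two opcode constants the patch matches (in the kernel the reads go through get_user() because the faulting page is untrusted user memory):

#include <stdint.h>
#include <stdio.h>

#define INSN_BL    0xEA9F1FDDu	/* bl, as matched by the patch    */
#define INSN_DEPWI 0xD6801C1Eu	/* depwi, as matched by the patch */

/* 1 when the two words at pc carry the trampoline signature. */
static int looks_like_unpatched_plt(const uint32_t *pc)
{
	return pc[0] == INSN_BL && pc[1] == INSN_DEPWI;
}

int main(void)
{
	uint32_t stub[2]  = { INSN_BL, INSN_DEPWI };
	uint32_t other[2] = { 0x12345678u, 0x9ABCDEF0u };

	printf("stub: %d, other: %d\n",
	       looks_like_unpatched_plt(stub),
	       looks_like_unpatched_plt(other));
	return 0;
}
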
8470diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
8471index 80b94b0..a3274fb 100644
8472--- a/arch/powerpc/Kconfig
8473+++ b/arch/powerpc/Kconfig
8474@@ -398,6 +398,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
8475 config KEXEC
8476 bool "kexec system call"
8477 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
8478+ depends on !GRKERNSEC_KMEM
8479 help
8480 kexec is a system call that implements the ability to shutdown your
8481 current kernel, and to start another kernel. It is like a reboot
8482diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
8483index 28992d0..c797b20 100644
8484--- a/arch/powerpc/include/asm/atomic.h
8485+++ b/arch/powerpc/include/asm/atomic.h
8486@@ -519,6 +519,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
8487 return t1;
8488 }
8489
8490+#define atomic64_read_unchecked(v) atomic64_read(v)
8491+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
8492+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
8493+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
8494+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
8495+#define atomic64_inc_unchecked(v) atomic64_inc(v)
8496+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
8497+#define atomic64_dec_unchecked(v) atomic64_dec(v)
8498+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
8499+
8500 #endif /* __powerpc64__ */
8501
8502 #endif /* __KERNEL__ */
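
The *_unchecked aliases exist because PAX_REFCOUNT makes the plain atomic operations detect signed overflow; call sites where wraparound is deliberate (statistics, sequence cookies) opt out through the _unchecked names. On powerpc and s390 this patch maps them straight back to the plain operations, so the split only has teeth on architectures with checked variants. A rough model of the distinction, using __builtin_add_overflow in place of the architecture-specific trap:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Checked add: refuse to wrap, as PAX_REFCOUNT arranges. */
static long atomic_add_checked(long *v, long a)
{
	long r;

	if (__builtin_add_overflow(*v, a, &r)) {
		fprintf(stderr, "refcount overflow detected\n");
		abort();
	}
	return *v = r;
}

/* Unchecked add: two's-complement wraparound is acceptable here. */
static long atomic_add_unchecked(long *v, long a)
{
	return *v = (long)((unsigned long)*v + (unsigned long)a);
}

int main(void)
{
	long stat = LONG_MAX;
	printf("%ld\n", atomic_add_unchecked(&stat, 1));	/* wraps quietly */

	long ref = LONG_MAX;
	atomic_add_checked(&ref, 1);				/* aborts */
	return 0;
}
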
8503diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
8504index bab79a1..4a3eabc 100644
8505--- a/arch/powerpc/include/asm/barrier.h
8506+++ b/arch/powerpc/include/asm/barrier.h
8507@@ -73,7 +73,7 @@
8508 do { \
8509 compiletime_assert_atomic_type(*p); \
8510 __lwsync(); \
8511- ACCESS_ONCE(*p) = (v); \
8512+ ACCESS_ONCE_RW(*p) = (v); \
8513 } while (0)
8514
8515 #define smp_load_acquire(p) \
8516diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
8517index ed0afc1..0332825 100644
8518--- a/arch/powerpc/include/asm/cache.h
8519+++ b/arch/powerpc/include/asm/cache.h
8520@@ -3,6 +3,7 @@
8521
8522 #ifdef __KERNEL__
8523
8524+#include <linux/const.h>
8525
8526 /* bytes per L1 cache line */
8527 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
8528@@ -22,7 +23,7 @@
8529 #define L1_CACHE_SHIFT 7
8530 #endif
8531
8532-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8533+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8534
8535 #define SMP_CACHE_BYTES L1_CACHE_BYTES
8536
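
Wrapping the constant in _AC(1,UL) keeps L1_CACHE_BYTES usable from both C and assembler: the UL suffix is attached only in C builds, since it is not valid assembler syntax, and the C expansion gives the macro unsigned long width for 64-bit address arithmetic. A compilable illustration, with the macro bodies copied from include/uapi/linux/const.h (assembler builds define _AC(X,Y) as plain X):

#include <stdio.h>

#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)

#define L1_CACHE_SHIFT	7
#define L1_CACHE_BYTES	(_AC(1,UL) << L1_CACHE_SHIFT)

int main(void)
{
	/* unsigned long, so masks built from it don't truncate on 64-bit */
	printf("%zu bytes, operand width %zu\n",
	       (size_t)L1_CACHE_BYTES, sizeof(L1_CACHE_BYTES));
	return 0;
}
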
8537diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
8538index 888d8f3..66f581c 100644
8539--- a/arch/powerpc/include/asm/elf.h
8540+++ b/arch/powerpc/include/asm/elf.h
8541@@ -28,8 +28,19 @@
8542 the loader. We need to make sure that it is out of the way of the program
8543 that it will "exec", and that there is sufficient room for the brk. */
8544
8545-extern unsigned long randomize_et_dyn(unsigned long base);
8546-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
8547+#define ELF_ET_DYN_BASE (0x20000000)
8548+
8549+#ifdef CONFIG_PAX_ASLR
8550+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
8551+
8552+#ifdef __powerpc64__
8553+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
8554+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
8555+#else
8556+#define PAX_DELTA_MMAP_LEN 15
8557+#define PAX_DELTA_STACK_LEN 15
8558+#endif
8559+#endif
8560
8561 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
8562
8563@@ -129,10 +140,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8564 (0x7ff >> (PAGE_SHIFT - 12)) : \
8565 (0x3ffff >> (PAGE_SHIFT - 12)))
8566
8567-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8568-#define arch_randomize_brk arch_randomize_brk
8569-
8570-
8571 #ifdef CONFIG_SPU_BASE
8572 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
8573 #define NT_SPU 1
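
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN are entropy widths counted in page-granularity bits: PaX masks a random value to that many bits and shifts it by PAGE_SHIFT to obtain delta_mmap and delta_stack. A sketch of that derivation with an illustrative random source and helper name (the real computation lives in this patch's fs/binfmt_elf.c hunks):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12

/* len_bits of page-level entropy: 16 for 32-bit tasks on powerpc64,
 * 28 for 64-bit tasks, per the definitions above. */
static unsigned long pax_delta(unsigned int len_bits)
{
	unsigned long r = ((unsigned long)rand() << 16) ^ (unsigned long)rand();

	return (r & ((1UL << len_bits) - 1)) << PAGE_SHIFT;
}

int main(void)
{
	printf("32-bit delta range: up to %#lx\n", ((1UL << 16) - 1) << PAGE_SHIFT);
	printf("64-bit delta range: up to %#lx\n", ((1UL << 28) - 1) << PAGE_SHIFT);
	printf("sample delta:       %#lx\n", pax_delta(28));
	return 0;
}
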
8574diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
8575index 8196e9c..d83a9f3 100644
8576--- a/arch/powerpc/include/asm/exec.h
8577+++ b/arch/powerpc/include/asm/exec.h
8578@@ -4,6 +4,6 @@
8579 #ifndef _ASM_POWERPC_EXEC_H
8580 #define _ASM_POWERPC_EXEC_H
8581
8582-extern unsigned long arch_align_stack(unsigned long sp);
8583+#define arch_align_stack(x) ((x) & ~0xfUL)
8584
8585 #endif /* _ASM_POWERPC_EXEC_H */
8586diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
8587index 5acabbd..7ea14fa 100644
8588--- a/arch/powerpc/include/asm/kmap_types.h
8589+++ b/arch/powerpc/include/asm/kmap_types.h
8590@@ -10,7 +10,7 @@
8591 * 2 of the License, or (at your option) any later version.
8592 */
8593
8594-#define KM_TYPE_NR 16
8595+#define KM_TYPE_NR 17
8596
8597 #endif /* __KERNEL__ */
8598 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
8599diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
8600index b8da913..60b608a 100644
8601--- a/arch/powerpc/include/asm/local.h
8602+++ b/arch/powerpc/include/asm/local.h
8603@@ -9,15 +9,26 @@ typedef struct
8604 atomic_long_t a;
8605 } local_t;
8606
8607+typedef struct
8608+{
8609+ atomic_long_unchecked_t a;
8610+} local_unchecked_t;
8611+
8612 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
8613
8614 #define local_read(l) atomic_long_read(&(l)->a)
8615+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
8616 #define local_set(l,i) atomic_long_set(&(l)->a, (i))
8617+#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
8618
8619 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
8620+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
8621 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
8622+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
8623 #define local_inc(l) atomic_long_inc(&(l)->a)
8624+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
8625 #define local_dec(l) atomic_long_dec(&(l)->a)
8626+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
8627
8628 static __inline__ long local_add_return(long a, local_t *l)
8629 {
8630@@ -35,6 +46,7 @@ static __inline__ long local_add_return(long a, local_t *l)
8631
8632 return t;
8633 }
8634+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
8635
8636 #define local_add_negative(a, l) (local_add_return((a), (l)) < 0)
8637
8638@@ -54,6 +66,7 @@ static __inline__ long local_sub_return(long a, local_t *l)
8639
8640 return t;
8641 }
8642+#define local_sub_return_unchecked(i, l) atomic_long_sub_return_unchecked((i), (&(l)->a))
8643
8644 static __inline__ long local_inc_return(local_t *l)
8645 {
8646@@ -101,6 +114,8 @@ static __inline__ long local_dec_return(local_t *l)
8647
8648 #define local_cmpxchg(l, o, n) \
8649 (cmpxchg_local(&((l)->a.counter), (o), (n)))
8650+#define local_cmpxchg_unchecked(l, o, n) \
8651+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
8652 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
8653
8654 /**
8655diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
8656index 8565c25..2865190 100644
8657--- a/arch/powerpc/include/asm/mman.h
8658+++ b/arch/powerpc/include/asm/mman.h
8659@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
8660 }
8661 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
8662
8663-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
8664+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
8665 {
8666 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
8667 }
8668diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
8669index 32e4e21..62afb12 100644
8670--- a/arch/powerpc/include/asm/page.h
8671+++ b/arch/powerpc/include/asm/page.h
8672@@ -230,8 +230,9 @@ extern long long virt_phys_offset;
8673 * and needs to be executable. This means the whole heap ends
8674 * up being executable.
8675 */
8676-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8677- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8678+#define VM_DATA_DEFAULT_FLAGS32 \
8679+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8680+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8681
8682 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8683 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8684@@ -259,6 +260,9 @@ extern long long virt_phys_offset;
8685 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
8686 #endif
8687
8688+#define ktla_ktva(addr) (addr)
8689+#define ktva_ktla(addr) (addr)
8690+
8691 #ifndef CONFIG_PPC_BOOK3S_64
8692 /*
8693 * Use the top bit of the higher-level page table entries to indicate whether
8694diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
8695index 88693ce..ac6f9ab 100644
8696--- a/arch/powerpc/include/asm/page_64.h
8697+++ b/arch/powerpc/include/asm/page_64.h
8698@@ -153,15 +153,18 @@ do { \
8699 * stack by default, so in the absence of a PT_GNU_STACK program header
8700 * we turn execute permission off.
8701 */
8702-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8703- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8704+#define VM_STACK_DEFAULT_FLAGS32 \
8705+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8706+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8707
8708 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8709 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8710
8711+#ifndef CONFIG_PAX_PAGEEXEC
8712 #define VM_STACK_DEFAULT_FLAGS \
8713 (is_32bit_task() ? \
8714 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
8715+#endif
8716
8717 #include <asm-generic/getorder.h>
8718
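
VM_DATA_DEFAULT_FLAGS32 in page.h and VM_STACK_DEFAULT_FLAGS32 here change the same way: VM_EXEC is granted only when the task's personality carries READ_IMPLIES_EXEC, instead of unconditionally, so binaries with a proper PT_GNU_STACK header get non-executable data and stack by default. The personality bit is visible from userspace; a small check (the VM_* values mirror the kernel's):

#include <stdio.h>
#include <sys/personality.h>

#define VM_READ  0x1UL
#define VM_WRITE 0x2UL
#define VM_EXEC  0x4UL

static unsigned long default_data_flags(unsigned long pers)
{
	return ((pers & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | VM_READ | VM_WRITE;
}

int main(void)
{
	/* personality(0xffffffff) queries without changing anything. */
	unsigned long pers = (unsigned long)personality(0xffffffff);

	printf("READ_IMPLIES_EXEC %s -> default flags %#lx\n",
	       (pers & READ_IMPLIES_EXEC) ? "set" : "clear",
	       default_data_flags(pers));
	return 0;
}
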
8719diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
8720index 4b0be20..c15a27d 100644
8721--- a/arch/powerpc/include/asm/pgalloc-64.h
8722+++ b/arch/powerpc/include/asm/pgalloc-64.h
8723@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
8724 #ifndef CONFIG_PPC_64K_PAGES
8725
8726 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
8727+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
8728
8729 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
8730 {
8731@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8732 pud_set(pud, (unsigned long)pmd);
8733 }
8734
8735+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8736+{
8737+ pud_populate(mm, pud, pmd);
8738+}
8739+
8740 #define pmd_populate(mm, pmd, pte_page) \
8741 pmd_populate_kernel(mm, pmd, page_address(pte_page))
8742 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
8743@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
8744 #endif
8745
8746 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
8747+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
8748
8749 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
8750 pte_t *pte)
8751diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
8752index d98c1ec..9f61569 100644
8753--- a/arch/powerpc/include/asm/pgtable.h
8754+++ b/arch/powerpc/include/asm/pgtable.h
8755@@ -2,6 +2,7 @@
8756 #define _ASM_POWERPC_PGTABLE_H
8757 #ifdef __KERNEL__
8758
8759+#include <linux/const.h>
8760 #ifndef __ASSEMBLY__
8761 #include <linux/mmdebug.h>
8762 #include <asm/processor.h> /* For TASK_SIZE */
8763diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
8764index 4aad413..85d86bf 100644
8765--- a/arch/powerpc/include/asm/pte-hash32.h
8766+++ b/arch/powerpc/include/asm/pte-hash32.h
8767@@ -21,6 +21,7 @@
8768 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
8769 #define _PAGE_USER 0x004 /* usermode access allowed */
8770 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
8771+#define _PAGE_EXEC _PAGE_GUARDED
8772 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
8773 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
8774 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
8775diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
8776index bffd89d..a6641ed 100644
8777--- a/arch/powerpc/include/asm/reg.h
8778+++ b/arch/powerpc/include/asm/reg.h
8779@@ -251,6 +251,7 @@
8780 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
8781 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
8782 #define DSISR_NOHPTE 0x40000000 /* no translation found */
8783+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
8784 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
8785 #define DSISR_ISSTORE 0x02000000 /* access was a store */
8786 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
8787diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
8788index 5a6614a..d89995d1 100644
8789--- a/arch/powerpc/include/asm/smp.h
8790+++ b/arch/powerpc/include/asm/smp.h
8791@@ -51,7 +51,7 @@ struct smp_ops_t {
8792 int (*cpu_disable)(void);
8793 void (*cpu_die)(unsigned int nr);
8794 int (*cpu_bootable)(unsigned int nr);
8795-};
8796+} __no_const;
8797
8798 extern void smp_send_debugger_break(void);
8799 extern void start_secondary_resume(void);
8800diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
8801index b034ecd..af7e31f 100644
8802--- a/arch/powerpc/include/asm/thread_info.h
8803+++ b/arch/powerpc/include/asm/thread_info.h
8804@@ -107,6 +107,8 @@ static inline struct thread_info *current_thread_info(void)
8805 #if defined(CONFIG_PPC64)
8806 #define TIF_ELF2ABI 18 /* function descriptors must die! */
8807 #endif
8808+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
8809+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
8810
8811 /* as above, but as bit values */
8812 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
8813@@ -125,9 +127,10 @@ static inline struct thread_info *current_thread_info(void)
8814 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8815 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
8816 #define _TIF_NOHZ (1<<TIF_NOHZ)
8817+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8818 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
8819 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
8820- _TIF_NOHZ)
8821+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
8822
8823 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
8824 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
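
The comment above is the reason TIF_GRSEC_SETXID reuses low bit 6 rather than taking a fresh high bit: the powerpc syscall entry path tests these masks with andi., whose immediate operand is only 16 bits wide. A quick check of the constraint:

#include <stdio.h>

#define TIF_GRSEC_SETXID  6
#define _TIF_GRSEC_SETXID (1UL << TIF_GRSEC_SETXID)

int main(void)
{
	unsigned long mask = _TIF_GRSEC_SETXID | 0x81UL;	/* plus other low TIF bits */

	/* powerpc 'andi.' takes a 16-bit unsigned immediate: */
	printf("fits in andi: %s\n", (mask & ~0xffffUL) ? "no" : "yes");
	printf("bit 18 would: %s\n", ((1UL << 18) & ~0xffffUL) ? "no" : "yes");
	return 0;
}
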
8825diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
8826index 9485b43..3bd3c16 100644
8827--- a/arch/powerpc/include/asm/uaccess.h
8828+++ b/arch/powerpc/include/asm/uaccess.h
8829@@ -58,6 +58,7 @@
8830
8831 #endif
8832
8833+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
8834 #define access_ok(type, addr, size) \
8835 (__chk_user_ptr(addr), \
8836 __access_ok((__force unsigned long)(addr), (size), get_fs()))
8837@@ -318,52 +319,6 @@ do { \
8838 extern unsigned long __copy_tofrom_user(void __user *to,
8839 const void __user *from, unsigned long size);
8840
8841-#ifndef __powerpc64__
8842-
8843-static inline unsigned long copy_from_user(void *to,
8844- const void __user *from, unsigned long n)
8845-{
8846- unsigned long over;
8847-
8848- if (access_ok(VERIFY_READ, from, n))
8849- return __copy_tofrom_user((__force void __user *)to, from, n);
8850- if ((unsigned long)from < TASK_SIZE) {
8851- over = (unsigned long)from + n - TASK_SIZE;
8852- return __copy_tofrom_user((__force void __user *)to, from,
8853- n - over) + over;
8854- }
8855- return n;
8856-}
8857-
8858-static inline unsigned long copy_to_user(void __user *to,
8859- const void *from, unsigned long n)
8860-{
8861- unsigned long over;
8862-
8863- if (access_ok(VERIFY_WRITE, to, n))
8864- return __copy_tofrom_user(to, (__force void __user *)from, n);
8865- if ((unsigned long)to < TASK_SIZE) {
8866- over = (unsigned long)to + n - TASK_SIZE;
8867- return __copy_tofrom_user(to, (__force void __user *)from,
8868- n - over) + over;
8869- }
8870- return n;
8871-}
8872-
8873-#else /* __powerpc64__ */
8874-
8875-#define __copy_in_user(to, from, size) \
8876- __copy_tofrom_user((to), (from), (size))
8877-
8878-extern unsigned long copy_from_user(void *to, const void __user *from,
8879- unsigned long n);
8880-extern unsigned long copy_to_user(void __user *to, const void *from,
8881- unsigned long n);
8882-extern unsigned long copy_in_user(void __user *to, const void __user *from,
8883- unsigned long n);
8884-
8885-#endif /* __powerpc64__ */
8886-
8887 static inline unsigned long __copy_from_user_inatomic(void *to,
8888 const void __user *from, unsigned long n)
8889 {
8890@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
8891 if (ret == 0)
8892 return 0;
8893 }
8894+
8895+ if (!__builtin_constant_p(n))
8896+ check_object_size(to, n, false);
8897+
8898 return __copy_tofrom_user((__force void __user *)to, from, n);
8899 }
8900
8901@@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
8902 if (ret == 0)
8903 return 0;
8904 }
8905+
8906+ if (!__builtin_constant_p(n))
8907+ check_object_size(from, n, true);
8908+
8909 return __copy_tofrom_user(to, (__force const void __user *)from, n);
8910 }
8911
8912@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
8913 return __copy_to_user_inatomic(to, from, size);
8914 }
8915
8916+#ifndef __powerpc64__
8917+
8918+static inline unsigned long __must_check copy_from_user(void *to,
8919+ const void __user *from, unsigned long n)
8920+{
8921+ unsigned long over;
8922+
8923+ if ((long)n < 0)
8924+ return n;
8925+
8926+ if (access_ok(VERIFY_READ, from, n)) {
8927+ if (!__builtin_constant_p(n))
8928+ check_object_size(to, n, false);
8929+ return __copy_tofrom_user((__force void __user *)to, from, n);
8930+ }
8931+ if ((unsigned long)from < TASK_SIZE) {
8932+ over = (unsigned long)from + n - TASK_SIZE;
8933+ if (!__builtin_constant_p(n - over))
8934+ check_object_size(to, n - over, false);
8935+ return __copy_tofrom_user((__force void __user *)to, from,
8936+ n - over) + over;
8937+ }
8938+ return n;
8939+}
8940+
8941+static inline unsigned long __must_check copy_to_user(void __user *to,
8942+ const void *from, unsigned long n)
8943+{
8944+ unsigned long over;
8945+
8946+ if ((long)n < 0)
8947+ return n;
8948+
8949+ if (access_ok(VERIFY_WRITE, to, n)) {
8950+ if (!__builtin_constant_p(n))
8951+ check_object_size(from, n, true);
8952+ return __copy_tofrom_user(to, (__force void __user *)from, n);
8953+ }
8954+ if ((unsigned long)to < TASK_SIZE) {
8955+ over = (unsigned long)to + n - TASK_SIZE;
8956+ if (!__builtin_constant_p(n - over))
8957+ check_object_size(from, n - over, true);
8958+ return __copy_tofrom_user(to, (__force void __user *)from,
8959+ n - over) + over;
8960+ }
8961+ return n;
8962+}
8963+
8964+#else /* __powerpc64__ */
8965+
8966+#define __copy_in_user(to, from, size) \
8967+ __copy_tofrom_user((to), (from), (size))
8968+
8969+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
8970+{
8971+ if ((long)n < 0 || n > INT_MAX)
8972+ return n;
8973+
8974+ if (!__builtin_constant_p(n))
8975+ check_object_size(to, n, false);
8976+
8977+ if (likely(access_ok(VERIFY_READ, from, n)))
8978+ n = __copy_from_user(to, from, n);
8979+ else
8980+ memset(to, 0, n);
8981+ return n;
8982+}
8983+
8984+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
8985+{
8986+ if ((long)n < 0 || n > INT_MAX)
8987+ return n;
8988+
8989+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
8990+ if (!__builtin_constant_p(n))
8991+ check_object_size(from, n, true);
8992+ n = __copy_to_user(to, from, n);
8993+ }
8994+ return n;
8995+}
8996+
8997+extern unsigned long copy_in_user(void __user *to, const void __user *from,
8998+ unsigned long n);
8999+
9000+#endif /* __powerpc64__ */
9001+
9002 extern unsigned long __clear_user(void __user *addr, unsigned long size);
9003
9004 static inline unsigned long clear_user(void __user *addr, unsigned long size)
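
The rewritten copy routines add two layers: a sign test that rejects any size with the top bit set before a single byte moves, and check_object_size(), through which PAX_USERCOPY verifies that the kernel buffer really spans n bytes. The sign test alone catches the most common bug shape, an underflowed length calculation; a minimal sketch:

#include <stdio.h>
#include <string.h>

/* Sketch of the guard: a size_t that is negative when viewed as long
 * is almost certainly an underflowed length, not a real request. */
static size_t guarded_copy(void *dst, const void *src, size_t n)
{
	if ((long)n < 0)
		return n;		/* report everything as uncopied */
	memcpy(dst, src, n);
	return 0;
}

int main(void)
{
	char dst[16], src[16] = "payload";
	size_t total = 16, hdr = 24;
	size_t body = total - hdr;	/* underflows to a huge value */

	if (guarded_copy(dst, src, body))
		printf("rejected bogus length %zu\n", body);
	return 0;
}
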
9005diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
9006index 670c312..60c2b52 100644
9007--- a/arch/powerpc/kernel/Makefile
9008+++ b/arch/powerpc/kernel/Makefile
9009@@ -27,6 +27,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
9010 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
9011 endif
9012
9013+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
9014+
9015 obj-y := cputable.o ptrace.o syscalls.o \
9016 irq.o align.o signal_32.o pmc.o vdso.o \
9017 process.o systbl.o idle.o \
9018diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
9019index bb9cac6..5181202 100644
9020--- a/arch/powerpc/kernel/exceptions-64e.S
9021+++ b/arch/powerpc/kernel/exceptions-64e.S
9022@@ -1010,6 +1010,7 @@ storage_fault_common:
9023 std r14,_DAR(r1)
9024 std r15,_DSISR(r1)
9025 addi r3,r1,STACK_FRAME_OVERHEAD
9026+ bl save_nvgprs
9027 mr r4,r14
9028 mr r5,r15
9029 ld r14,PACA_EXGEN+EX_R14(r13)
9030@@ -1018,8 +1019,7 @@ storage_fault_common:
9031 cmpdi r3,0
9032 bne- 1f
9033 b ret_from_except_lite
9034-1: bl save_nvgprs
9035- mr r5,r3
9036+1: mr r5,r3
9037 addi r3,r1,STACK_FRAME_OVERHEAD
9038 ld r4,_DAR(r1)
9039 bl bad_page_fault
9040diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
9041index a7d36b1..53af150 100644
9042--- a/arch/powerpc/kernel/exceptions-64s.S
9043+++ b/arch/powerpc/kernel/exceptions-64s.S
9044@@ -1637,10 +1637,10 @@ handle_page_fault:
9045 11: ld r4,_DAR(r1)
9046 ld r5,_DSISR(r1)
9047 addi r3,r1,STACK_FRAME_OVERHEAD
9048+ bl save_nvgprs
9049 bl do_page_fault
9050 cmpdi r3,0
9051 beq+ 12f
9052- bl save_nvgprs
9053 mr r5,r3
9054 addi r3,r1,STACK_FRAME_OVERHEAD
9055 lwz r4,_DAR(r1)
9056diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
9057index 248ee7e..1eb60dd 100644
9058--- a/arch/powerpc/kernel/irq.c
9059+++ b/arch/powerpc/kernel/irq.c
9060@@ -447,6 +447,8 @@ void migrate_irqs(void)
9061 }
9062 #endif
9063
9064+extern void gr_handle_kernel_exploit(void);
9065+
9066 static inline void check_stack_overflow(void)
9067 {
9068 #ifdef CONFIG_DEBUG_STACKOVERFLOW
9069@@ -459,6 +461,7 @@ static inline void check_stack_overflow(void)
9070 printk("do_IRQ: stack overflow: %ld\n",
9071 sp - sizeof(struct thread_info));
9072 dump_stack();
9073+ gr_handle_kernel_exploit();
9074 }
9075 #endif
9076 }
9077diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
9078index 6cff040..74ac5d1b 100644
9079--- a/arch/powerpc/kernel/module_32.c
9080+++ b/arch/powerpc/kernel/module_32.c
9081@@ -161,7 +161,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
9082 me->arch.core_plt_section = i;
9083 }
9084 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
9085- printk("Module doesn't contain .plt or .init.plt sections.\n");
9086+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
9087 return -ENOEXEC;
9088 }
9089
9090@@ -191,11 +191,16 @@ static uint32_t do_plt_call(void *location,
9091
9092 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
9093 /* Init, or core PLT? */
9094- if (location >= mod->module_core
9095- && location < mod->module_core + mod->core_size)
9096+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
9097+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
9098 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
9099- else
9100+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
9101+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
9102 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
9103+ else {
9104+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
9105+ return ~0UL;
9106+ }
9107
9108 /* Find this entry, or if that fails, the next avail. entry */
9109 while (entry->jump[0]) {
9110@@ -299,7 +304,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
9111 }
9112 #ifdef CONFIG_DYNAMIC_FTRACE
9113 module->arch.tramp =
9114- do_plt_call(module->module_core,
9115+ do_plt_call(module->module_core_rx,
9116 (unsigned long)ftrace_caller,
9117 sechdrs, module);
9118 #endif
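
Under this patch a module no longer occupies a single module_core region but separate RX (code) and RW (data) allocations, so every "is this address inside the module" test must consider both ranges, which is exactly what do_plt_call() now does for core and init. The check factors naturally into a helper; a sketch assuming a hypothetical mirror of the patched struct module fields:

#include <stdint.h>
#include <stdio.h>

struct module_layout {
	char *core_rx; unsigned long size_rx;	/* code */
	char *core_rw; unsigned long size_rw;	/* data */
};

static int within(const void *p, const void *base, unsigned long size)
{
	uintptr_t a = (uintptr_t)p, b = (uintptr_t)base;

	return a >= b && a - b < size;
}

static int in_module_core(const struct module_layout *m, const void *p)
{
	return within(p, m->core_rx, m->size_rx) ||
	       within(p, m->core_rw, m->size_rw);
}

int main(void)
{
	static char rx[64], rw[64];
	struct module_layout m = { rx, sizeof rx, rw, sizeof rw };

	printf("%d %d\n", in_module_core(&m, rx + 8),
	       in_module_core(&m, (void *)0x1));
	return 0;
}
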
9119diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
9120index be99774..9879c82 100644
9121--- a/arch/powerpc/kernel/process.c
9122+++ b/arch/powerpc/kernel/process.c
9123@@ -1039,8 +1039,8 @@ void show_regs(struct pt_regs * regs)
9124 * Lookup NIP late so we have the best chance of getting the
9125 * above info out without failing
9126 */
9127- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
9128- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
9129+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
9130+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
9131 #endif
9132 show_stack(current, (unsigned long *) regs->gpr[1]);
9133 if (!user_mode(regs))
9134@@ -1554,10 +1554,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
9135 newsp = stack[0];
9136 ip = stack[STACK_FRAME_LR_SAVE];
9137 if (!firstframe || ip != lr) {
9138- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
9139+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
9140 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
9141 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
9142- printk(" (%pS)",
9143+ printk(" (%pA)",
9144 (void *)current->ret_stack[curr_frame].ret);
9145 curr_frame--;
9146 }
9147@@ -1577,7 +1577,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
9148 struct pt_regs *regs = (struct pt_regs *)
9149 (sp + STACK_FRAME_OVERHEAD);
9150 lr = regs->link;
9151- printk("--- Exception: %lx at %pS\n LR = %pS\n",
9152+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
9153 regs->trap, (void *)regs->nip, (void *)lr);
9154 firstframe = 1;
9155 }
9156@@ -1613,58 +1613,3 @@ void notrace __ppc64_runlatch_off(void)
9157 mtspr(SPRN_CTRLT, ctrl);
9158 }
9159 #endif /* CONFIG_PPC64 */
9160-
9161-unsigned long arch_align_stack(unsigned long sp)
9162-{
9163- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9164- sp -= get_random_int() & ~PAGE_MASK;
9165- return sp & ~0xf;
9166-}
9167-
9168-static inline unsigned long brk_rnd(void)
9169-{
9170- unsigned long rnd = 0;
9171-
9172- /* 8MB for 32bit, 1GB for 64bit */
9173- if (is_32bit_task())
9174- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
9175- else
9176- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
9177-
9178- return rnd << PAGE_SHIFT;
9179-}
9180-
9181-unsigned long arch_randomize_brk(struct mm_struct *mm)
9182-{
9183- unsigned long base = mm->brk;
9184- unsigned long ret;
9185-
9186-#ifdef CONFIG_PPC_STD_MMU_64
9187- /*
9188- * If we are using 1TB segments and we are allowed to randomise
9189- * the heap, we can put it above 1TB so it is backed by a 1TB
9190- * segment. Otherwise the heap will be in the bottom 1TB
9191- * which always uses 256MB segments and this may result in a
9192- * performance penalty.
9193- */
9194- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
9195- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
9196-#endif
9197-
9198- ret = PAGE_ALIGN(base + brk_rnd());
9199-
9200- if (ret < mm->brk)
9201- return mm->brk;
9202-
9203- return ret;
9204-}
9205-
9206-unsigned long randomize_et_dyn(unsigned long base)
9207-{
9208- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
9209-
9210- if (ret < base)
9211- return base;
9212-
9213- return ret;
9214-}
9215diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
9216index 2e3d2bf..35df241 100644
9217--- a/arch/powerpc/kernel/ptrace.c
9218+++ b/arch/powerpc/kernel/ptrace.c
9219@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
9220 return ret;
9221 }
9222
9223+#ifdef CONFIG_GRKERNSEC_SETXID
9224+extern void gr_delayed_cred_worker(void);
9225+#endif
9226+
9227 /*
9228 * We must return the syscall number to actually look up in the table.
9229 * This can be -1L to skip running any syscall at all.
9230@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
9231
9232 secure_computing_strict(regs->gpr[0]);
9233
9234+#ifdef CONFIG_GRKERNSEC_SETXID
9235+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9236+ gr_delayed_cred_worker();
9237+#endif
9238+
9239 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
9240 tracehook_report_syscall_entry(regs))
9241 /*
9242@@ -1808,6 +1817,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
9243 {
9244 int step;
9245
9246+#ifdef CONFIG_GRKERNSEC_SETXID
9247+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9248+ gr_delayed_cred_worker();
9249+#endif
9250+
9251 audit_syscall_exit(regs);
9252
9253 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
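
Both syscall entry and exit poll TIF_GRSEC_SETXID with test_and_clear_thread_flag() and, when it is set, run gr_delayed_cred_worker(): a credential change requested for the whole thread group is applied at the next safe syscall boundary rather than mid-flight. The flag protocol in miniature:

#include <stdbool.h>
#include <stdio.h>

#define TIF_GRSEC_SETXID 6

static unsigned long thread_flags;

static bool test_and_clear_flag(int bit)
{
	bool was = thread_flags & (1UL << bit);

	thread_flags &= ~(1UL << bit);
	return was;
}

static void delayed_cred_worker(void)
{
	puts("applying deferred credential change");
}

int main(void)
{
	thread_flags |= 1UL << TIF_GRSEC_SETXID;	/* set by the setxid path */

	/* entry and exit both check, but the worker runs exactly once */
	if (test_and_clear_flag(TIF_GRSEC_SETXID))
		delayed_cred_worker();
	if (test_and_clear_flag(TIF_GRSEC_SETXID))
		delayed_cred_worker();
	return 0;
}
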
9254diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
9255index 1bc5a17..910d3f3 100644
9256--- a/arch/powerpc/kernel/signal_32.c
9257+++ b/arch/powerpc/kernel/signal_32.c
9258@@ -1012,7 +1012,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
9259 /* Save user registers on the stack */
9260 frame = &rt_sf->uc.uc_mcontext;
9261 addr = frame;
9262- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
9263+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9264 sigret = 0;
9265 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
9266 } else {
9267diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
9268index 97c1e4b..f427f81 100644
9269--- a/arch/powerpc/kernel/signal_64.c
9270+++ b/arch/powerpc/kernel/signal_64.c
9271@@ -755,7 +755,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
9272 current->thread.fp_state.fpscr = 0;
9273
9274 /* Set up to return from userspace. */
9275- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
9276+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9277 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
9278 } else {
9279 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
9280diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
9281index 239f1cd..5359f76 100644
9282--- a/arch/powerpc/kernel/traps.c
9283+++ b/arch/powerpc/kernel/traps.c
9284@@ -142,6 +142,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
9285 return flags;
9286 }
9287
9288+extern void gr_handle_kernel_exploit(void);
9289+
9290 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9291 int signr)
9292 {
9293@@ -191,6 +193,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9294 panic("Fatal exception in interrupt");
9295 if (panic_on_oops)
9296 panic("Fatal exception");
9297+
9298+ gr_handle_kernel_exploit();
9299+
9300 do_exit(signr);
9301 }
9302
9303diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
9304index ce74c33..0803371 100644
9305--- a/arch/powerpc/kernel/vdso.c
9306+++ b/arch/powerpc/kernel/vdso.c
9307@@ -35,6 +35,7 @@
9308 #include <asm/vdso.h>
9309 #include <asm/vdso_datapage.h>
9310 #include <asm/setup.h>
9311+#include <asm/mman.h>
9312
9313 #undef DEBUG
9314
9315@@ -221,7 +222,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9316 vdso_base = VDSO32_MBASE;
9317 #endif
9318
9319- current->mm->context.vdso_base = 0;
9320+ current->mm->context.vdso_base = ~0UL;
9321
9322 /* vDSO has a problem and was disabled, just don't "enable" it for the
9323 * process
9324@@ -241,7 +242,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9325 vdso_base = get_unmapped_area(NULL, vdso_base,
9326 (vdso_pages << PAGE_SHIFT) +
9327 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
9328- 0, 0);
9329+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
9330 if (IS_ERR_VALUE(vdso_base)) {
9331 rc = vdso_base;
9332 goto fail_mmapsem;
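
vdso_base stops using 0 as its "no vDSO" marker: once mapping addresses are randomized, 0 is no longer a safe impossible value, whereas ~0UL can never be a page-aligned mapping base. The signal_32.c and signal_64.c hunks above update the corresponding truth tests. The sentinel pattern in isolation:

#include <stdio.h>

#define VDSO_UNMAPPED (~0UL)	/* never a valid page-aligned base */

static unsigned long vdso_base = VDSO_UNMAPPED;

static unsigned long sigtramp(unsigned long off)
{
	if (vdso_base != VDSO_UNMAPPED)	/* not: if (vdso_base) */
		return vdso_base + off;
	return 0;			/* caller uses a stack trampoline */
}

int main(void)
{
	printf("%#lx\n", sigtramp(0x100));
	vdso_base = 0x0UL;		/* a real, if unusual, base */
	printf("%#lx\n", sigtramp(0x100));
	return 0;
}
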
9333diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
9334index 61c738a..b1092d6 100644
9335--- a/arch/powerpc/kvm/powerpc.c
9336+++ b/arch/powerpc/kvm/powerpc.c
9337@@ -1195,7 +1195,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
9338 }
9339 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
9340
9341-int kvm_arch_init(void *opaque)
9342+int kvm_arch_init(const void *opaque)
9343 {
9344 return 0;
9345 }
9346diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
9347index 5eea6f3..5d10396 100644
9348--- a/arch/powerpc/lib/usercopy_64.c
9349+++ b/arch/powerpc/lib/usercopy_64.c
9350@@ -9,22 +9,6 @@
9351 #include <linux/module.h>
9352 #include <asm/uaccess.h>
9353
9354-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9355-{
9356- if (likely(access_ok(VERIFY_READ, from, n)))
9357- n = __copy_from_user(to, from, n);
9358- else
9359- memset(to, 0, n);
9360- return n;
9361-}
9362-
9363-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9364-{
9365- if (likely(access_ok(VERIFY_WRITE, to, n)))
9366- n = __copy_to_user(to, from, n);
9367- return n;
9368-}
9369-
9370 unsigned long copy_in_user(void __user *to, const void __user *from,
9371 unsigned long n)
9372 {
9373@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
9374 return n;
9375 }
9376
9377-EXPORT_SYMBOL(copy_from_user);
9378-EXPORT_SYMBOL(copy_to_user);
9379 EXPORT_SYMBOL(copy_in_user);
9380
9381diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
9382index 51ab9e7..7d3c78b 100644
9383--- a/arch/powerpc/mm/fault.c
9384+++ b/arch/powerpc/mm/fault.c
9385@@ -33,6 +33,10 @@
9386 #include <linux/magic.h>
9387 #include <linux/ratelimit.h>
9388 #include <linux/context_tracking.h>
9389+#include <linux/slab.h>
9390+#include <linux/pagemap.h>
9391+#include <linux/compiler.h>
9392+#include <linux/unistd.h>
9393
9394 #include <asm/firmware.h>
9395 #include <asm/page.h>
9396@@ -69,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
9397 }
9398 #endif
9399
9400+#ifdef CONFIG_PAX_PAGEEXEC
9401+/*
9402+ * PaX: decide what to do with offenders (regs->nip = fault address)
9403+ *
9404+ * returns 1 when task should be killed
9405+ */
9406+static int pax_handle_fetch_fault(struct pt_regs *regs)
9407+{
9408+ return 1;
9409+}
9410+
9411+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9412+{
9413+ unsigned long i;
9414+
9415+ printk(KERN_ERR "PAX: bytes at PC: ");
9416+ for (i = 0; i < 5; i++) {
9417+ unsigned int c;
9418+ if (get_user(c, (unsigned int __user *)pc+i))
9419+ printk(KERN_CONT "???????? ");
9420+ else
9421+ printk(KERN_CONT "%08x ", c);
9422+ }
9423+ printk("\n");
9424+}
9425+#endif
9426+
9427 /*
9428 * Check whether the instruction at regs->nip is a store using
9429 * an update addressing form which will update r1.
9430@@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
9431 * indicate errors in DSISR but can validly be set in SRR1.
9432 */
9433 if (trap == 0x400)
9434- error_code &= 0x48200000;
9435+ error_code &= 0x58200000;
9436 else
9437 is_write = error_code & DSISR_ISSTORE;
9438 #else
9439@@ -378,7 +409,7 @@ good_area:
9440 * "undefined". Of those that can be set, this is the only
9441 * one which seems bad.
9442 */
9443- if (error_code & 0x10000000)
9444+ if (error_code & DSISR_GUARDED)
9445 /* Guarded storage error. */
9446 goto bad_area;
9447 #endif /* CONFIG_8xx */
9448@@ -393,7 +424,7 @@ good_area:
9449 * processors use the same I/D cache coherency mechanism
9450 * as embedded.
9451 */
9452- if (error_code & DSISR_PROTFAULT)
9453+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
9454 goto bad_area;
9455 #endif /* CONFIG_PPC_STD_MMU */
9456
9457@@ -483,6 +514,23 @@ bad_area:
9458 bad_area_nosemaphore:
9459 /* User mode accesses cause a SIGSEGV */
9460 if (user_mode(regs)) {
9461+
9462+#ifdef CONFIG_PAX_PAGEEXEC
9463+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
9464+#ifdef CONFIG_PPC_STD_MMU
9465+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
9466+#else
9467+ if (is_exec && regs->nip == address) {
9468+#endif
9469+ switch (pax_handle_fetch_fault(regs)) {
9470+ }
9471+
9472+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
9473+ do_group_exit(SIGKILL);
9474+ }
9475+ }
9476+#endif
9477+
9478 _exception(SIGSEGV, regs, code, address);
9479 goto bail;
9480 }
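
The mask change from 0x48200000 to 0x58200000 admits exactly one extra bit on instruction faults, 0x10000000, which the reg.h hunk names DSISR_GUARDED: a fetch from guarded storage, the condition PAGEEXEC depends on. The arithmetic checks out (the name given to the 0x00200000 bit below is only a stand-in):

#include <stdio.h>

#define DSISR_NOHPTE	0x40000000u
#define DSISR_GUARDED	0x10000000u	/* added by this patch */
#define DSISR_PROTFAULT	0x08000000u
#define SRR1_ISI_OTHER	0x00200000u	/* illustrative name */

int main(void)
{
	unsigned int old_mask = DSISR_NOHPTE | DSISR_PROTFAULT | SRR1_ISI_OTHER;
	unsigned int new_mask = old_mask | DSISR_GUARDED;

	printf("old %#x, new %#x\n", old_mask, new_mask);
	/* prints: old 0x48200000, new 0x58200000 */
	return 0;
}
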
9481diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
9482index cb8bdbe..cde4bc7 100644
9483--- a/arch/powerpc/mm/mmap.c
9484+++ b/arch/powerpc/mm/mmap.c
9485@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
9486 return sysctl_legacy_va_layout;
9487 }
9488
9489-static unsigned long mmap_rnd(void)
9490+static unsigned long mmap_rnd(struct mm_struct *mm)
9491 {
9492 unsigned long rnd = 0;
9493
9494+#ifdef CONFIG_PAX_RANDMMAP
9495+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9496+#endif
9497+
9498 if (current->flags & PF_RANDOMIZE) {
9499 /* 8MB for 32bit, 1GB for 64bit */
9500 if (is_32bit_task())
9501@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
9502 return rnd << PAGE_SHIFT;
9503 }
9504
9505-static inline unsigned long mmap_base(void)
9506+static inline unsigned long mmap_base(struct mm_struct *mm)
9507 {
9508 unsigned long gap = rlimit(RLIMIT_STACK);
9509
9510@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
9511 else if (gap > MAX_GAP)
9512 gap = MAX_GAP;
9513
9514- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
9515+ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
9516 }
9517
9518 /*
9519@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9520 */
9521 if (mmap_is_legacy()) {
9522 mm->mmap_base = TASK_UNMAPPED_BASE;
9523+
9524+#ifdef CONFIG_PAX_RANDMMAP
9525+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9526+ mm->mmap_base += mm->delta_mmap;
9527+#endif
9528+
9529 mm->get_unmapped_area = arch_get_unmapped_area;
9530 } else {
9531- mm->mmap_base = mmap_base();
9532+ mm->mmap_base = mmap_base(mm);
9533+
9534+#ifdef CONFIG_PAX_RANDMMAP
9535+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9536+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9537+#endif
9538+
9539 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9540 }
9541 }
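
arch_pick_mmap_layout() now biases both layouts: legacy, bottom-up bases move up by delta_mmap, while top-down bases move down by delta_mmap + delta_stack so the randomized stack and mmap regions cannot collide. The parisc and s390 hunks in this patch make the same shift. The base arithmetic, with illustrative numbers:

#include <stdio.h>

#define TASK_UNMAPPED_BASE 0x10000000UL
#define TASK_SIZE          0x100000000UL

int main(void)
{
	unsigned long delta_mmap  = 0x01230000UL;	/* sample deltas */
	unsigned long delta_stack = 0x00450000UL;
	unsigned long gap         = 0x00800000UL;	/* from RLIMIT_STACK */

	unsigned long legacy  = TASK_UNMAPPED_BASE + delta_mmap;
	unsigned long topdown = (TASK_SIZE - gap) - (delta_mmap + delta_stack);

	printf("legacy base  %#lx\ntopdown base %#lx\n", legacy, topdown);
	return 0;
}
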
9542diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
9543index b0c75cc..ef7fb93 100644
9544--- a/arch/powerpc/mm/slice.c
9545+++ b/arch/powerpc/mm/slice.c
9546@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
9547 if ((mm->task_size - len) < addr)
9548 return 0;
9549 vma = find_vma(mm, addr);
9550- return (!vma || (addr + len) <= vma->vm_start);
9551+ return check_heap_stack_gap(vma, addr, len, 0);
9552 }
9553
9554 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
9555@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
9556 info.align_offset = 0;
9557
9558 addr = TASK_UNMAPPED_BASE;
9559+
9560+#ifdef CONFIG_PAX_RANDMMAP
9561+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9562+ addr += mm->delta_mmap;
9563+#endif
9564+
9565 while (addr < TASK_SIZE) {
9566 info.low_limit = addr;
9567 if (!slice_scan_available(addr, available, 1, &addr))
9568@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
9569 if (fixed && addr > (mm->task_size - len))
9570 return -ENOMEM;
9571
9572+#ifdef CONFIG_PAX_RANDMMAP
9573+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
9574+ addr = 0;
9575+#endif
9576+
9577 /* If hint, make sure it matches our alignment restrictions */
9578 if (!fixed && addr) {
9579 addr = _ALIGN_UP(addr, 1ul << pshift);
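
slice_area_is_free() trades the open-coded '!vma || addr + len <= vma->vm_start' for check_heap_stack_gap(), which also enforces a guard gap when the next vma is a downward-growing stack; the sh/mm/mmap.c hunks below make the same substitution. An approximation of its contract, not the grsecurity implementation:

#include <stdio.h>

#define VM_GROWSDOWN	0x0100UL
#define PAGE_SHIFT	12

struct vma { unsigned long vm_start, vm_flags; };

static unsigned long heap_stack_gap_pages = 64;	/* sysctl-style knob */

/* 1 if [addr, addr+len) fits below vma while keeping a guard gap
 * under stacks plus the caller's per-request random offset. */
static int check_heap_stack_gap(const struct vma *vma, unsigned long addr,
				unsigned long len, unsigned long offset)
{
	unsigned long guard = offset;

	if (!vma)
		return 1;
	if (vma->vm_flags & VM_GROWSDOWN)
		guard += heap_stack_gap_pages << PAGE_SHIFT;
	return addr + len + guard <= vma->vm_start;
}

int main(void)
{
	struct vma stack = { 0x7f000000UL, VM_GROWSDOWN };

	printf("%d\n", check_heap_stack_gap(&stack, 0x7e000000UL, 0x1000, 0));
	printf("%d\n", check_heap_stack_gap(&stack, 0x7effff00UL, 0x1000, 0));
	return 0;
}
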
9580diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9581index 4278acf..67fd0e6 100644
9582--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9583+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9584@@ -400,8 +400,8 @@ static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
9585 }
9586
9587 static struct pci_ops scc_pciex_pci_ops = {
9588- scc_pciex_read_config,
9589- scc_pciex_write_config,
9590+ .read = scc_pciex_read_config,
9591+ .write = scc_pciex_write_config,
9592 };
9593
9594 static void pciex_clear_intr_all(unsigned int __iomem *base)
9595diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
9596index 9098692..3d54cd1 100644
9597--- a/arch/powerpc/platforms/cell/spufs/file.c
9598+++ b/arch/powerpc/platforms/cell/spufs/file.c
9599@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9600 return VM_FAULT_NOPAGE;
9601 }
9602
9603-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
9604+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
9605 unsigned long address,
9606- void *buf, int len, int write)
9607+ void *buf, size_t len, int write)
9608 {
9609 struct spu_context *ctx = vma->vm_file->private_data;
9610 unsigned long offset = address - vma->vm_start;
9611diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
9612index fa934fe..c296056 100644
9613--- a/arch/s390/include/asm/atomic.h
9614+++ b/arch/s390/include/asm/atomic.h
9615@@ -412,4 +412,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
9616 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
9617 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9618
9619+#define atomic64_read_unchecked(v) atomic64_read(v)
9620+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
9621+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
9622+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
9623+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
9624+#define atomic64_inc_unchecked(v) atomic64_inc(v)
9625+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
9626+#define atomic64_dec_unchecked(v) atomic64_dec(v)
9627+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
9628+
9629 #endif /* __ARCH_S390_ATOMIC__ */
9630diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
9631index 19ff956..8d39cb1 100644
9632--- a/arch/s390/include/asm/barrier.h
9633+++ b/arch/s390/include/asm/barrier.h
9634@@ -37,7 +37,7 @@
9635 do { \
9636 compiletime_assert_atomic_type(*p); \
9637 barrier(); \
9638- ACCESS_ONCE(*p) = (v); \
9639+ ACCESS_ONCE_RW(*p) = (v); \
9640 } while (0)
9641
9642 #define smp_load_acquire(p) \
9643diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
9644index 4d7ccac..d03d0ad 100644
9645--- a/arch/s390/include/asm/cache.h
9646+++ b/arch/s390/include/asm/cache.h
9647@@ -9,8 +9,10 @@
9648 #ifndef __ARCH_S390_CACHE_H
9649 #define __ARCH_S390_CACHE_H
9650
9651-#define L1_CACHE_BYTES 256
9652+#include <linux/const.h>
9653+
9654 #define L1_CACHE_SHIFT 8
9655+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9656 #define NET_SKB_PAD 32
9657
9658 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9659diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
9660index 78f4f87..598ce39 100644
9661--- a/arch/s390/include/asm/elf.h
9662+++ b/arch/s390/include/asm/elf.h
9663@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
9664 the loader. We need to make sure that it is out of the way of the program
9665 that it will "exec", and that there is sufficient room for the brk. */
9666
9667-extern unsigned long randomize_et_dyn(unsigned long base);
9668-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
9669+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
9670+
9671+#ifdef CONFIG_PAX_ASLR
9672+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
9673+
9674+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9675+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9676+#endif
9677
9678 /* This yields a mask that user programs can use to figure out what
9679 instruction set this CPU supports. */
9680@@ -222,9 +228,6 @@ struct linux_binprm;
9681 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
9682 int arch_setup_additional_pages(struct linux_binprm *, int);
9683
9684-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9685-#define arch_randomize_brk arch_randomize_brk
9686-
9687 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
9688
9689 #endif
9690diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
9691index c4a93d6..4d2a9b4 100644
9692--- a/arch/s390/include/asm/exec.h
9693+++ b/arch/s390/include/asm/exec.h
9694@@ -7,6 +7,6 @@
9695 #ifndef __ASM_EXEC_H
9696 #define __ASM_EXEC_H
9697
9698-extern unsigned long arch_align_stack(unsigned long sp);
9699+#define arch_align_stack(x) ((x) & ~0xfUL)
9700
9701 #endif /* __ASM_EXEC_H */
9702diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
9703index cd4c68e..6764641 100644
9704--- a/arch/s390/include/asm/uaccess.h
9705+++ b/arch/s390/include/asm/uaccess.h
9706@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
9707 __range_ok((unsigned long)(addr), (size)); \
9708 })
9709
9710+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
9711 #define access_ok(type, addr, size) __access_ok(addr, size)
9712
9713 /*
9714@@ -275,6 +276,10 @@ static inline unsigned long __must_check
9715 copy_to_user(void __user *to, const void *from, unsigned long n)
9716 {
9717 might_fault();
9718+
9719+ if ((long)n < 0)
9720+ return n;
9721+
9722 return __copy_to_user(to, from, n);
9723 }
9724
9725@@ -303,10 +308,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
9726 static inline unsigned long __must_check
9727 copy_from_user(void *to, const void __user *from, unsigned long n)
9728 {
9729- unsigned int sz = __compiletime_object_size(to);
9730+ size_t sz = __compiletime_object_size(to);
9731
9732 might_fault();
9733- if (unlikely(sz != -1 && sz < n)) {
9734+
9735+ if ((long)n < 0)
9736+ return n;
9737+
9738+ if (unlikely(sz != (size_t)-1 && sz < n)) {
9739 copy_from_user_overflow();
9740 return n;
9741 }
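
The s390 copy_from_user also widens sz from unsigned int to size_t so the comparison with the (size_t)-1 "size unknown" marker stays exact on 64-bit. __compiletime_object_size() is __builtin_object_size() underneath, and the marker is easy to observe in userspace:

#include <stdio.h>

static void __attribute__((noinline)) report(const void *q)
{
	/* Through an opaque pointer the size is unknown: (size_t)-1. */
	printf("opaque: %zu\n", __builtin_object_size(q, 0));
}

int main(void)
{
	char buf[16];

	printf("direct: %zu\n", __builtin_object_size(buf, 0));	/* 16 */
	report(buf);
	return 0;
}
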
9742diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
9743index b89b591..fd9609d 100644
9744--- a/arch/s390/kernel/module.c
9745+++ b/arch/s390/kernel/module.c
9746@@ -169,11 +169,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
9747
9748 /* Increase core size by size of got & plt and set start
9749 offsets for got and plt. */
9750- me->core_size = ALIGN(me->core_size, 4);
9751- me->arch.got_offset = me->core_size;
9752- me->core_size += me->arch.got_size;
9753- me->arch.plt_offset = me->core_size;
9754- me->core_size += me->arch.plt_size;
9755+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
9756+ me->arch.got_offset = me->core_size_rw;
9757+ me->core_size_rw += me->arch.got_size;
9758+ me->arch.plt_offset = me->core_size_rx;
9759+ me->core_size_rx += me->arch.plt_size;
9760 return 0;
9761 }
9762
9763@@ -289,7 +289,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9764 if (info->got_initialized == 0) {
9765 Elf_Addr *gotent;
9766
9767- gotent = me->module_core + me->arch.got_offset +
9768+ gotent = me->module_core_rw + me->arch.got_offset +
9769 info->got_offset;
9770 *gotent = val;
9771 info->got_initialized = 1;
9772@@ -312,7 +312,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9773 rc = apply_rela_bits(loc, val, 0, 64, 0);
9774 else if (r_type == R_390_GOTENT ||
9775 r_type == R_390_GOTPLTENT) {
9776- val += (Elf_Addr) me->module_core - loc;
9777+ val += (Elf_Addr) me->module_core_rw - loc;
9778 rc = apply_rela_bits(loc, val, 1, 32, 1);
9779 }
9780 break;
9781@@ -325,7 +325,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9782 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
9783 if (info->plt_initialized == 0) {
9784 unsigned int *ip;
9785- ip = me->module_core + me->arch.plt_offset +
9786+ ip = me->module_core_rx + me->arch.plt_offset +
9787 info->plt_offset;
9788 #ifndef CONFIG_64BIT
9789 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
9790@@ -350,7 +350,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9791 val - loc + 0xffffUL < 0x1ffffeUL) ||
9792 (r_type == R_390_PLT32DBL &&
9793 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
9794- val = (Elf_Addr) me->module_core +
9795+ val = (Elf_Addr) me->module_core_rx +
9796 me->arch.plt_offset +
9797 info->plt_offset;
9798 val += rela->r_addend - loc;
9799@@ -372,7 +372,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9800 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
9801 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
9802 val = val + rela->r_addend -
9803- ((Elf_Addr) me->module_core + me->arch.got_offset);
9804+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
9805 if (r_type == R_390_GOTOFF16)
9806 rc = apply_rela_bits(loc, val, 0, 16, 0);
9807 else if (r_type == R_390_GOTOFF32)
9808@@ -382,7 +382,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9809 break;
9810 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
9811 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
9812- val = (Elf_Addr) me->module_core + me->arch.got_offset +
9813+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
9814 rela->r_addend - loc;
9815 if (r_type == R_390_GOTPC)
9816 rc = apply_rela_bits(loc, val, 1, 32, 0);
9817diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
9818index 93b9ca4..4ea1454 100644
9819--- a/arch/s390/kernel/process.c
9820+++ b/arch/s390/kernel/process.c
9821@@ -242,37 +242,3 @@ unsigned long get_wchan(struct task_struct *p)
9822 }
9823 return 0;
9824 }
9825-
9826-unsigned long arch_align_stack(unsigned long sp)
9827-{
9828- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9829- sp -= get_random_int() & ~PAGE_MASK;
9830- return sp & ~0xf;
9831-}
9832-
9833-static inline unsigned long brk_rnd(void)
9834-{
9835- /* 8MB for 32bit, 1GB for 64bit */
9836- if (is_32bit_task())
9837- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
9838- else
9839- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
9840-}
9841-
9842-unsigned long arch_randomize_brk(struct mm_struct *mm)
9843-{
9844- unsigned long ret;
9845-
9846- ret = PAGE_ALIGN(mm->brk + brk_rnd());
9847- return (ret > mm->brk) ? ret : mm->brk;
9848-}
9849-
9850-unsigned long randomize_et_dyn(unsigned long base)
9851-{
9852- unsigned long ret;
9853-
9854- if (!(current->flags & PF_RANDOMIZE))
9855- return base;
9856- ret = PAGE_ALIGN(base + brk_rnd());
9857- return (ret > base) ? ret : base;
9858-}
9859diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
9860index 9b436c2..54fbf0a 100644
9861--- a/arch/s390/mm/mmap.c
9862+++ b/arch/s390/mm/mmap.c
9863@@ -95,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9864 */
9865 if (mmap_is_legacy()) {
9866 mm->mmap_base = mmap_base_legacy();
9867+
9868+#ifdef CONFIG_PAX_RANDMMAP
9869+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9870+ mm->mmap_base += mm->delta_mmap;
9871+#endif
9872+
9873 mm->get_unmapped_area = arch_get_unmapped_area;
9874 } else {
9875 mm->mmap_base = mmap_base();
9876+
9877+#ifdef CONFIG_PAX_RANDMMAP
9878+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9879+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9880+#endif
9881+
9882 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9883 }
9884 }
9885@@ -170,9 +182,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9886 */
9887 if (mmap_is_legacy()) {
9888 mm->mmap_base = mmap_base_legacy();
9889+
9890+#ifdef CONFIG_PAX_RANDMMAP
9891+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9892+ mm->mmap_base += mm->delta_mmap;
9893+#endif
9894+
9895 mm->get_unmapped_area = s390_get_unmapped_area;
9896 } else {
9897 mm->mmap_base = mmap_base();
9898+
9899+#ifdef CONFIG_PAX_RANDMMAP
9900+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9901+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9902+#endif
9903+
9904 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
9905 }
9906 }
9907diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
9908index ae3d59f..f65f075 100644
9909--- a/arch/score/include/asm/cache.h
9910+++ b/arch/score/include/asm/cache.h
9911@@ -1,7 +1,9 @@
9912 #ifndef _ASM_SCORE_CACHE_H
9913 #define _ASM_SCORE_CACHE_H
9914
9915+#include <linux/const.h>
9916+
9917 #define L1_CACHE_SHIFT 4
9918-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9919+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9920
9921 #endif /* _ASM_SCORE_CACHE_H */
9922diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
9923index f9f3cd5..58ff438 100644
9924--- a/arch/score/include/asm/exec.h
9925+++ b/arch/score/include/asm/exec.h
9926@@ -1,6 +1,6 @@
9927 #ifndef _ASM_SCORE_EXEC_H
9928 #define _ASM_SCORE_EXEC_H
9929
9930-extern unsigned long arch_align_stack(unsigned long sp);
9931+#define arch_align_stack(x) (x)
9932
9933 #endif /* _ASM_SCORE_EXEC_H */
9934diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
9935index a1519ad3..e8ac1ff 100644
9936--- a/arch/score/kernel/process.c
9937+++ b/arch/score/kernel/process.c
9938@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
9939
9940 return task_pt_regs(task)->cp0_epc;
9941 }
9942-
9943-unsigned long arch_align_stack(unsigned long sp)
9944-{
9945- return sp;
9946-}
9947diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
9948index ef9e555..331bd29 100644
9949--- a/arch/sh/include/asm/cache.h
9950+++ b/arch/sh/include/asm/cache.h
9951@@ -9,10 +9,11 @@
9952 #define __ASM_SH_CACHE_H
9953 #ifdef __KERNEL__
9954
9955+#include <linux/const.h>
9956 #include <linux/init.h>
9957 #include <cpu/cache.h>
9958
9959-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9960+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9961
9962 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9963
9964diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
9965index 6777177..cb5e44f 100644
9966--- a/arch/sh/mm/mmap.c
9967+++ b/arch/sh/mm/mmap.c
9968@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9969 struct mm_struct *mm = current->mm;
9970 struct vm_area_struct *vma;
9971 int do_colour_align;
9972+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9973 struct vm_unmapped_area_info info;
9974
9975 if (flags & MAP_FIXED) {
9976@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9977 if (filp || (flags & MAP_SHARED))
9978 do_colour_align = 1;
9979
9980+#ifdef CONFIG_PAX_RANDMMAP
9981+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9982+#endif
9983+
9984 if (addr) {
9985 if (do_colour_align)
9986 addr = COLOUR_ALIGN(addr, pgoff);
9987@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9988 addr = PAGE_ALIGN(addr);
9989
9990 vma = find_vma(mm, addr);
9991- if (TASK_SIZE - len >= addr &&
9992- (!vma || addr + len <= vma->vm_start))
9993+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9994 return addr;
9995 }
9996
9997 info.flags = 0;
9998 info.length = len;
9999- info.low_limit = TASK_UNMAPPED_BASE;
10000+ info.low_limit = mm->mmap_base;
10001 info.high_limit = TASK_SIZE;
10002 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
10003 info.align_offset = pgoff << PAGE_SHIFT;
10004@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10005 struct mm_struct *mm = current->mm;
10006 unsigned long addr = addr0;
10007 int do_colour_align;
10008+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10009 struct vm_unmapped_area_info info;
10010
10011 if (flags & MAP_FIXED) {
10012@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10013 if (filp || (flags & MAP_SHARED))
10014 do_colour_align = 1;
10015
10016+#ifdef CONFIG_PAX_RANDMMAP
10017+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10018+#endif
10019+
10020 /* requesting a specific address */
10021 if (addr) {
10022 if (do_colour_align)
10023@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10024 addr = PAGE_ALIGN(addr);
10025
10026 vma = find_vma(mm, addr);
10027- if (TASK_SIZE - len >= addr &&
10028- (!vma || addr + len <= vma->vm_start))
10029+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10030 return addr;
10031 }
10032
10033@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10034 VM_BUG_ON(addr != -ENOMEM);
10035 info.flags = 0;
10036 info.low_limit = TASK_UNMAPPED_BASE;
10037+
10038+#ifdef CONFIG_PAX_RANDMMAP
10039+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10040+ info.low_limit += mm->delta_mmap;
10041+#endif
10042+
10043 info.high_limit = TASK_SIZE;
10044 addr = vm_unmapped_area(&info);
10045 }
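
The arch_get_unmapped_area hunks above replace the bare "does the next vma start past addr+len" test with check_heap_stack_gap(vma, addr, len, offset). The real helper is defined elsewhere in this patch; the following is only a hedged sketch of the stricter shape of the test, with the gap/offset semantics inferred rather than quoted:

#include <stdbool.h>

struct vma_sketch { unsigned long vm_start; };

/* Old test: any vma starting at or above addr+len is acceptable. */
static bool old_ok(const struct vma_sketch *vma, unsigned long addr,
		   unsigned long len)
{
	return !vma || addr + len <= vma->vm_start;
}

/* New test (sketch): additionally demand a guard gap, so a mapping cannot
 * be placed flush against a growing neighbor such as a thread stack. */
static bool gap_ok(const struct vma_sketch *vma, unsigned long addr,
		   unsigned long len, unsigned long gap)
{
	return !vma || addr + len + gap <= vma->vm_start;
}
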
10046diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
10047index bb894c8..8141d5c 100644
10048--- a/arch/sparc/include/asm/atomic_64.h
10049+++ b/arch/sparc/include/asm/atomic_64.h
10050@@ -15,18 +15,40 @@
10051 #define ATOMIC64_INIT(i) { (i) }
10052
10053 #define atomic_read(v) (*(volatile int *)&(v)->counter)
10054+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
10055+{
10056+ return v->counter;
10057+}
10058 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
10059+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
10060+{
10061+ return v->counter;
10062+}
10063
10064 #define atomic_set(v, i) (((v)->counter) = i)
10065+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
10066+{
10067+ v->counter = i;
10068+}
10069 #define atomic64_set(v, i) (((v)->counter) = i)
10070+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
10071+{
10072+ v->counter = i;
10073+}
10074
10075 void atomic_add(int, atomic_t *);
10076+void atomic_add_unchecked(int, atomic_unchecked_t *);
10077 void atomic64_add(long, atomic64_t *);
10078+void atomic64_add_unchecked(long, atomic64_unchecked_t *);
10079 void atomic_sub(int, atomic_t *);
10080+void atomic_sub_unchecked(int, atomic_unchecked_t *);
10081 void atomic64_sub(long, atomic64_t *);
10082+void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
10083
10084 int atomic_add_ret(int, atomic_t *);
10085+int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
10086 long atomic64_add_ret(long, atomic64_t *);
10087+long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
10088 int atomic_sub_ret(int, atomic_t *);
10089 long atomic64_sub_ret(long, atomic64_t *);
10090
10091@@ -34,13 +56,29 @@ long atomic64_sub_ret(long, atomic64_t *);
10092 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
10093
10094 #define atomic_inc_return(v) atomic_add_ret(1, v)
10095+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
10096+{
10097+ return atomic_add_ret_unchecked(1, v);
10098+}
10099 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
10100+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
10101+{
10102+ return atomic64_add_ret_unchecked(1, v);
10103+}
10104
10105 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
10106 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
10107
10108 #define atomic_add_return(i, v) atomic_add_ret(i, v)
10109+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
10110+{
10111+ return atomic_add_ret_unchecked(i, v);
10112+}
10113 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
10114+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
10115+{
10116+ return atomic64_add_ret_unchecked(i, v);
10117+}
10118
10119 /*
10120 * atomic_inc_and_test - increment and test
10121@@ -51,6 +89,10 @@ long atomic64_sub_ret(long, atomic64_t *);
10122 * other cases.
10123 */
10124 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
10125+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
10126+{
10127+ return atomic_inc_return_unchecked(v) == 0;
10128+}
10129 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
10130
10131 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
10132@@ -60,25 +102,60 @@ long atomic64_sub_ret(long, atomic64_t *);
10133 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
10134
10135 #define atomic_inc(v) atomic_add(1, v)
10136+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
10137+{
10138+ atomic_add_unchecked(1, v);
10139+}
10140 #define atomic64_inc(v) atomic64_add(1, v)
10141+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
10142+{
10143+ atomic64_add_unchecked(1, v);
10144+}
10145
10146 #define atomic_dec(v) atomic_sub(1, v)
10147+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
10148+{
10149+ atomic_sub_unchecked(1, v);
10150+}
10151 #define atomic64_dec(v) atomic64_sub(1, v)
10152+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
10153+{
10154+ atomic64_sub_unchecked(1, v);
10155+}
10156
10157 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
10158 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
10159
10160 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
10161+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
10162+{
10163+ return cmpxchg(&v->counter, old, new);
10164+}
10165 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
10166+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
10167+{
10168+ return xchg(&v->counter, new);
10169+}
10170
10171 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10172 {
10173- int c, old;
10174+ int c, old, new;
10175 c = atomic_read(v);
10176 for (;;) {
10177- if (unlikely(c == (u)))
10178+ if (unlikely(c == u))
10179 break;
10180- old = atomic_cmpxchg((v), c, c + (a));
10181+
10182+ asm volatile("addcc %2, %0, %0\n"
10183+
10184+#ifdef CONFIG_PAX_REFCOUNT
10185+ "tvs %%icc, 6\n"
10186+#endif
10187+
10188+ : "=r" (new)
10189+ : "0" (c), "ir" (a)
10190+ : "cc");
10191+
10192+ old = atomic_cmpxchg(v, c, new);
10193 if (likely(old == c))
10194 break;
10195 c = old;
10196@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10197 #define atomic64_cmpxchg(v, o, n) \
10198 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
10199 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
10200+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
10201+{
10202+ return xchg(&v->counter, new);
10203+}
10204
10205 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10206 {
10207- long c, old;
10208+ long c, old, new;
10209 c = atomic64_read(v);
10210 for (;;) {
10211- if (unlikely(c == (u)))
10212+ if (unlikely(c == u))
10213 break;
10214- old = atomic64_cmpxchg((v), c, c + (a));
10215+
10216+ asm volatile("addcc %2, %0, %0\n"
10217+
10218+#ifdef CONFIG_PAX_REFCOUNT
10219+ "tvs %%xcc, 6\n"
10220+#endif
10221+
10222+ : "=r" (new)
10223+ : "0" (c), "ir" (a)
10224+ : "cc");
10225+
10226+ old = atomic64_cmpxchg(v, c, new);
10227 if (likely(old == c))
10228 break;
10229 c = old;
10230 }
10231- return c != (u);
10232+ return c != u;
10233 }
10234
10235 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
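
The __atomic_add_unless/atomic64_add_unless hunks show the PAX_REFCOUNT idiom used throughout this patch: the plain add becomes addcc (an add that sets the condition codes), followed by tvs %icc, 6 / tvs %xcc, 6, a trap-on-signed-overflow into software trap 6, which the traps_64.c hunk later routes to pax_report_refcount_overflow(). A portable C sketch of the same semantics:

/* Sketch: add to a reference counter, but trap (here: abort) on signed
 * overflow instead of letting the counter wrap to a small or negative
 * value. Requires GCC 5+/clang for __builtin_add_overflow. */
#include <stdlib.h>

static int refcount_add_checked(int *counter, int a)
{
	int new;

	if (__builtin_add_overflow(*counter, a, &new))
		abort();	/* kernel: trap 6 -> pax_report_refcount_overflow() */
	*counter = new;
	return new;
}
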
10236diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
10237index 305dcc3..7835030 100644
10238--- a/arch/sparc/include/asm/barrier_64.h
10239+++ b/arch/sparc/include/asm/barrier_64.h
10240@@ -57,7 +57,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
10241 do { \
10242 compiletime_assert_atomic_type(*p); \
10243 barrier(); \
10244- ACCESS_ONCE(*p) = (v); \
10245+ ACCESS_ONCE_RW(*p) = (v); \
10246 } while (0)
10247
10248 #define smp_load_acquire(p) \
10249diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
10250index 5bb6991..5c2132e 100644
10251--- a/arch/sparc/include/asm/cache.h
10252+++ b/arch/sparc/include/asm/cache.h
10253@@ -7,10 +7,12 @@
10254 #ifndef _SPARC_CACHE_H
10255 #define _SPARC_CACHE_H
10256
10257+#include <linux/const.h>
10258+
10259 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
10260
10261 #define L1_CACHE_SHIFT 5
10262-#define L1_CACHE_BYTES 32
10263+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10264
10265 #ifdef CONFIG_SPARC32
10266 #define SMP_CACHE_BYTES_SHIFT 5
10267diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
10268index a24e41f..47677ff 100644
10269--- a/arch/sparc/include/asm/elf_32.h
10270+++ b/arch/sparc/include/asm/elf_32.h
10271@@ -114,6 +114,13 @@ typedef struct {
10272
10273 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
10274
10275+#ifdef CONFIG_PAX_ASLR
10276+#define PAX_ELF_ET_DYN_BASE 0x10000UL
10277+
10278+#define PAX_DELTA_MMAP_LEN 16
10279+#define PAX_DELTA_STACK_LEN 16
10280+#endif
10281+
10282 /* This yields a mask that user programs can use to figure out what
10283 instruction set this cpu supports. This can NOT be done in userspace
10284 on Sparc. */
10285diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
10286index 370ca1e..d4f4a98 100644
10287--- a/arch/sparc/include/asm/elf_64.h
10288+++ b/arch/sparc/include/asm/elf_64.h
10289@@ -189,6 +189,13 @@ typedef struct {
10290 #define ELF_ET_DYN_BASE 0x0000010000000000UL
10291 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
10292
10293+#ifdef CONFIG_PAX_ASLR
10294+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
10295+
10296+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
10297+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
10298+#endif
10299+
10300 extern unsigned long sparc64_elf_hwcap;
10301 #define ELF_HWCAP sparc64_elf_hwcap
10302
10303diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
10304index a3890da..f6a408e 100644
10305--- a/arch/sparc/include/asm/pgalloc_32.h
10306+++ b/arch/sparc/include/asm/pgalloc_32.h
10307@@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
10308 }
10309
10310 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
10311+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10312
10313 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
10314 unsigned long address)
10315diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
10316index 39a7ac4..2c9b586 100644
10317--- a/arch/sparc/include/asm/pgalloc_64.h
10318+++ b/arch/sparc/include/asm/pgalloc_64.h
10319@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
10320 }
10321
10322 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
10323+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
10324
10325 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
10326 {
10327diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
10328index 59ba6f6..4518128 100644
10329--- a/arch/sparc/include/asm/pgtable.h
10330+++ b/arch/sparc/include/asm/pgtable.h
10331@@ -5,4 +5,8 @@
10332 #else
10333 #include <asm/pgtable_32.h>
10334 #endif
10335+
10336+#define ktla_ktva(addr) (addr)
10337+#define ktva_ktla(addr) (addr)
10338+
10339 #endif
10340diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
10341index b9b91ae..950b91e 100644
10342--- a/arch/sparc/include/asm/pgtable_32.h
10343+++ b/arch/sparc/include/asm/pgtable_32.h
10344@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
10345 #define PAGE_SHARED SRMMU_PAGE_SHARED
10346 #define PAGE_COPY SRMMU_PAGE_COPY
10347 #define PAGE_READONLY SRMMU_PAGE_RDONLY
10348+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
10349+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
10350+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
10351 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
10352
10353 /* Top-level page directory - dummy used by init-mm.
10354@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
10355
10356 /* xwr */
10357 #define __P000 PAGE_NONE
10358-#define __P001 PAGE_READONLY
10359-#define __P010 PAGE_COPY
10360-#define __P011 PAGE_COPY
10361+#define __P001 PAGE_READONLY_NOEXEC
10362+#define __P010 PAGE_COPY_NOEXEC
10363+#define __P011 PAGE_COPY_NOEXEC
10364 #define __P100 PAGE_READONLY
10365 #define __P101 PAGE_READONLY
10366 #define __P110 PAGE_COPY
10367 #define __P111 PAGE_COPY
10368
10369 #define __S000 PAGE_NONE
10370-#define __S001 PAGE_READONLY
10371-#define __S010 PAGE_SHARED
10372-#define __S011 PAGE_SHARED
10373+#define __S001 PAGE_READONLY_NOEXEC
10374+#define __S010 PAGE_SHARED_NOEXEC
10375+#define __S011 PAGE_SHARED_NOEXEC
10376 #define __S100 PAGE_READONLY
10377 #define __S101 PAGE_READONLY
10378 #define __S110 PAGE_SHARED
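
With the three *_NOEXEC protections defined, the xwr tables above change meaning: a private or shared mapping that requests read and/or write but not execute now gets a page protection without the exec bit. This small program just prints the new private-mapping table exactly as the hunk defines it:

#include <stdio.h>

int main(void)
{
	/* Index is the xwr bit pattern from the __Pxwr table above. */
	const char *prot[8] = {
		"PAGE_NONE",
		"PAGE_READONLY_NOEXEC", "PAGE_COPY_NOEXEC", "PAGE_COPY_NOEXEC",
		"PAGE_READONLY", "PAGE_READONLY", "PAGE_COPY", "PAGE_COPY",
	};
	for (int xwr = 0; xwr < 8; xwr++)
		printf("__P%d%d%d -> %s\n",
		       (xwr >> 2) & 1, (xwr >> 1) & 1, xwr & 1, prot[xwr]);
	return 0;
}
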
10379diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
10380index 79da178..c2eede8 100644
10381--- a/arch/sparc/include/asm/pgtsrmmu.h
10382+++ b/arch/sparc/include/asm/pgtsrmmu.h
10383@@ -115,6 +115,11 @@
10384 SRMMU_EXEC | SRMMU_REF)
10385 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
10386 SRMMU_EXEC | SRMMU_REF)
10387+
10388+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
10389+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10390+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10391+
10392 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
10393 SRMMU_DIRTY | SRMMU_REF)
10394
10395diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
10396index f5fffd8..a0669f0 100644
10397--- a/arch/sparc/include/asm/setup.h
10398+++ b/arch/sparc/include/asm/setup.h
10399@@ -53,8 +53,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
10400 void handle_ld_nf(u32 insn, struct pt_regs *regs);
10401
10402 /* init_64.c */
10403-extern atomic_t dcpage_flushes;
10404-extern atomic_t dcpage_flushes_xcall;
10405+extern atomic_unchecked_t dcpage_flushes;
10406+extern atomic_unchecked_t dcpage_flushes_xcall;
10407
10408 extern int sysctl_tsb_ratio;
10409 #endif
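
dcpage_flushes and dcpage_flushes_xcall are debug statistics where wrapping is harmless, so they move to atomic_unchecked_t, the companion type this patch introduces for counters that must keep plain wrapping semantics while atomic_t gains the overflow trap. A sketch of that two-type split (the typedef shape follows the patch; the C11-style body is illustrative):

typedef struct { int counter; } atomic_unchecked_sketch_t;

static inline void atomic_inc_unchecked_sketch(atomic_unchecked_sketch_t *v)
{
	/* plain wrapping add, no addcc/tvs overflow trap */
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}
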
10410diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
10411index 9689176..63c18ea 100644
10412--- a/arch/sparc/include/asm/spinlock_64.h
10413+++ b/arch/sparc/include/asm/spinlock_64.h
10414@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
10415
10416 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
10417
10418-static void inline arch_read_lock(arch_rwlock_t *lock)
10419+static inline void arch_read_lock(arch_rwlock_t *lock)
10420 {
10421 unsigned long tmp1, tmp2;
10422
10423 __asm__ __volatile__ (
10424 "1: ldsw [%2], %0\n"
10425 " brlz,pn %0, 2f\n"
10426-"4: add %0, 1, %1\n"
10427+"4: addcc %0, 1, %1\n"
10428+
10429+#ifdef CONFIG_PAX_REFCOUNT
10430+" tvs %%icc, 6\n"
10431+#endif
10432+
10433 " cas [%2], %0, %1\n"
10434 " cmp %0, %1\n"
10435 " bne,pn %%icc, 1b\n"
10436@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
10437 " .previous"
10438 : "=&r" (tmp1), "=&r" (tmp2)
10439 : "r" (lock)
10440- : "memory");
10441+ : "memory", "cc");
10442 }
10443
10444-static int inline arch_read_trylock(arch_rwlock_t *lock)
10445+static inline int arch_read_trylock(arch_rwlock_t *lock)
10446 {
10447 int tmp1, tmp2;
10448
10449@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10450 "1: ldsw [%2], %0\n"
10451 " brlz,a,pn %0, 2f\n"
10452 " mov 0, %0\n"
10453-" add %0, 1, %1\n"
10454+" addcc %0, 1, %1\n"
10455+
10456+#ifdef CONFIG_PAX_REFCOUNT
10457+" tvs %%icc, 6\n"
10458+#endif
10459+
10460 " cas [%2], %0, %1\n"
10461 " cmp %0, %1\n"
10462 " bne,pn %%icc, 1b\n"
10463@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10464 return tmp1;
10465 }
10466
10467-static void inline arch_read_unlock(arch_rwlock_t *lock)
10468+static inline void arch_read_unlock(arch_rwlock_t *lock)
10469 {
10470 unsigned long tmp1, tmp2;
10471
10472 __asm__ __volatile__(
10473 "1: lduw [%2], %0\n"
10474-" sub %0, 1, %1\n"
10475+" subcc %0, 1, %1\n"
10476+
10477+#ifdef CONFIG_PAX_REFCOUNT
10478+" tvs %%icc, 6\n"
10479+#endif
10480+
10481 " cas [%2], %0, %1\n"
10482 " cmp %0, %1\n"
10483 " bne,pn %%xcc, 1b\n"
10484@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
10485 : "memory");
10486 }
10487
10488-static void inline arch_write_lock(arch_rwlock_t *lock)
10489+static inline void arch_write_lock(arch_rwlock_t *lock)
10490 {
10491 unsigned long mask, tmp1, tmp2;
10492
10493@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
10494 : "memory");
10495 }
10496
10497-static void inline arch_write_unlock(arch_rwlock_t *lock)
10498+static inline void arch_write_unlock(arch_rwlock_t *lock)
10499 {
10500 __asm__ __volatile__(
10501 " stw %%g0, [%0]"
10502@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
10503 : "memory");
10504 }
10505
10506-static int inline arch_write_trylock(arch_rwlock_t *lock)
10507+static inline int arch_write_trylock(arch_rwlock_t *lock)
10508 {
10509 unsigned long mask, tmp1, tmp2, result;
10510
10511diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
10512index 96efa7a..16858bf 100644
10513--- a/arch/sparc/include/asm/thread_info_32.h
10514+++ b/arch/sparc/include/asm/thread_info_32.h
10515@@ -49,6 +49,8 @@ struct thread_info {
10516 unsigned long w_saved;
10517
10518 struct restart_block restart_block;
10519+
10520+ unsigned long lowest_stack;
10521 };
10522
10523 /*
10524diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
10525index a5f01ac..703b554 100644
10526--- a/arch/sparc/include/asm/thread_info_64.h
10527+++ b/arch/sparc/include/asm/thread_info_64.h
10528@@ -63,6 +63,8 @@ struct thread_info {
10529 struct pt_regs *kern_una_regs;
10530 unsigned int kern_una_insn;
10531
10532+ unsigned long lowest_stack;
10533+
10534 unsigned long fpregs[0] __attribute__ ((aligned(64)));
10535 };
10536
10537@@ -188,12 +190,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
10538 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
10539 /* flag bit 4 is available */
10540 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
10541-/* flag bit 6 is available */
10542+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
10543 #define TIF_32BIT 7 /* 32-bit binary */
10544 #define TIF_NOHZ 8 /* in adaptive nohz mode */
10545 #define TIF_SECCOMP 9 /* secure computing */
10546 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
10547 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
10548+
10549 /* NOTE: Thread flags >= 12 should be ones we have no interest
10550 * in using in assembly, else we can't use the mask as
10551 * an immediate value in instructions such as andcc.
10552@@ -213,12 +216,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
10553 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
10554 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
10555 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
10556+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
10557
10558 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
10559 _TIF_DO_NOTIFY_RESUME_MASK | \
10560 _TIF_NEED_RESCHED)
10561 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
10562
10563+#define _TIF_WORK_SYSCALL \
10564+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
10565+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
10566+
10567+
10568 /*
10569 * Thread-synchronous status.
10570 *
10571diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
10572index bd56c28..4b63d83 100644
10573--- a/arch/sparc/include/asm/uaccess.h
10574+++ b/arch/sparc/include/asm/uaccess.h
10575@@ -1,5 +1,6 @@
10576 #ifndef ___ASM_SPARC_UACCESS_H
10577 #define ___ASM_SPARC_UACCESS_H
10578+
10579 #if defined(__sparc__) && defined(__arch64__)
10580 #include <asm/uaccess_64.h>
10581 #else
10582diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
10583index 9634d08..f55fe4f 100644
10584--- a/arch/sparc/include/asm/uaccess_32.h
10585+++ b/arch/sparc/include/asm/uaccess_32.h
10586@@ -250,27 +250,46 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
10587
10588 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
10589 {
10590- if (n && __access_ok((unsigned long) to, n))
10591+ if ((long)n < 0)
10592+ return n;
10593+
10594+ if (n && __access_ok((unsigned long) to, n)) {
10595+ if (!__builtin_constant_p(n))
10596+ check_object_size(from, n, true);
10597 return __copy_user(to, (__force void __user *) from, n);
10598- else
10599+ } else
10600 return n;
10601 }
10602
10603 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
10604 {
10605+ if ((long)n < 0)
10606+ return n;
10607+
10608+ if (!__builtin_constant_p(n))
10609+ check_object_size(from, n, true);
10610+
10611 return __copy_user(to, (__force void __user *) from, n);
10612 }
10613
10614 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
10615 {
10616- if (n && __access_ok((unsigned long) from, n))
10617+ if ((long)n < 0)
10618+ return n;
10619+
10620+ if (n && __access_ok((unsigned long) from, n)) {
10621+ if (!__builtin_constant_p(n))
10622+ check_object_size(to, n, false);
10623 return __copy_user((__force void __user *) to, from, n);
10624- else
10625+ } else
10626 return n;
10627 }
10628
10629 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
10630 {
10631+ if ((long)n < 0)
10632+ return n;
10633+
10634 return __copy_user((__force void __user *) to, from, n);
10635 }
10636
10637diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
10638index c990a5e..f17b9c1 100644
10639--- a/arch/sparc/include/asm/uaccess_64.h
10640+++ b/arch/sparc/include/asm/uaccess_64.h
10641@@ -10,6 +10,7 @@
10642 #include <linux/compiler.h>
10643 #include <linux/string.h>
10644 #include <linux/thread_info.h>
10645+#include <linux/kernel.h>
10646 #include <asm/asi.h>
10647 #include <asm/spitfire.h>
10648 #include <asm-generic/uaccess-unaligned.h>
10649@@ -214,8 +215,15 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
10650 static inline unsigned long __must_check
10651 copy_from_user(void *to, const void __user *from, unsigned long size)
10652 {
10653- unsigned long ret = ___copy_from_user(to, from, size);
10654+ unsigned long ret;
10655
10656+ if ((long)size < 0 || size > INT_MAX)
10657+ return size;
10658+
10659+ if (!__builtin_constant_p(size))
10660+ check_object_size(to, size, false);
10661+
10662+ ret = ___copy_from_user(to, from, size);
10663 if (unlikely(ret))
10664 ret = copy_from_user_fixup(to, from, size);
10665
10666@@ -231,8 +239,15 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
10667 static inline unsigned long __must_check
10668 copy_to_user(void __user *to, const void *from, unsigned long size)
10669 {
10670- unsigned long ret = ___copy_to_user(to, from, size);
10671+ unsigned long ret;
10672
10673+ if ((long)size < 0 || size > INT_MAX)
10674+ return size;
10675+
10676+ if (!__builtin_constant_p(size))
10677+ check_object_size(from, size, true);
10678+
10679+ ret = ___copy_to_user(to, from, size);
10680 if (unlikely(ret))
10681 ret = copy_to_user_fixup(to, from, size);
10682 return ret;
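
For sizes that are not compile-time constants, the 64-bit routines additionally clamp to INT_MAX and call check_object_size() before copying. The checker itself is defined elsewhere in this patch; the stub below only shows the call shape and the contract as used here, verifying that the copy stays inside one known object:

#include <stddef.h>
#include <stdbool.h>

/* Stub standing in for the real checker: it verifies [ptr, ptr+n) lies
 * inside a single known object (stack frame or heap allocation). */
static void check_object_size_sketch(const void *ptr, size_t n, bool to_user)
{
	(void)ptr; (void)n; (void)to_user;
}

static unsigned long copy_to_user_sketch(void *to, const void *from,
					 unsigned long size)
{
	(void)to;
	if ((long)size < 0 || size > 0x7fffffffUL)	/* the INT_MAX clamp above */
		return size;				/* refused: nothing copied */
	if (!__builtin_constant_p(size))		/* constant sizes are
							   validated at build time */
		check_object_size_sketch(from, size, true);
	/* ... the real ___copy_to_user() would run here ... */
	return 0;					/* 0 bytes left uncopied */
}
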
10683diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
10684index 7cf9c6e..6206648 100644
10685--- a/arch/sparc/kernel/Makefile
10686+++ b/arch/sparc/kernel/Makefile
10687@@ -4,7 +4,7 @@
10688 #
10689
10690 asflags-y := -ansi
10691-ccflags-y := -Werror
10692+#ccflags-y := -Werror
10693
10694 extra-y := head_$(BITS).o
10695
10696diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
10697index 50e7b62..79fae35 100644
10698--- a/arch/sparc/kernel/process_32.c
10699+++ b/arch/sparc/kernel/process_32.c
10700@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
10701
10702 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
10703 r->psr, r->pc, r->npc, r->y, print_tainted());
10704- printk("PC: <%pS>\n", (void *) r->pc);
10705+ printk("PC: <%pA>\n", (void *) r->pc);
10706 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10707 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
10708 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
10709 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10710 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
10711 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
10712- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
10713+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
10714
10715 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10716 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
10717@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10718 rw = (struct reg_window32 *) fp;
10719 pc = rw->ins[7];
10720 printk("[%08lx : ", pc);
10721- printk("%pS ] ", (void *) pc);
10722+ printk("%pA ] ", (void *) pc);
10723 fp = rw->ins[6];
10724 } while (++count < 16);
10725 printk("\n");
10726diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
10727index 027e099..6d4178f 100644
10728--- a/arch/sparc/kernel/process_64.c
10729+++ b/arch/sparc/kernel/process_64.c
10730@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
10731 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
10732 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
10733 if (regs->tstate & TSTATE_PRIV)
10734- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
10735+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
10736 }
10737
10738 void show_regs(struct pt_regs *regs)
10739@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
10740
10741 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
10742 regs->tpc, regs->tnpc, regs->y, print_tainted());
10743- printk("TPC: <%pS>\n", (void *) regs->tpc);
10744+ printk("TPC: <%pA>\n", (void *) regs->tpc);
10745 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
10746 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
10747 regs->u_regs[3]);
10748@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
10749 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
10750 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
10751 regs->u_regs[15]);
10752- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
10753+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
10754 show_regwindow(regs);
10755 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
10756 }
10757@@ -278,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
10758 ((tp && tp->task) ? tp->task->pid : -1));
10759
10760 if (gp->tstate & TSTATE_PRIV) {
10761- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
10762+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
10763 (void *) gp->tpc,
10764 (void *) gp->o7,
10765 (void *) gp->i7,
10766diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
10767index 79cc0d1..ec62734 100644
10768--- a/arch/sparc/kernel/prom_common.c
10769+++ b/arch/sparc/kernel/prom_common.c
10770@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
10771
10772 unsigned int prom_early_allocated __initdata;
10773
10774-static struct of_pdt_ops prom_sparc_ops __initdata = {
10775+static struct of_pdt_ops prom_sparc_ops __initconst = {
10776 .nextprop = prom_common_nextprop,
10777 .getproplen = prom_getproplen,
10778 .getproperty = prom_getproperty,
10779diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
10780index c13c9f2..d572c34 100644
10781--- a/arch/sparc/kernel/ptrace_64.c
10782+++ b/arch/sparc/kernel/ptrace_64.c
10783@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
10784 return ret;
10785 }
10786
10787+#ifdef CONFIG_GRKERNSEC_SETXID
10788+extern void gr_delayed_cred_worker(void);
10789+#endif
10790+
10791 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10792 {
10793 int ret = 0;
10794@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10795 if (test_thread_flag(TIF_NOHZ))
10796 user_exit();
10797
10798+#ifdef CONFIG_GRKERNSEC_SETXID
10799+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10800+ gr_delayed_cred_worker();
10801+#endif
10802+
10803 if (test_thread_flag(TIF_SYSCALL_TRACE))
10804 ret = tracehook_report_syscall_entry(regs);
10805
10806@@ -1093,6 +1102,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
10807 if (test_thread_flag(TIF_NOHZ))
10808 user_exit();
10809
10810+#ifdef CONFIG_GRKERNSEC_SETXID
10811+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10812+ gr_delayed_cred_worker();
10813+#endif
10814+
10815 audit_syscall_exit(regs);
10816
10817 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
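
gr_delayed_cred_worker() is only declared extern here; its definition lives in the grsecurity core, outside this excerpt. The pattern the hunk wires up is "flag now, apply at the next syscall boundary": a set*id() call marks each thread with TIF_GRSEC_SETXID, and every thread applies the new credentials on its own next syscall entry or exit, where sleeping and locking are safe. A generic sketch of that deferral (names are illustrative, not the patch's):

#include <stdatomic.h>

static _Atomic int pending_cred_update;

void mark_cred_update(void)		/* the set*id() side */
{
	atomic_store(&pending_cred_update, 1);
}

void syscall_boundary(void)		/* the entry/exit hook side */
{
	if (atomic_exchange(&pending_cred_update, 0))
		;	/* apply_new_credentials() would run here */
}
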
10818diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
10819index 41aa247..eadfb74 100644
10820--- a/arch/sparc/kernel/smp_64.c
10821+++ b/arch/sparc/kernel/smp_64.c
10822@@ -883,7 +883,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10823 return;
10824
10825 #ifdef CONFIG_DEBUG_DCFLUSH
10826- atomic_inc(&dcpage_flushes);
10827+ atomic_inc_unchecked(&dcpage_flushes);
10828 #endif
10829
10830 this_cpu = get_cpu();
10831@@ -907,7 +907,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10832 xcall_deliver(data0, __pa(pg_addr),
10833 (u64) pg_addr, cpumask_of(cpu));
10834 #ifdef CONFIG_DEBUG_DCFLUSH
10835- atomic_inc(&dcpage_flushes_xcall);
10836+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10837 #endif
10838 }
10839 }
10840@@ -926,7 +926,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10841 preempt_disable();
10842
10843 #ifdef CONFIG_DEBUG_DCFLUSH
10844- atomic_inc(&dcpage_flushes);
10845+ atomic_inc_unchecked(&dcpage_flushes);
10846 #endif
10847 data0 = 0;
10848 pg_addr = page_address(page);
10849@@ -943,7 +943,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10850 xcall_deliver(data0, __pa(pg_addr),
10851 (u64) pg_addr, cpu_online_mask);
10852 #ifdef CONFIG_DEBUG_DCFLUSH
10853- atomic_inc(&dcpage_flushes_xcall);
10854+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10855 #endif
10856 }
10857 __local_flush_dcache_page(page);
10858diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
10859index 646988d..b88905f 100644
10860--- a/arch/sparc/kernel/sys_sparc_32.c
10861+++ b/arch/sparc/kernel/sys_sparc_32.c
10862@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10863 if (len > TASK_SIZE - PAGE_SIZE)
10864 return -ENOMEM;
10865 if (!addr)
10866- addr = TASK_UNMAPPED_BASE;
10867+ addr = current->mm->mmap_base;
10868
10869 info.flags = 0;
10870 info.length = len;
10871diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
10872index c85403d..6af95c9 100644
10873--- a/arch/sparc/kernel/sys_sparc_64.c
10874+++ b/arch/sparc/kernel/sys_sparc_64.c
10875@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10876 struct vm_area_struct * vma;
10877 unsigned long task_size = TASK_SIZE;
10878 int do_color_align;
10879+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10880 struct vm_unmapped_area_info info;
10881
10882 if (flags & MAP_FIXED) {
10883 /* We do not accept a shared mapping if it would violate
10884 * cache aliasing constraints.
10885 */
10886- if ((flags & MAP_SHARED) &&
10887+ if ((filp || (flags & MAP_SHARED)) &&
10888 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10889 return -EINVAL;
10890 return addr;
10891@@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10892 if (filp || (flags & MAP_SHARED))
10893 do_color_align = 1;
10894
10895+#ifdef CONFIG_PAX_RANDMMAP
10896+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10897+#endif
10898+
10899 if (addr) {
10900 if (do_color_align)
10901 addr = COLOR_ALIGN(addr, pgoff);
10902@@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10903 addr = PAGE_ALIGN(addr);
10904
10905 vma = find_vma(mm, addr);
10906- if (task_size - len >= addr &&
10907- (!vma || addr + len <= vma->vm_start))
10908+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10909 return addr;
10910 }
10911
10912 info.flags = 0;
10913 info.length = len;
10914- info.low_limit = TASK_UNMAPPED_BASE;
10915+ info.low_limit = mm->mmap_base;
10916 info.high_limit = min(task_size, VA_EXCLUDE_START);
10917 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10918 info.align_offset = pgoff << PAGE_SHIFT;
10919+ info.threadstack_offset = offset;
10920 addr = vm_unmapped_area(&info);
10921
10922 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10923 VM_BUG_ON(addr != -ENOMEM);
10924 info.low_limit = VA_EXCLUDE_END;
10925+
10926+#ifdef CONFIG_PAX_RANDMMAP
10927+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10928+ info.low_limit += mm->delta_mmap;
10929+#endif
10930+
10931 info.high_limit = task_size;
10932 addr = vm_unmapped_area(&info);
10933 }
10934@@ -150,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10935 unsigned long task_size = STACK_TOP32;
10936 unsigned long addr = addr0;
10937 int do_color_align;
10938+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10939 struct vm_unmapped_area_info info;
10940
10941 /* This should only ever run for 32-bit processes. */
10942@@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10943 /* We do not accept a shared mapping if it would violate
10944 * cache aliasing constraints.
10945 */
10946- if ((flags & MAP_SHARED) &&
10947+ if ((filp || (flags & MAP_SHARED)) &&
10948 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10949 return -EINVAL;
10950 return addr;
10951@@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10952 if (filp || (flags & MAP_SHARED))
10953 do_color_align = 1;
10954
10955+#ifdef CONFIG_PAX_RANDMMAP
10956+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10957+#endif
10958+
10959 /* requesting a specific address */
10960 if (addr) {
10961 if (do_color_align)
10962@@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10963 addr = PAGE_ALIGN(addr);
10964
10965 vma = find_vma(mm, addr);
10966- if (task_size - len >= addr &&
10967- (!vma || addr + len <= vma->vm_start))
10968+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10969 return addr;
10970 }
10971
10972@@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10973 info.high_limit = mm->mmap_base;
10974 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10975 info.align_offset = pgoff << PAGE_SHIFT;
10976+ info.threadstack_offset = offset;
10977 addr = vm_unmapped_area(&info);
10978
10979 /*
10980@@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10981 VM_BUG_ON(addr != -ENOMEM);
10982 info.flags = 0;
10983 info.low_limit = TASK_UNMAPPED_BASE;
10984+
10985+#ifdef CONFIG_PAX_RANDMMAP
10986+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10987+ info.low_limit += mm->delta_mmap;
10988+#endif
10989+
10990 info.high_limit = STACK_TOP32;
10991 addr = vm_unmapped_area(&info);
10992 }
10993@@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
10994 EXPORT_SYMBOL(get_fb_unmapped_area);
10995
10996 /* Essentially the same as PowerPC. */
10997-static unsigned long mmap_rnd(void)
10998+static unsigned long mmap_rnd(struct mm_struct *mm)
10999 {
11000 unsigned long rnd = 0UL;
11001
11002+#ifdef CONFIG_PAX_RANDMMAP
11003+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
11004+#endif
11005+
11006 if (current->flags & PF_RANDOMIZE) {
11007 unsigned long val = get_random_int();
11008 if (test_thread_flag(TIF_32BIT))
11009@@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
11010
11011 void arch_pick_mmap_layout(struct mm_struct *mm)
11012 {
11013- unsigned long random_factor = mmap_rnd();
11014+ unsigned long random_factor = mmap_rnd(mm);
11015 unsigned long gap;
11016
11017 /*
11018@@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
11019 gap == RLIM_INFINITY ||
11020 sysctl_legacy_va_layout) {
11021 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
11022+
11023+#ifdef CONFIG_PAX_RANDMMAP
11024+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11025+ mm->mmap_base += mm->delta_mmap;
11026+#endif
11027+
11028 mm->get_unmapped_area = arch_get_unmapped_area;
11029 } else {
11030 /* We know it's 32-bit */
11031@@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
11032 gap = (task_size / 6 * 5);
11033
11034 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
11035+
11036+#ifdef CONFIG_PAX_RANDMMAP
11037+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11038+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
11039+#endif
11040+
11041 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
11042 }
11043 }
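
Beyond shifting mmap_base, the sys_sparc_64.c hunks wrap the "requesting a specific address" block in !(mm->pax_flags & MF_PAX_RANDMMAP): with RANDMMAP active, a plain (non-MAP_FIXED) address hint is ignored, so a program cannot suggest its mapping back to a predictable address. A sketch of that decision:

#include <stdbool.h>

unsigned long pick_addr(unsigned long hint, bool randmmap,
			unsigned long randomized_base)
{
	if (hint && !randmmap)
		return hint;		/* honor the hint only without RANDMMAP */
	return randomized_base;		/* otherwise always search from the base */
}
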
11044diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
11045index 33a17e7..d87fb1f 100644
11046--- a/arch/sparc/kernel/syscalls.S
11047+++ b/arch/sparc/kernel/syscalls.S
11048@@ -52,7 +52,7 @@ sys32_rt_sigreturn:
11049 #endif
11050 .align 32
11051 1: ldx [%g6 + TI_FLAGS], %l5
11052- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11053+ andcc %l5, _TIF_WORK_SYSCALL, %g0
11054 be,pt %icc, rtrap
11055 nop
11056 call syscall_trace_leave
11057@@ -184,7 +184,7 @@ linux_sparc_syscall32:
11058
11059 srl %i3, 0, %o3 ! IEU0
11060 srl %i2, 0, %o2 ! IEU0 Group
11061- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11062+ andcc %l0, _TIF_WORK_SYSCALL, %g0
11063 bne,pn %icc, linux_syscall_trace32 ! CTI
11064 mov %i0, %l5 ! IEU1
11065 5: call %l7 ! CTI Group brk forced
11066@@ -208,7 +208,7 @@ linux_sparc_syscall:
11067
11068 mov %i3, %o3 ! IEU1
11069 mov %i4, %o4 ! IEU0 Group
11070- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11071+ andcc %l0, _TIF_WORK_SYSCALL, %g0
11072 bne,pn %icc, linux_syscall_trace ! CTI Group
11073 mov %i0, %l5 ! IEU0
11074 2: call %l7 ! CTI Group brk forced
11075@@ -223,7 +223,7 @@ ret_sys_call:
11076
11077 cmp %o0, -ERESTART_RESTARTBLOCK
11078 bgeu,pn %xcc, 1f
11079- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11080+ andcc %l0, _TIF_WORK_SYSCALL, %g0
11081 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
11082
11083 2:
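
All four andcc sites in syscalls.S now test the single _TIF_WORK_SYSCALL mask from the thread_info_64.h hunk, which also folds in _TIF_GRSEC_SETXID. Per the NOTE in that header, every flag in the mask must stay below bit 12 so the mask still fits andcc's 13-bit signed immediate field; this computes the resulting constant (bit numbers are from the hunk, with TIF_SYSCALL_TRACE being bit 0 in this kernel):

#include <stdio.h>

int main(void)
{
	unsigned long mask = (1UL << 0)		/* TIF_SYSCALL_TRACE      */
			   | (1UL << 9)		/* TIF_SECCOMP            */
			   | (1UL << 10)	/* TIF_SYSCALL_AUDIT      */
			   | (1UL << 11)	/* TIF_SYSCALL_TRACEPOINT */
			   | (1UL << 8)		/* TIF_NOHZ               */
			   | (1UL << 6);	/* TIF_GRSEC_SETXID       */

	/* 0xf41: below 1 << 12, so it still works as an andcc immediate. */
	printf("_TIF_WORK_SYSCALL = %#lx\n", mask);
	return 0;
}
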
11084diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
11085index 6fd386c5..6907d81 100644
11086--- a/arch/sparc/kernel/traps_32.c
11087+++ b/arch/sparc/kernel/traps_32.c
11088@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
11089 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
11090 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
11091
11092+extern void gr_handle_kernel_exploit(void);
11093+
11094 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11095 {
11096 static int die_counter;
11097@@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11098 count++ < 30 &&
11099 (((unsigned long) rw) >= PAGE_OFFSET) &&
11100 !(((unsigned long) rw) & 0x7)) {
11101- printk("Caller[%08lx]: %pS\n", rw->ins[7],
11102+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
11103 (void *) rw->ins[7]);
11104 rw = (struct reg_window32 *)rw->ins[6];
11105 }
11106 }
11107 printk("Instruction DUMP:");
11108 instruction_dump ((unsigned long *) regs->pc);
11109- if(regs->psr & PSR_PS)
11110+ if(regs->psr & PSR_PS) {
11111+ gr_handle_kernel_exploit();
11112 do_exit(SIGKILL);
11113+ }
11114 do_exit(SIGSEGV);
11115 }
11116
11117diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
11118index fb6640e..2daada8 100644
11119--- a/arch/sparc/kernel/traps_64.c
11120+++ b/arch/sparc/kernel/traps_64.c
11121@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
11122 i + 1,
11123 p->trapstack[i].tstate, p->trapstack[i].tpc,
11124 p->trapstack[i].tnpc, p->trapstack[i].tt);
11125- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
11126+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
11127 }
11128 }
11129
11130@@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
11131
11132 lvl -= 0x100;
11133 if (regs->tstate & TSTATE_PRIV) {
11134+
11135+#ifdef CONFIG_PAX_REFCOUNT
11136+ if (lvl == 6)
11137+ pax_report_refcount_overflow(regs);
11138+#endif
11139+
11140 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
11141 die_if_kernel(buffer, regs);
11142 }
11143@@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
11144 void bad_trap_tl1(struct pt_regs *regs, long lvl)
11145 {
11146 char buffer[32];
11147-
11148+
11149 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
11150 0, lvl, SIGTRAP) == NOTIFY_STOP)
11151 return;
11152
11153+#ifdef CONFIG_PAX_REFCOUNT
11154+ if (lvl == 6)
11155+ pax_report_refcount_overflow(regs);
11156+#endif
11157+
11158 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
11159
11160 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
11161@@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
11162 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
11163 printk("%s" "ERROR(%d): ",
11164 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
11165- printk("TPC<%pS>\n", (void *) regs->tpc);
11166+ printk("TPC<%pA>\n", (void *) regs->tpc);
11167 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
11168 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
11169 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
11170@@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11171 smp_processor_id(),
11172 (type & 0x1) ? 'I' : 'D',
11173 regs->tpc);
11174- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
11175+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
11176 panic("Irrecoverable Cheetah+ parity error.");
11177 }
11178
11179@@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11180 smp_processor_id(),
11181 (type & 0x1) ? 'I' : 'D',
11182 regs->tpc);
11183- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
11184+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
11185 }
11186
11187 struct sun4v_error_entry {
11188@@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
11189 /*0x38*/u64 reserved_5;
11190 };
11191
11192-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11193-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11194+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11195+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11196
11197 static const char *sun4v_err_type_to_str(u8 type)
11198 {
11199@@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
11200 }
11201
11202 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11203- int cpu, const char *pfx, atomic_t *ocnt)
11204+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
11205 {
11206 u64 *raw_ptr = (u64 *) ent;
11207 u32 attrs;
11208@@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11209
11210 show_regs(regs);
11211
11212- if ((cnt = atomic_read(ocnt)) != 0) {
11213- atomic_set(ocnt, 0);
11214+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
11215+ atomic_set_unchecked(ocnt, 0);
11216 wmb();
11217 printk("%s: Queue overflowed %d times.\n",
11218 pfx, cnt);
11219@@ -2048,7 +2059,7 @@ out:
11220 */
11221 void sun4v_resum_overflow(struct pt_regs *regs)
11222 {
11223- atomic_inc(&sun4v_resum_oflow_cnt);
11224+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
11225 }
11226
11227 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
11228@@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
11229 /* XXX Actually even this can make not that much sense. Perhaps
11230 * XXX we should just pull the plug and panic directly from here?
11231 */
11232- atomic_inc(&sun4v_nonresum_oflow_cnt);
11233+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
11234 }
11235
11236 unsigned long sun4v_err_itlb_vaddr;
11237@@ -2116,9 +2127,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
11238
11239 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
11240 regs->tpc, tl);
11241- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
11242+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
11243 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11244- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
11245+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
11246 (void *) regs->u_regs[UREG_I7]);
11247 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
11248 "pte[%lx] error[%lx]\n",
11249@@ -2140,9 +2151,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
11250
11251 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
11252 regs->tpc, tl);
11253- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
11254+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
11255 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11256- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
11257+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
11258 (void *) regs->u_regs[UREG_I7]);
11259 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
11260 "pte[%lx] error[%lx]\n",
11261@@ -2359,13 +2370,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
11262 fp = (unsigned long)sf->fp + STACK_BIAS;
11263 }
11264
11265- printk(" [%016lx] %pS\n", pc, (void *) pc);
11266+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11267 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
11268 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
11269 int index = tsk->curr_ret_stack;
11270 if (tsk->ret_stack && index >= graph) {
11271 pc = tsk->ret_stack[index - graph].ret;
11272- printk(" [%016lx] %pS\n", pc, (void *) pc);
11273+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11274 graph++;
11275 }
11276 }
11277@@ -2383,6 +2394,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
11278 return (struct reg_window *) (fp + STACK_BIAS);
11279 }
11280
11281+extern void gr_handle_kernel_exploit(void);
11282+
11283 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11284 {
11285 static int die_counter;
11286@@ -2411,7 +2424,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11287 while (rw &&
11288 count++ < 30 &&
11289 kstack_valid(tp, (unsigned long) rw)) {
11290- printk("Caller[%016lx]: %pS\n", rw->ins[7],
11291+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
11292 (void *) rw->ins[7]);
11293
11294 rw = kernel_stack_up(rw);
11295@@ -2424,8 +2437,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11296 }
11297 user_instruction_dump ((unsigned int __user *) regs->tpc);
11298 }
11299- if (regs->tstate & TSTATE_PRIV)
11300+ if (regs->tstate & TSTATE_PRIV) {
11301+ gr_handle_kernel_exploit();
11302 do_exit(SIGKILL);
11303+ }
11304 do_exit(SIGSEGV);
11305 }
11306 EXPORT_SYMBOL(die_if_kernel);
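
Both the 32-bit and 64-bit die_if_kernel() paths above now call gr_handle_kernel_exploit() before killing a task that oopsed in privileged mode; that function is defined in the grsecurity core, not in this excerpt, and reacts to kernel-mode oopses (the %pS to %pA printk changes in the same hunks switch to a symbol format this patch adds elsewhere). A sketch of the added control flow, with stubs standing in for the kernel pieces:

#include <stdio.h>
#include <stdlib.h>

static void gr_handle_kernel_exploit_sketch(void)
{
	/* grsec core (not in this excerpt): invoked only for oopses taken
	 * in privileged mode, before the task is killed */
	puts("kernel-mode oops: exploit handler runs");
}

static void die_if_kernel_sketch(int kernel_mode)
{
	if (kernel_mode) {
		gr_handle_kernel_exploit_sketch();
		exit(9);	/* kernel: do_exit(SIGKILL) */
	}
	exit(11);		/* kernel: do_exit(SIGSEGV) */
}

int main(void)
{
	die_if_kernel_sketch(1);
}
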
11307diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
11308index 62098a8..547ab2c 100644
11309--- a/arch/sparc/kernel/unaligned_64.c
11310+++ b/arch/sparc/kernel/unaligned_64.c
11311@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
11312 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
11313
11314 if (__ratelimit(&ratelimit)) {
11315- printk("Kernel unaligned access at TPC[%lx] %pS\n",
11316+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
11317 regs->tpc, (void *) regs->tpc);
11318 }
11319 }
11320diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
11321index 3269b02..64f5231 100644
11322--- a/arch/sparc/lib/Makefile
11323+++ b/arch/sparc/lib/Makefile
11324@@ -2,7 +2,7 @@
11325 #
11326
11327 asflags-y := -ansi -DST_DIV0=0x02
11328-ccflags-y := -Werror
11329+#ccflags-y := -Werror
11330
11331 lib-$(CONFIG_SPARC32) += ashrdi3.o
11332 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
11333diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
11334index 85c233d..68500e0 100644
11335--- a/arch/sparc/lib/atomic_64.S
11336+++ b/arch/sparc/lib/atomic_64.S
11337@@ -17,7 +17,12 @@
11338 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
11339 BACKOFF_SETUP(%o2)
11340 1: lduw [%o1], %g1
11341- add %g1, %o0, %g7
11342+ addcc %g1, %o0, %g7
11343+
11344+#ifdef CONFIG_PAX_REFCOUNT
11345+ tvs %icc, 6
11346+#endif
11347+
11348 cas [%o1], %g1, %g7
11349 cmp %g1, %g7
11350 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11351@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
11352 2: BACKOFF_SPIN(%o2, %o3, 1b)
11353 ENDPROC(atomic_add)
11354
11355+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11356+ BACKOFF_SETUP(%o2)
11357+1: lduw [%o1], %g1
11358+ add %g1, %o0, %g7
11359+ cas [%o1], %g1, %g7
11360+ cmp %g1, %g7
11361+ bne,pn %icc, 2f
11362+ nop
11363+ retl
11364+ nop
11365+2: BACKOFF_SPIN(%o2, %o3, 1b)
11366+ENDPROC(atomic_add_unchecked)
11367+
11368 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11369 BACKOFF_SETUP(%o2)
11370 1: lduw [%o1], %g1
11371- sub %g1, %o0, %g7
11372+ subcc %g1, %o0, %g7
11373+
11374+#ifdef CONFIG_PAX_REFCOUNT
11375+ tvs %icc, 6
11376+#endif
11377+
11378 cas [%o1], %g1, %g7
11379 cmp %g1, %g7
11380 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11381@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11382 2: BACKOFF_SPIN(%o2, %o3, 1b)
11383 ENDPROC(atomic_sub)
11384
11385+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
11386+ BACKOFF_SETUP(%o2)
11387+1: lduw [%o1], %g1
11388+ sub %g1, %o0, %g7
11389+ cas [%o1], %g1, %g7
11390+ cmp %g1, %g7
11391+ bne,pn %icc, 2f
11392+ nop
11393+ retl
11394+ nop
11395+2: BACKOFF_SPIN(%o2, %o3, 1b)
11396+ENDPROC(atomic_sub_unchecked)
11397+
11398 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11399 BACKOFF_SETUP(%o2)
11400 1: lduw [%o1], %g1
11401- add %g1, %o0, %g7
11402+ addcc %g1, %o0, %g7
11403+
11404+#ifdef CONFIG_PAX_REFCOUNT
11405+ tvs %icc, 6
11406+#endif
11407+
11408 cas [%o1], %g1, %g7
11409 cmp %g1, %g7
11410 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11411@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11412 2: BACKOFF_SPIN(%o2, %o3, 1b)
11413 ENDPROC(atomic_add_ret)
11414
11415+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11416+ BACKOFF_SETUP(%o2)
11417+1: lduw [%o1], %g1
11418+ addcc %g1, %o0, %g7
11419+ cas [%o1], %g1, %g7
11420+ cmp %g1, %g7
11421+ bne,pn %icc, 2f
11422+ add %g7, %o0, %g7
11423+ sra %g7, 0, %o0
11424+ retl
11425+ nop
11426+2: BACKOFF_SPIN(%o2, %o3, 1b)
11427+ENDPROC(atomic_add_ret_unchecked)
11428+
11429 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
11430 BACKOFF_SETUP(%o2)
11431 1: lduw [%o1], %g1
11432- sub %g1, %o0, %g7
11433+ subcc %g1, %o0, %g7
11434+
11435+#ifdef CONFIG_PAX_REFCOUNT
11436+ tvs %icc, 6
11437+#endif
11438+
11439 cas [%o1], %g1, %g7
11440 cmp %g1, %g7
11441 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11442@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
11443 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
11444 BACKOFF_SETUP(%o2)
11445 1: ldx [%o1], %g1
11446- add %g1, %o0, %g7
11447+ addcc %g1, %o0, %g7
11448+
11449+#ifdef CONFIG_PAX_REFCOUNT
11450+ tvs %xcc, 6
11451+#endif
11452+
11453 casx [%o1], %g1, %g7
11454 cmp %g1, %g7
11455 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
11456@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
11457 2: BACKOFF_SPIN(%o2, %o3, 1b)
11458 ENDPROC(atomic64_add)
11459
11460+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11461+ BACKOFF_SETUP(%o2)
11462+1: ldx [%o1], %g1
11463+ addcc %g1, %o0, %g7
11464+ casx [%o1], %g1, %g7
11465+ cmp %g1, %g7
11466+ bne,pn %xcc, 2f
11467+ nop
11468+ retl
11469+ nop
11470+2: BACKOFF_SPIN(%o2, %o3, 1b)
11471+ENDPROC(atomic64_add_unchecked)
11472+
11473 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11474 BACKOFF_SETUP(%o2)
11475 1: ldx [%o1], %g1
11476- sub %g1, %o0, %g7
11477+ subcc %g1, %o0, %g7
11478+
11479+#ifdef CONFIG_PAX_REFCOUNT
11480+ tvs %xcc, 6
11481+#endif
11482+
11483 casx [%o1], %g1, %g7
11484 cmp %g1, %g7
11485 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
11486@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11487 2: BACKOFF_SPIN(%o2, %o3, 1b)
11488 ENDPROC(atomic64_sub)
11489
11490+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
11491+ BACKOFF_SETUP(%o2)
11492+1: ldx [%o1], %g1
11493+ subcc %g1, %o0, %g7
11494+ casx [%o1], %g1, %g7
11495+ cmp %g1, %g7
11496+ bne,pn %xcc, 2f
11497+ nop
11498+ retl
11499+ nop
11500+2: BACKOFF_SPIN(%o2, %o3, 1b)
11501+ENDPROC(atomic64_sub_unchecked)
11502+
11503 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11504 BACKOFF_SETUP(%o2)
11505 1: ldx [%o1], %g1
11506- add %g1, %o0, %g7
11507+ addcc %g1, %o0, %g7
11508+
11509+#ifdef CONFIG_PAX_REFCOUNT
11510+ tvs %xcc, 6
11511+#endif
11512+
11513 casx [%o1], %g1, %g7
11514 cmp %g1, %g7
11515 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
11516@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11517 2: BACKOFF_SPIN(%o2, %o3, 1b)
11518 ENDPROC(atomic64_add_ret)
11519
11520+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11521+ BACKOFF_SETUP(%o2)
11522+1: ldx [%o1], %g1
11523+ addcc %g1, %o0, %g7
11524+ casx [%o1], %g1, %g7
11525+ cmp %g1, %g7
11526+ bne,pn %xcc, 2f
11527+ add %g7, %o0, %g7
11528+ mov %g7, %o0
11529+ retl
11530+ nop
11531+2: BACKOFF_SPIN(%o2, %o3, 1b)
11532+ENDPROC(atomic64_add_ret_unchecked)
11533+
11534 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
11535 BACKOFF_SETUP(%o2)
11536 1: ldx [%o1], %g1
11537- sub %g1, %o0, %g7
11538+ subcc %g1, %o0, %g7
11539+
11540+#ifdef CONFIG_PAX_REFCOUNT
11541+ tvs %xcc, 6
11542+#endif
11543+
11544 casx [%o1], %g1, %g7
11545 cmp %g1, %g7
11546 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
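
The hunks above are the core of PAX_REFCOUNT on sparc64: the plain add/sub becomes addcc/subcc so the integer condition codes are set, and tvs %icc, 6 (or %xcc for the 64-bit ops) takes a software trap when the signed result overflowed, which the REFCOUNT handler treats as a reference-count overflow. The new *_unchecked entry points omit the trap and keep the old wrapping behaviour for counters that may legitimately wrap. A minimal userspace sketch of the same check, using a compiler builtin in place of the trap instruction (names here are illustrative, not from the patch):

#include <stdio.h>

/* Userspace analogue of the addcc + tvs pair: detect signed overflow on
 * an increment and trap instead of letting a refcount wrap.  The kernel
 * reacts to the trap by reporting and killing the offending task. */
static int checked_add(int old, int inc)
{
        int res;

        if (__builtin_add_overflow(old, inc, &res)) {   /* tvs analogue */
                fprintf(stderr, "refcount would wrap, trapping\n");
                __builtin_trap();                       /* aborts the demo */
        }
        return res;
}

int main(void)
{
        printf("%d\n", checked_add(41, 1));     /* prints 42 */
        checked_add(2147483647, 1);             /* overflows: traps here */
        return 0;
}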
11547diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
11548index 323335b..ed85ea2 100644
11549--- a/arch/sparc/lib/ksyms.c
11550+++ b/arch/sparc/lib/ksyms.c
11551@@ -100,12 +100,18 @@ EXPORT_SYMBOL(__clear_user);
11552
11553 /* Atomic counter implementation. */
11554 EXPORT_SYMBOL(atomic_add);
11555+EXPORT_SYMBOL(atomic_add_unchecked);
11556 EXPORT_SYMBOL(atomic_add_ret);
11557+EXPORT_SYMBOL(atomic_add_ret_unchecked);
11558 EXPORT_SYMBOL(atomic_sub);
11559+EXPORT_SYMBOL(atomic_sub_unchecked);
11560 EXPORT_SYMBOL(atomic_sub_ret);
11561 EXPORT_SYMBOL(atomic64_add);
11562+EXPORT_SYMBOL(atomic64_add_unchecked);
11563 EXPORT_SYMBOL(atomic64_add_ret);
11564+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
11565 EXPORT_SYMBOL(atomic64_sub);
11566+EXPORT_SYMBOL(atomic64_sub_unchecked);
11567 EXPORT_SYMBOL(atomic64_sub_ret);
11568 EXPORT_SYMBOL(atomic64_dec_if_positive);
11569
11570diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
11571index 30c3ecc..736f015 100644
11572--- a/arch/sparc/mm/Makefile
11573+++ b/arch/sparc/mm/Makefile
11574@@ -2,7 +2,7 @@
11575 #
11576
11577 asflags-y := -ansi
11578-ccflags-y := -Werror
11579+#ccflags-y := -Werror
11580
11581 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
11582 obj-y += fault_$(BITS).o
11583diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
11584index 908e8c1..1524793 100644
11585--- a/arch/sparc/mm/fault_32.c
11586+++ b/arch/sparc/mm/fault_32.c
11587@@ -21,6 +21,9 @@
11588 #include <linux/perf_event.h>
11589 #include <linux/interrupt.h>
11590 #include <linux/kdebug.h>
11591+#include <linux/slab.h>
11592+#include <linux/pagemap.h>
11593+#include <linux/compiler.h>
11594
11595 #include <asm/page.h>
11596 #include <asm/pgtable.h>
11597@@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
11598 return safe_compute_effective_address(regs, insn);
11599 }
11600
11601+#ifdef CONFIG_PAX_PAGEEXEC
11602+#ifdef CONFIG_PAX_DLRESOLVE
11603+static void pax_emuplt_close(struct vm_area_struct *vma)
11604+{
11605+ vma->vm_mm->call_dl_resolve = 0UL;
11606+}
11607+
11608+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11609+{
11610+ unsigned int *kaddr;
11611+
11612+ vmf->page = alloc_page(GFP_HIGHUSER);
11613+ if (!vmf->page)
11614+ return VM_FAULT_OOM;
11615+
11616+ kaddr = kmap(vmf->page);
11617+ memset(kaddr, 0, PAGE_SIZE);
11618+ kaddr[0] = 0x9DE3BFA8U; /* save */
11619+ flush_dcache_page(vmf->page);
11620+ kunmap(vmf->page);
11621+ return VM_FAULT_MAJOR;
11622+}
11623+
11624+static const struct vm_operations_struct pax_vm_ops = {
11625+ .close = pax_emuplt_close,
11626+ .fault = pax_emuplt_fault
11627+};
11628+
11629+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11630+{
11631+ int ret;
11632+
11633+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11634+ vma->vm_mm = current->mm;
11635+ vma->vm_start = addr;
11636+ vma->vm_end = addr + PAGE_SIZE;
11637+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11638+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11639+ vma->vm_ops = &pax_vm_ops;
11640+
11641+ ret = insert_vm_struct(current->mm, vma);
11642+ if (ret)
11643+ return ret;
11644+
11645+ ++current->mm->total_vm;
11646+ return 0;
11647+}
11648+#endif
11649+
11650+/*
11651+ * PaX: decide what to do with offenders (regs->pc = fault address)
11652+ *
11653+ * returns 1 when task should be killed
11654+ * 2 when patched PLT trampoline was detected
11655+ * 3 when unpatched PLT trampoline was detected
11656+ */
11657+static int pax_handle_fetch_fault(struct pt_regs *regs)
11658+{
11659+
11660+#ifdef CONFIG_PAX_EMUPLT
11661+ int err;
11662+
11663+ do { /* PaX: patched PLT emulation #1 */
11664+ unsigned int sethi1, sethi2, jmpl;
11665+
11666+ err = get_user(sethi1, (unsigned int *)regs->pc);
11667+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
11668+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
11669+
11670+ if (err)
11671+ break;
11672+
11673+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11674+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11675+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11676+ {
11677+ unsigned int addr;
11678+
11679+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11680+ addr = regs->u_regs[UREG_G1];
11681+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11682+ regs->pc = addr;
11683+ regs->npc = addr+4;
11684+ return 2;
11685+ }
11686+ } while (0);
11687+
11688+ do { /* PaX: patched PLT emulation #2 */
11689+ unsigned int ba;
11690+
11691+ err = get_user(ba, (unsigned int *)regs->pc);
11692+
11693+ if (err)
11694+ break;
11695+
11696+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11697+ unsigned int addr;
11698+
11699+ if ((ba & 0xFFC00000U) == 0x30800000U)
11700+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11701+ else
11702+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11703+ regs->pc = addr;
11704+ regs->npc = addr+4;
11705+ return 2;
11706+ }
11707+ } while (0);
11708+
11709+ do { /* PaX: patched PLT emulation #3 */
11710+ unsigned int sethi, bajmpl, nop;
11711+
11712+ err = get_user(sethi, (unsigned int *)regs->pc);
11713+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
11714+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11715+
11716+ if (err)
11717+ break;
11718+
11719+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11720+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11721+ nop == 0x01000000U)
11722+ {
11723+ unsigned int addr;
11724+
11725+ addr = (sethi & 0x003FFFFFU) << 10;
11726+ regs->u_regs[UREG_G1] = addr;
11727+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11728+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11729+ else
11730+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11731+ regs->pc = addr;
11732+ regs->npc = addr+4;
11733+ return 2;
11734+ }
11735+ } while (0);
11736+
11737+ do { /* PaX: unpatched PLT emulation step 1 */
11738+ unsigned int sethi, ba, nop;
11739+
11740+ err = get_user(sethi, (unsigned int *)regs->pc);
11741+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
11742+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11743+
11744+ if (err)
11745+ break;
11746+
11747+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11748+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11749+ nop == 0x01000000U)
11750+ {
11751+ unsigned int addr, save, call;
11752+
11753+ if ((ba & 0xFFC00000U) == 0x30800000U)
11754+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11755+ else
11756+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11757+
11758+ err = get_user(save, (unsigned int *)addr);
11759+ err |= get_user(call, (unsigned int *)(addr+4));
11760+ err |= get_user(nop, (unsigned int *)(addr+8));
11761+ if (err)
11762+ break;
11763+
11764+#ifdef CONFIG_PAX_DLRESOLVE
11765+ if (save == 0x9DE3BFA8U &&
11766+ (call & 0xC0000000U) == 0x40000000U &&
11767+ nop == 0x01000000U)
11768+ {
11769+ struct vm_area_struct *vma;
11770+ unsigned long call_dl_resolve;
11771+
11772+ down_read(&current->mm->mmap_sem);
11773+ call_dl_resolve = current->mm->call_dl_resolve;
11774+ up_read(&current->mm->mmap_sem);
11775+ if (likely(call_dl_resolve))
11776+ goto emulate;
11777+
11778+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11779+
11780+ down_write(&current->mm->mmap_sem);
11781+ if (current->mm->call_dl_resolve) {
11782+ call_dl_resolve = current->mm->call_dl_resolve;
11783+ up_write(&current->mm->mmap_sem);
11784+ if (vma)
11785+ kmem_cache_free(vm_area_cachep, vma);
11786+ goto emulate;
11787+ }
11788+
11789+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11790+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11791+ up_write(&current->mm->mmap_sem);
11792+ if (vma)
11793+ kmem_cache_free(vm_area_cachep, vma);
11794+ return 1;
11795+ }
11796+
11797+ if (pax_insert_vma(vma, call_dl_resolve)) {
11798+ up_write(&current->mm->mmap_sem);
11799+ kmem_cache_free(vm_area_cachep, vma);
11800+ return 1;
11801+ }
11802+
11803+ current->mm->call_dl_resolve = call_dl_resolve;
11804+ up_write(&current->mm->mmap_sem);
11805+
11806+emulate:
11807+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11808+ regs->pc = call_dl_resolve;
11809+ regs->npc = addr+4;
11810+ return 3;
11811+ }
11812+#endif
11813+
11814+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11815+ if ((save & 0xFFC00000U) == 0x05000000U &&
11816+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11817+ nop == 0x01000000U)
11818+ {
11819+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11820+ regs->u_regs[UREG_G2] = addr + 4;
11821+ addr = (save & 0x003FFFFFU) << 10;
11822+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11823+ regs->pc = addr;
11824+ regs->npc = addr+4;
11825+ return 3;
11826+ }
11827+ }
11828+ } while (0);
11829+
11830+ do { /* PaX: unpatched PLT emulation step 2 */
11831+ unsigned int save, call, nop;
11832+
11833+ err = get_user(save, (unsigned int *)(regs->pc-4));
11834+ err |= get_user(call, (unsigned int *)regs->pc);
11835+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
11836+ if (err)
11837+ break;
11838+
11839+ if (save == 0x9DE3BFA8U &&
11840+ (call & 0xC0000000U) == 0x40000000U &&
11841+ nop == 0x01000000U)
11842+ {
11843+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
11844+
11845+ regs->u_regs[UREG_RETPC] = regs->pc;
11846+ regs->pc = dl_resolve;
11847+ regs->npc = dl_resolve+4;
11848+ return 3;
11849+ }
11850+ } while (0);
11851+#endif
11852+
11853+ return 1;
11854+}
11855+
11856+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11857+{
11858+ unsigned long i;
11859+
11860+ printk(KERN_ERR "PAX: bytes at PC: ");
11861+ for (i = 0; i < 8; i++) {
11862+ unsigned int c;
11863+ if (get_user(c, (unsigned int *)pc+i))
11864+ printk(KERN_CONT "???????? ");
11865+ else
11866+ printk(KERN_CONT "%08x ", c);
11867+ }
11868+ printk("\n");
11869+}
11870+#endif
11871+
11872 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
11873 int text_fault)
11874 {
11875@@ -226,6 +500,24 @@ good_area:
11876 if (!(vma->vm_flags & VM_WRITE))
11877 goto bad_area;
11878 } else {
11879+
11880+#ifdef CONFIG_PAX_PAGEEXEC
11881+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
11882+ up_read(&mm->mmap_sem);
11883+ switch (pax_handle_fetch_fault(regs)) {
11884+
11885+#ifdef CONFIG_PAX_EMUPLT
11886+ case 2:
11887+ case 3:
11888+ return;
11889+#endif
11890+
11891+ }
11892+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
11893+ do_group_exit(SIGKILL);
11894+ }
11895+#endif
11896+
11897 /* Allow reads even for write-only mappings */
11898 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
11899 goto bad_area;
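
All of the PLT decoders added above share one idiom: a branch or jmpl displacement is sign-extended with ((insn | ~mask) ^ sign) + sign applied to the raw instruction word. The OR forces every bit above the field to one; the XOR/ADD pair then either carries them away (positive displacement) or leaves them standing as the sign extension (negative displacement). A self-contained check that this matches the textbook form, for the 13-bit simm13 field of jmpl:

#include <assert.h>
#include <stdio.h>

/* The idiom used in the patch for jmpl's 13-bit immediate. */
static unsigned int simm13_pax(unsigned int insn)
{
        return ((insn | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U;
}

/* Textbook sign extension of the same field. */
static unsigned int simm13_classic(unsigned int insn)
{
        unsigned int x = insn & 0x00001FFFU;

        return (x ^ 0x00001000U) - 0x00001000U;
}

int main(void)
{
        /* Upper instruction bits are irrelevant: the OR absorbs them. */
        for (unsigned int insn = 0; insn < 0x01000000U; insn += 0x123U)
                assert(simm13_pax(insn) == simm13_classic(insn));
        printf("both forms agree\n");
        return 0;
}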
11900diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
11901index 587cd05..fbdf17a 100644
11902--- a/arch/sparc/mm/fault_64.c
11903+++ b/arch/sparc/mm/fault_64.c
11904@@ -22,6 +22,9 @@
11905 #include <linux/kdebug.h>
11906 #include <linux/percpu.h>
11907 #include <linux/context_tracking.h>
11908+#include <linux/slab.h>
11909+#include <linux/pagemap.h>
11910+#include <linux/compiler.h>
11911
11912 #include <asm/page.h>
11913 #include <asm/pgtable.h>
11914@@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
11915 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
11916 regs->tpc);
11917 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
11918- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
11919+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
11920 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
11921 dump_stack();
11922 unhandled_fault(regs->tpc, current, regs);
11923@@ -279,6 +282,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
11924 show_regs(regs);
11925 }
11926
11927+#ifdef CONFIG_PAX_PAGEEXEC
11928+#ifdef CONFIG_PAX_DLRESOLVE
11929+static void pax_emuplt_close(struct vm_area_struct *vma)
11930+{
11931+ vma->vm_mm->call_dl_resolve = 0UL;
11932+}
11933+
11934+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11935+{
11936+ unsigned int *kaddr;
11937+
11938+ vmf->page = alloc_page(GFP_HIGHUSER);
11939+ if (!vmf->page)
11940+ return VM_FAULT_OOM;
11941+
11942+ kaddr = kmap(vmf->page);
11943+ memset(kaddr, 0, PAGE_SIZE);
11944+ kaddr[0] = 0x9DE3BFA8U; /* save */
11945+ flush_dcache_page(vmf->page);
11946+ kunmap(vmf->page);
11947+ return VM_FAULT_MAJOR;
11948+}
11949+
11950+static const struct vm_operations_struct pax_vm_ops = {
11951+ .close = pax_emuplt_close,
11952+ .fault = pax_emuplt_fault
11953+};
11954+
11955+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11956+{
11957+ int ret;
11958+
11959+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11960+ vma->vm_mm = current->mm;
11961+ vma->vm_start = addr;
11962+ vma->vm_end = addr + PAGE_SIZE;
11963+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11964+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11965+ vma->vm_ops = &pax_vm_ops;
11966+
11967+ ret = insert_vm_struct(current->mm, vma);
11968+ if (ret)
11969+ return ret;
11970+
11971+ ++current->mm->total_vm;
11972+ return 0;
11973+}
11974+#endif
11975+
11976+/*
11977+ * PaX: decide what to do with offenders (regs->tpc = fault address)
11978+ *
11979+ * returns 1 when task should be killed
11980+ * 2 when patched PLT trampoline was detected
11981+ * 3 when unpatched PLT trampoline was detected
11982+ */
11983+static int pax_handle_fetch_fault(struct pt_regs *regs)
11984+{
11985+
11986+#ifdef CONFIG_PAX_EMUPLT
11987+ int err;
11988+
11989+ do { /* PaX: patched PLT emulation #1 */
11990+ unsigned int sethi1, sethi2, jmpl;
11991+
11992+ err = get_user(sethi1, (unsigned int *)regs->tpc);
11993+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
11994+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
11995+
11996+ if (err)
11997+ break;
11998+
11999+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
12000+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
12001+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
12002+ {
12003+ unsigned long addr;
12004+
12005+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
12006+ addr = regs->u_regs[UREG_G1];
12007+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12008+
12009+ if (test_thread_flag(TIF_32BIT))
12010+ addr &= 0xFFFFFFFFUL;
12011+
12012+ regs->tpc = addr;
12013+ regs->tnpc = addr+4;
12014+ return 2;
12015+ }
12016+ } while (0);
12017+
12018+ do { /* PaX: patched PLT emulation #2 */
12019+ unsigned int ba;
12020+
12021+ err = get_user(ba, (unsigned int *)regs->tpc);
12022+
12023+ if (err)
12024+ break;
12025+
12026+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
12027+ unsigned long addr;
12028+
12029+ if ((ba & 0xFFC00000U) == 0x30800000U)
12030+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
12031+ else
12032+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12033+
12034+ if (test_thread_flag(TIF_32BIT))
12035+ addr &= 0xFFFFFFFFUL;
12036+
12037+ regs->tpc = addr;
12038+ regs->tnpc = addr+4;
12039+ return 2;
12040+ }
12041+ } while (0);
12042+
12043+ do { /* PaX: patched PLT emulation #3 */
12044+ unsigned int sethi, bajmpl, nop;
12045+
12046+ err = get_user(sethi, (unsigned int *)regs->tpc);
12047+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
12048+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12049+
12050+ if (err)
12051+ break;
12052+
12053+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12054+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
12055+ nop == 0x01000000U)
12056+ {
12057+ unsigned long addr;
12058+
12059+ addr = (sethi & 0x003FFFFFU) << 10;
12060+ regs->u_regs[UREG_G1] = addr;
12061+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
12062+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12063+ else
12064+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12065+
12066+ if (test_thread_flag(TIF_32BIT))
12067+ addr &= 0xFFFFFFFFUL;
12068+
12069+ regs->tpc = addr;
12070+ regs->tnpc = addr+4;
12071+ return 2;
12072+ }
12073+ } while (0);
12074+
12075+ do { /* PaX: patched PLT emulation #4 */
12076+ unsigned int sethi, mov1, call, mov2;
12077+
12078+ err = get_user(sethi, (unsigned int *)regs->tpc);
12079+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
12080+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
12081+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
12082+
12083+ if (err)
12084+ break;
12085+
12086+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12087+ mov1 == 0x8210000FU &&
12088+ (call & 0xC0000000U) == 0x40000000U &&
12089+ mov2 == 0x9E100001U)
12090+ {
12091+ unsigned long addr;
12092+
12093+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
12094+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12095+
12096+ if (test_thread_flag(TIF_32BIT))
12097+ addr &= 0xFFFFFFFFUL;
12098+
12099+ regs->tpc = addr;
12100+ regs->tnpc = addr+4;
12101+ return 2;
12102+ }
12103+ } while (0);
12104+
12105+ do { /* PaX: patched PLT emulation #5 */
12106+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
12107+
12108+ err = get_user(sethi, (unsigned int *)regs->tpc);
12109+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
12110+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
12111+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
12112+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
12113+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
12114+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
12115+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
12116+
12117+ if (err)
12118+ break;
12119+
12120+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12121+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
12122+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12123+ (or1 & 0xFFFFE000U) == 0x82106000U &&
12124+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12125+ sllx == 0x83287020U &&
12126+ jmpl == 0x81C04005U &&
12127+ nop == 0x01000000U)
12128+ {
12129+ unsigned long addr;
12130+
12131+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12132+ regs->u_regs[UREG_G1] <<= 32;
12133+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12134+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
12135+ regs->tpc = addr;
12136+ regs->tnpc = addr+4;
12137+ return 2;
12138+ }
12139+ } while (0);
12140+
12141+ do { /* PaX: patched PLT emulation #6 */
12142+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
12143+
12144+ err = get_user(sethi, (unsigned int *)regs->tpc);
12145+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
12146+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
12147+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
12148+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
12149+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
12150+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
12151+
12152+ if (err)
12153+ break;
12154+
12155+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12156+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
12157+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12158+ sllx == 0x83287020U &&
12159+ (or & 0xFFFFE000U) == 0x8A116000U &&
12160+ jmpl == 0x81C04005U &&
12161+ nop == 0x01000000U)
12162+ {
12163+ unsigned long addr;
12164+
12165+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
12166+ regs->u_regs[UREG_G1] <<= 32;
12167+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
12168+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
12169+ regs->tpc = addr;
12170+ regs->tnpc = addr+4;
12171+ return 2;
12172+ }
12173+ } while (0);
12174+
12175+ do { /* PaX: unpatched PLT emulation step 1 */
12176+ unsigned int sethi, ba, nop;
12177+
12178+ err = get_user(sethi, (unsigned int *)regs->tpc);
12179+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12180+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12181+
12182+ if (err)
12183+ break;
12184+
12185+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12186+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
12187+ nop == 0x01000000U)
12188+ {
12189+ unsigned long addr;
12190+ unsigned int save, call;
12191+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
12192+
12193+ if ((ba & 0xFFC00000U) == 0x30800000U)
12194+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
12195+ else
12196+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12197+
12198+ if (test_thread_flag(TIF_32BIT))
12199+ addr &= 0xFFFFFFFFUL;
12200+
12201+ err = get_user(save, (unsigned int *)addr);
12202+ err |= get_user(call, (unsigned int *)(addr+4));
12203+ err |= get_user(nop, (unsigned int *)(addr+8));
12204+ if (err)
12205+ break;
12206+
12207+#ifdef CONFIG_PAX_DLRESOLVE
12208+ if (save == 0x9DE3BFA8U &&
12209+ (call & 0xC0000000U) == 0x40000000U &&
12210+ nop == 0x01000000U)
12211+ {
12212+ struct vm_area_struct *vma;
12213+ unsigned long call_dl_resolve;
12214+
12215+ down_read(&current->mm->mmap_sem);
12216+ call_dl_resolve = current->mm->call_dl_resolve;
12217+ up_read(&current->mm->mmap_sem);
12218+ if (likely(call_dl_resolve))
12219+ goto emulate;
12220+
12221+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
12222+
12223+ down_write(&current->mm->mmap_sem);
12224+ if (current->mm->call_dl_resolve) {
12225+ call_dl_resolve = current->mm->call_dl_resolve;
12226+ up_write(&current->mm->mmap_sem);
12227+ if (vma)
12228+ kmem_cache_free(vm_area_cachep, vma);
12229+ goto emulate;
12230+ }
12231+
12232+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
12233+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
12234+ up_write(&current->mm->mmap_sem);
12235+ if (vma)
12236+ kmem_cache_free(vm_area_cachep, vma);
12237+ return 1;
12238+ }
12239+
12240+ if (pax_insert_vma(vma, call_dl_resolve)) {
12241+ up_write(&current->mm->mmap_sem);
12242+ kmem_cache_free(vm_area_cachep, vma);
12243+ return 1;
12244+ }
12245+
12246+ current->mm->call_dl_resolve = call_dl_resolve;
12247+ up_write(&current->mm->mmap_sem);
12248+
12249+emulate:
12250+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12251+ regs->tpc = call_dl_resolve;
12252+ regs->tnpc = addr+4;
12253+ return 3;
12254+ }
12255+#endif
12256+
12257+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
12258+ if ((save & 0xFFC00000U) == 0x05000000U &&
12259+ (call & 0xFFFFE000U) == 0x85C0A000U &&
12260+ nop == 0x01000000U)
12261+ {
12262+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12263+ regs->u_regs[UREG_G2] = addr + 4;
12264+ addr = (save & 0x003FFFFFU) << 10;
12265+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12266+
12267+ if (test_thread_flag(TIF_32BIT))
12268+ addr &= 0xFFFFFFFFUL;
12269+
12270+ regs->tpc = addr;
12271+ regs->tnpc = addr+4;
12272+ return 3;
12273+ }
12274+
12275+ /* PaX: 64-bit PLT stub */
12276+ err = get_user(sethi1, (unsigned int *)addr);
12277+ err |= get_user(sethi2, (unsigned int *)(addr+4));
12278+ err |= get_user(or1, (unsigned int *)(addr+8));
12279+ err |= get_user(or2, (unsigned int *)(addr+12));
12280+ err |= get_user(sllx, (unsigned int *)(addr+16));
12281+ err |= get_user(add, (unsigned int *)(addr+20));
12282+ err |= get_user(jmpl, (unsigned int *)(addr+24));
12283+ err |= get_user(nop, (unsigned int *)(addr+28));
12284+ if (err)
12285+ break;
12286+
12287+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
12288+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12289+ (or1 & 0xFFFFE000U) == 0x88112000U &&
12290+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12291+ sllx == 0x89293020U &&
12292+ add == 0x8A010005U &&
12293+ jmpl == 0x89C14000U &&
12294+ nop == 0x01000000U)
12295+ {
12296+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12297+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12298+ regs->u_regs[UREG_G4] <<= 32;
12299+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12300+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
12301+ regs->u_regs[UREG_G4] = addr + 24;
12302+ addr = regs->u_regs[UREG_G5];
12303+ regs->tpc = addr;
12304+ regs->tnpc = addr+4;
12305+ return 3;
12306+ }
12307+ }
12308+ } while (0);
12309+
12310+#ifdef CONFIG_PAX_DLRESOLVE
12311+ do { /* PaX: unpatched PLT emulation step 2 */
12312+ unsigned int save, call, nop;
12313+
12314+ err = get_user(save, (unsigned int *)(regs->tpc-4));
12315+ err |= get_user(call, (unsigned int *)regs->tpc);
12316+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
12317+ if (err)
12318+ break;
12319+
12320+ if (save == 0x9DE3BFA8U &&
12321+ (call & 0xC0000000U) == 0x40000000U &&
12322+ nop == 0x01000000U)
12323+ {
12324+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12325+
12326+ if (test_thread_flag(TIF_32BIT))
12327+ dl_resolve &= 0xFFFFFFFFUL;
12328+
12329+ regs->u_regs[UREG_RETPC] = regs->tpc;
12330+ regs->tpc = dl_resolve;
12331+ regs->tnpc = dl_resolve+4;
12332+ return 3;
12333+ }
12334+ } while (0);
12335+#endif
12336+
12337+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
12338+ unsigned int sethi, ba, nop;
12339+
12340+ err = get_user(sethi, (unsigned int *)regs->tpc);
12341+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12342+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12343+
12344+ if (err)
12345+ break;
12346+
12347+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12348+ (ba & 0xFFF00000U) == 0x30600000U &&
12349+ nop == 0x01000000U)
12350+ {
12351+ unsigned long addr;
12352+
12353+ addr = (sethi & 0x003FFFFFU) << 10;
12354+ regs->u_regs[UREG_G1] = addr;
12355+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12356+
12357+ if (test_thread_flag(TIF_32BIT))
12358+ addr &= 0xFFFFFFFFUL;
12359+
12360+ regs->tpc = addr;
12361+ regs->tnpc = addr+4;
12362+ return 2;
12363+ }
12364+ } while (0);
12365+
12366+#endif
12367+
12368+ return 1;
12369+}
12370+
12371+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
12372+{
12373+ unsigned long i;
12374+
12375+ printk(KERN_ERR "PAX: bytes at PC: ");
12376+ for (i = 0; i < 8; i++) {
12377+ unsigned int c;
12378+ if (get_user(c, (unsigned int *)pc+i))
12379+ printk(KERN_CONT "???????? ");
12380+ else
12381+ printk(KERN_CONT "%08x ", c);
12382+ }
12383+ printk("\n");
12384+}
12385+#endif
12386+
12387 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
12388 {
12389 enum ctx_state prev_state = exception_enter();
12390@@ -350,6 +813,29 @@ retry:
12391 if (!vma)
12392 goto bad_area;
12393
12394+#ifdef CONFIG_PAX_PAGEEXEC
12395+ /* PaX: detect ITLB misses on non-exec pages */
12396+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
12397+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
12398+ {
12399+ if (address != regs->tpc)
12400+ goto good_area;
12401+
12402+ up_read(&mm->mmap_sem);
12403+ switch (pax_handle_fetch_fault(regs)) {
12404+
12405+#ifdef CONFIG_PAX_EMUPLT
12406+ case 2:
12407+ case 3:
12408+ return;
12409+#endif
12410+
12411+ }
12412+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
12413+ do_group_exit(SIGKILL);
12414+ }
12415+#endif
12416+
12417 /* Pure DTLB misses do not tell us whether the fault causing
12418 * load/store/atomic was a write or not, it only says that there
12419 * was no match. So in such a case we (carefully) read the
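
The 64-bit handler below mirrors the 32-bit one, with tpc/tnpc in place of pc/npc plus extra decoders for the 64-bit PLT formats, and both fault handlers consume the same small contract spelled out in the comment above pax_handle_fetch_fault(): 1 means kill the task, 2 and 3 mean a PLT trampoline was emulated and the task can simply resume. A mock of the caller's dispatch, with the fault handler stubbed out:

#include <stdio.h>

/* Return values taken from the comment in the patch. */
enum { PAX_KILL = 1, PAX_PATCHED_PLT = 2, PAX_UNPATCHED_PLT = 3 };

static int pax_handle_fetch_fault_stub(void)
{
        return PAX_PATCHED_PLT;
}

int main(void)
{
        switch (pax_handle_fetch_fault_stub()) {
        case PAX_PATCHED_PLT:
        case PAX_UNPATCHED_PLT:
                puts("trampoline emulated, resuming task");
                return 0;
        }
        puts("reporting fault and sending SIGKILL");
        return 1;
}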
12420diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
12421index d329537..2c3746a 100644
12422--- a/arch/sparc/mm/hugetlbpage.c
12423+++ b/arch/sparc/mm/hugetlbpage.c
12424@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12425 unsigned long addr,
12426 unsigned long len,
12427 unsigned long pgoff,
12428- unsigned long flags)
12429+ unsigned long flags,
12430+ unsigned long offset)
12431 {
12432+ struct mm_struct *mm = current->mm;
12433 unsigned long task_size = TASK_SIZE;
12434 struct vm_unmapped_area_info info;
12435
12436@@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12437
12438 info.flags = 0;
12439 info.length = len;
12440- info.low_limit = TASK_UNMAPPED_BASE;
12441+ info.low_limit = mm->mmap_base;
12442 info.high_limit = min(task_size, VA_EXCLUDE_START);
12443 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12444 info.align_offset = 0;
12445+ info.threadstack_offset = offset;
12446 addr = vm_unmapped_area(&info);
12447
12448 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
12449 VM_BUG_ON(addr != -ENOMEM);
12450 info.low_limit = VA_EXCLUDE_END;
12451+
12452+#ifdef CONFIG_PAX_RANDMMAP
12453+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12454+ info.low_limit += mm->delta_mmap;
12455+#endif
12456+
12457 info.high_limit = task_size;
12458 addr = vm_unmapped_area(&info);
12459 }
12460@@ -55,7 +64,8 @@ static unsigned long
12461 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12462 const unsigned long len,
12463 const unsigned long pgoff,
12464- const unsigned long flags)
12465+ const unsigned long flags,
12466+ const unsigned long offset)
12467 {
12468 struct mm_struct *mm = current->mm;
12469 unsigned long addr = addr0;
12470@@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12471 info.high_limit = mm->mmap_base;
12472 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12473 info.align_offset = 0;
12474+ info.threadstack_offset = offset;
12475 addr = vm_unmapped_area(&info);
12476
12477 /*
12478@@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12479 VM_BUG_ON(addr != -ENOMEM);
12480 info.flags = 0;
12481 info.low_limit = TASK_UNMAPPED_BASE;
12482+
12483+#ifdef CONFIG_PAX_RANDMMAP
12484+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12485+ info.low_limit += mm->delta_mmap;
12486+#endif
12487+
12488 info.high_limit = STACK_TOP32;
12489 addr = vm_unmapped_area(&info);
12490 }
12491@@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12492 struct mm_struct *mm = current->mm;
12493 struct vm_area_struct *vma;
12494 unsigned long task_size = TASK_SIZE;
12495+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
12496
12497 if (test_thread_flag(TIF_32BIT))
12498 task_size = STACK_TOP32;
12499@@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12500 return addr;
12501 }
12502
12503+#ifdef CONFIG_PAX_RANDMMAP
12504+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
12505+#endif
12506+
12507 if (addr) {
12508 addr = ALIGN(addr, HPAGE_SIZE);
12509 vma = find_vma(mm, addr);
12510- if (task_size - len >= addr &&
12511- (!vma || addr + len <= vma->vm_start))
12512+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
12513 return addr;
12514 }
12515 if (mm->get_unmapped_area == arch_get_unmapped_area)
12516 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
12517- pgoff, flags);
12518+ pgoff, flags, offset);
12519 else
12520 return hugetlb_get_unmapped_area_topdown(file, addr, len,
12521- pgoff, flags);
12522+ pgoff, flags, offset);
12523 }
12524
12525 pte_t *huge_pte_alloc(struct mm_struct *mm,
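
Two recurring grsecurity changes show up in this hugetlb allocator: info.low_limit is shifted by mm->delta_mmap when RANDMMAP is active, and the open-coded "addr + len <= vma->vm_start" test is replaced by check_heap_stack_gap(), which also enforces a guard gap below the next mapping. Both helpers are defined elsewhere in the patch; a rough userspace sketch of the extra property the gap check adds (simplified, ignoring the per-thread stack offset passed down as 'offset'):

#include <stdio.h>

struct vma_mock { unsigned long vm_start, vm_end; };

/* Sketch only: old test plus a configurable guard gap before the
 * following (typically stack) VMA. */
static int gap_ok(const struct vma_mock *next, unsigned long addr,
                  unsigned long len, unsigned long gap)
{
        if (!next)
                return 1;               /* nothing mapped above us */
        return addr + len + gap <= next->vm_start;
}

int main(void)
{
        struct vma_mock stack = { 0xbffe0000UL, 0xc0000000UL };

        printf("%d\n", gap_ok(&stack, 0xbf000000UL, 0x1000UL, 0x10000UL));
        return 0;
}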
12526diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
12527index 2cfb0f2..e917d9f 100644
12528--- a/arch/sparc/mm/init_64.c
12529+++ b/arch/sparc/mm/init_64.c
12530@@ -189,9 +189,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
12531 int num_kernel_image_mappings;
12532
12533 #ifdef CONFIG_DEBUG_DCFLUSH
12534-atomic_t dcpage_flushes = ATOMIC_INIT(0);
12535+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
12536 #ifdef CONFIG_SMP
12537-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12538+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12539 #endif
12540 #endif
12541
12542@@ -199,7 +199,7 @@ inline void flush_dcache_page_impl(struct page *page)
12543 {
12544 BUG_ON(tlb_type == hypervisor);
12545 #ifdef CONFIG_DEBUG_DCFLUSH
12546- atomic_inc(&dcpage_flushes);
12547+ atomic_inc_unchecked(&dcpage_flushes);
12548 #endif
12549
12550 #ifdef DCACHE_ALIASING_POSSIBLE
12551@@ -471,10 +471,10 @@ void mmu_info(struct seq_file *m)
12552
12553 #ifdef CONFIG_DEBUG_DCFLUSH
12554 seq_printf(m, "DCPageFlushes\t: %d\n",
12555- atomic_read(&dcpage_flushes));
12556+ atomic_read_unchecked(&dcpage_flushes));
12557 #ifdef CONFIG_SMP
12558 seq_printf(m, "DCPageFlushesXC\t: %d\n",
12559- atomic_read(&dcpage_flushes_xcall));
12560+ atomic_read_unchecked(&dcpage_flushes_xcall));
12561 #endif /* CONFIG_SMP */
12562 #endif /* CONFIG_DEBUG_DCFLUSH */
12563 }
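
dcpage_flushes and dcpage_flushes_xcall are pure debug statistics, so wrap-around is harmless and even expected under load; converting them to atomic_unchecked_t with the matching _unchecked accessors keeps them out of the REFCOUNT trap's way. The hazard the conversion sidesteps, in a few lines of plain C:

#include <limits.h>
#include <stdio.h>

int main(void)
{
        /* A long-lived event counter eventually reaches INT_MAX; a
         * checked atomic would trap on the next increment, so counters
         * that are allowed to wrap use the _unchecked flavour. */
        unsigned int ctr = (unsigned int)INT_MAX;

        ctr += 1;                       /* well-defined unsigned wrap */
        printf("%d\n", (int)ctr);       /* INT_MIN on two's complement */
        return 0;
}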
12564diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
12565index 4f3006b..453f625f 100644
12566--- a/arch/tile/Kconfig
12567+++ b/arch/tile/Kconfig
12568@@ -192,6 +192,7 @@ source "kernel/Kconfig.hz"
12569
12570 config KEXEC
12571 bool "kexec system call"
12572+ depends on !GRKERNSEC_KMEM
12573 ---help---
12574 kexec is a system call that implements the ability to shutdown your
12575 current kernel, and to start another kernel. It is like a reboot
12576diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
12577index 7b11c5f..755a026 100644
12578--- a/arch/tile/include/asm/atomic_64.h
12579+++ b/arch/tile/include/asm/atomic_64.h
12580@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
12581
12582 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
12583
12584+#define atomic64_read_unchecked(v) atomic64_read(v)
12585+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
12586+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
12587+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
12588+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
12589+#define atomic64_inc_unchecked(v) atomic64_inc(v)
12590+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
12591+#define atomic64_dec_unchecked(v) atomic64_dec(v)
12592+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
12593+
12594 /* Define this to indicate that cmpxchg is an efficient operation. */
12595 #define __HAVE_ARCH_CMPXCHG
12596
12597diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
12598index 6160761..00cac88 100644
12599--- a/arch/tile/include/asm/cache.h
12600+++ b/arch/tile/include/asm/cache.h
12601@@ -15,11 +15,12 @@
12602 #ifndef _ASM_TILE_CACHE_H
12603 #define _ASM_TILE_CACHE_H
12604
12605+#include <linux/const.h>
12606 #include <arch/chip.h>
12607
12608 /* bytes per L1 data cache line */
12609 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
12610-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12611+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12612
12613 /* bytes per L2 cache line */
12614 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
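
L1_CACHE_BYTES is used from both C and assembly, and as a bare (1 << SHIFT) it is a signed int; _AC(1,UL) makes it unsigned long on the C side, presumably so alignment arithmetic on it is carried out in the expected width, while staying a plain number in assembly. The machinery, as defined in include/uapi/linux/const.h (the SHIFT value below is an example, not from this header):

/* From include/uapi/linux/const.h: the UL suffix is pasted on in C and
 * dropped under __ASSEMBLY__, so one header serves both languages. */
#ifdef __ASSEMBLY__
#define _AC(X, Y)       X
#else
#define __AC(X, Y)      (X##Y)
#define _AC(X, Y)       __AC(X, Y)
#endif

#define L1_CACHE_SHIFT  6               /* example value */
#define L1_CACHE_BYTES  (_AC(1,UL) << L1_CACHE_SHIFT)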
12615diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
12616index b6cde32..c0cb736 100644
12617--- a/arch/tile/include/asm/uaccess.h
12618+++ b/arch/tile/include/asm/uaccess.h
12619@@ -414,9 +414,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
12620 const void __user *from,
12621 unsigned long n)
12622 {
12623- int sz = __compiletime_object_size(to);
12624+ size_t sz = __compiletime_object_size(to);
12625
12626- if (likely(sz == -1 || sz >= n))
12627+ if (likely(sz == (size_t)-1 || sz >= n))
12628 n = _copy_from_user(to, from, n);
12629 else
12630 copy_from_user_overflow();
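
__compiletime_object_size() yields (size_t)-1 when the object size is unknown, so storing the result in an int and testing sz == -1 mixes signedness with the unsigned n it is compared against. The patch keeps the whole check in size_t. A small demo of the implicit conversions the old form leaned on:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
        size_t unknown = (size_t)-1;    /* "size not known at compile time" */
        int sz = (int)unknown;          /* old code truncated into an int */
        unsigned long n = 16;

        /* Both tests pass today, but the int version only because -1 is
         * converted back to ULONG_MAX by the usual arithmetic
         * conversions; the patched form needs no such luck. */
        printf("old: %d\n", sz == -1 || (unsigned long)sz >= n);
        printf("new: %d\n", unknown == (size_t)-1 || unknown >= n);
        return 0;
}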
12631diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
12632index e514899..f8743c4 100644
12633--- a/arch/tile/mm/hugetlbpage.c
12634+++ b/arch/tile/mm/hugetlbpage.c
12635@@ -207,6 +207,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
12636 info.high_limit = TASK_SIZE;
12637 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12638 info.align_offset = 0;
12639+ info.threadstack_offset = 0;
12640 return vm_unmapped_area(&info);
12641 }
12642
12643@@ -224,6 +225,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
12644 info.high_limit = current->mm->mmap_base;
12645 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12646 info.align_offset = 0;
12647+ info.threadstack_offset = 0;
12648 addr = vm_unmapped_area(&info);
12649
12650 /*
12651diff --git a/arch/um/Makefile b/arch/um/Makefile
12652index e4b1a96..16162f8 100644
12653--- a/arch/um/Makefile
12654+++ b/arch/um/Makefile
12655@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
12656 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
12657 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
12658
12659+ifdef CONSTIFY_PLUGIN
12660+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12661+endif
12662+
12663 #This will adjust *FLAGS accordingly to the platform.
12664 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
12665
12666diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
12667index 19e1bdd..3665b77 100644
12668--- a/arch/um/include/asm/cache.h
12669+++ b/arch/um/include/asm/cache.h
12670@@ -1,6 +1,7 @@
12671 #ifndef __UM_CACHE_H
12672 #define __UM_CACHE_H
12673
12674+#include <linux/const.h>
12675
12676 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
12677 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12678@@ -12,6 +13,6 @@
12679 # define L1_CACHE_SHIFT 5
12680 #endif
12681
12682-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12683+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12684
12685 #endif
12686diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
12687index 2e0a6b1..a64d0f5 100644
12688--- a/arch/um/include/asm/kmap_types.h
12689+++ b/arch/um/include/asm/kmap_types.h
12690@@ -8,6 +8,6 @@
12691
12692 /* No more #include "asm/arch/kmap_types.h" ! */
12693
12694-#define KM_TYPE_NR 14
12695+#define KM_TYPE_NR 15
12696
12697 #endif
12698diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
12699index 5ff53d9..5850cdf 100644
12700--- a/arch/um/include/asm/page.h
12701+++ b/arch/um/include/asm/page.h
12702@@ -14,6 +14,9 @@
12703 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
12704 #define PAGE_MASK (~(PAGE_SIZE-1))
12705
12706+#define ktla_ktva(addr) (addr)
12707+#define ktva_ktla(addr) (addr)
12708+
12709 #ifndef __ASSEMBLY__
12710
12711 struct page;
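
ktla_ktva()/ktva_ktla() translate between the kernel text's linear and virtual addresses on architectures where KERNEXEC maps the text in two places; everywhere else, including UML here, they are identity macros so shared code can call them unconditionally. The shape of the pattern, where SPLIT_TEXT_MAPPING and text_delta are invented names for illustration and not the real x86 definitions:

#ifdef SPLIT_TEXT_MAPPING
extern unsigned long text_delta;
#define ktla_ktva(addr) ((addr) + text_delta)
#define ktva_ktla(addr) ((addr) - text_delta)
#else
#define ktla_ktva(addr) (addr)
#define ktva_ktla(addr) (addr)
#endif

int main(void)
{
        return (int)(ktva_ktla(ktla_ktva(0x1000UL)) != 0x1000UL);
}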
12712diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
12713index 0032f92..cd151e0 100644
12714--- a/arch/um/include/asm/pgtable-3level.h
12715+++ b/arch/um/include/asm/pgtable-3level.h
12716@@ -58,6 +58,7 @@
12717 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
12718 #define pud_populate(mm, pud, pmd) \
12719 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
12720+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
12721
12722 #ifdef CONFIG_64BIT
12723 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
12724diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
12725index f17bca8..48adb87 100644
12726--- a/arch/um/kernel/process.c
12727+++ b/arch/um/kernel/process.c
12728@@ -356,22 +356,6 @@ int singlestepping(void * t)
12729 return 2;
12730 }
12731
12732-/*
12733- * Only x86 and x86_64 have an arch_align_stack().
12734- * All other arches have "#define arch_align_stack(x) (x)"
12735- * in their asm/exec.h
12736- * As this is included in UML from asm-um/system-generic.h,
12737- * we can use it to behave as the subarch does.
12738- */
12739-#ifndef arch_align_stack
12740-unsigned long arch_align_stack(unsigned long sp)
12741-{
12742- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
12743- sp -= get_random_int() % 8192;
12744- return sp & ~0xf;
12745-}
12746-#endif
12747-
12748 unsigned long get_wchan(struct task_struct *p)
12749 {
12750 unsigned long stack_page, sp, ip;
12751diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
12752index ad8f795..2c7eec6 100644
12753--- a/arch/unicore32/include/asm/cache.h
12754+++ b/arch/unicore32/include/asm/cache.h
12755@@ -12,8 +12,10 @@
12756 #ifndef __UNICORE_CACHE_H__
12757 #define __UNICORE_CACHE_H__
12758
12759-#define L1_CACHE_SHIFT (5)
12760-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12761+#include <linux/const.h>
12762+
12763+#define L1_CACHE_SHIFT 5
12764+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12765
12766 /*
12767 * Memory returned by kmalloc() may be used for DMA, so we must make
12768diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
12769index 27adfd9..bc3551d 100644
12770--- a/arch/x86/Kconfig
12771+++ b/arch/x86/Kconfig
12772@@ -128,7 +128,7 @@ config X86
12773 select RTC_LIB
12774 select HAVE_DEBUG_STACKOVERFLOW
12775 select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
12776- select HAVE_CC_STACKPROTECTOR
12777+ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
12778 select GENERIC_CPU_AUTOPROBE
12779 select HAVE_ARCH_AUDITSYSCALL
12780 select ARCH_SUPPORTS_ATOMIC_RMW
12781@@ -253,7 +253,7 @@ config X86_HT
12782
12783 config X86_32_LAZY_GS
12784 def_bool y
12785- depends on X86_32 && !CC_STACKPROTECTOR
12786+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
12787
12788 config ARCH_HWEIGHT_CFLAGS
12789 string
12790@@ -549,6 +549,7 @@ config SCHED_OMIT_FRAME_POINTER
12791
12792 menuconfig HYPERVISOR_GUEST
12793 bool "Linux guest support"
12794+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
12795 ---help---
12796 Say Y here to enable options for running Linux under various hyper-
12797 visors. This option enables basic hypervisor detection and platform
12798@@ -1076,6 +1077,7 @@ choice
12799
12800 config NOHIGHMEM
12801 bool "off"
12802+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12803 ---help---
12804 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
12805 However, the address space of 32-bit x86 processors is only 4
12806@@ -1112,6 +1114,7 @@ config NOHIGHMEM
12807
12808 config HIGHMEM4G
12809 bool "4GB"
12810+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12811 ---help---
12812 Select this if you have a 32-bit processor and between 1 and 4
12813 gigabytes of physical RAM.
12814@@ -1164,7 +1167,7 @@ config PAGE_OFFSET
12815 hex
12816 default 0xB0000000 if VMSPLIT_3G_OPT
12817 default 0x80000000 if VMSPLIT_2G
12818- default 0x78000000 if VMSPLIT_2G_OPT
12819+ default 0x70000000 if VMSPLIT_2G_OPT
12820 default 0x40000000 if VMSPLIT_1G
12821 default 0xC0000000
12822 depends on X86_32
12823@@ -1578,6 +1581,7 @@ source kernel/Kconfig.hz
12824
12825 config KEXEC
12826 bool "kexec system call"
12827+ depends on !GRKERNSEC_KMEM
12828 ---help---
12829 kexec is a system call that implements the ability to shutdown your
12830 current kernel, and to start another kernel. It is like a reboot
12831@@ -1728,7 +1732,9 @@ config X86_NEED_RELOCS
12832
12833 config PHYSICAL_ALIGN
12834 hex "Alignment value to which kernel should be aligned"
12835- default "0x200000"
12836+ default "0x1000000"
12837+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
12838+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
12839 range 0x2000 0x1000000 if X86_32
12840 range 0x200000 0x1000000 if X86_64
12841 ---help---
12842@@ -1811,6 +1817,7 @@ config COMPAT_VDSO
12843 def_bool n
12844 prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
12845 depends on X86_32 || IA32_EMULATION
12846+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
12847 ---help---
12848 Certain buggy versions of glibc will crash if they are
12849 presented with a 32-bit vDSO that is not mapped at the address
12850diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
12851index 6983314..54ad7e8 100644
12852--- a/arch/x86/Kconfig.cpu
12853+++ b/arch/x86/Kconfig.cpu
12854@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
12855
12856 config X86_F00F_BUG
12857 def_bool y
12858- depends on M586MMX || M586TSC || M586 || M486
12859+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
12860
12861 config X86_INVD_BUG
12862 def_bool y
12863@@ -327,7 +327,7 @@ config X86_INVD_BUG
12864
12865 config X86_ALIGNMENT_16
12866 def_bool y
12867- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12868+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12869
12870 config X86_INTEL_USERCOPY
12871 def_bool y
12872@@ -369,7 +369,7 @@ config X86_CMPXCHG64
12873 # generates cmov.
12874 config X86_CMOV
12875 def_bool y
12876- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12877+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12878
12879 config X86_MINIMUM_CPU_FAMILY
12880 int
12881diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
12882index 61bd2ad..50b625d 100644
12883--- a/arch/x86/Kconfig.debug
12884+++ b/arch/x86/Kconfig.debug
12885@@ -93,7 +93,7 @@ config EFI_PGT_DUMP
12886 config DEBUG_RODATA
12887 bool "Write protect kernel read-only data structures"
12888 default y
12889- depends on DEBUG_KERNEL
12890+ depends on DEBUG_KERNEL && BROKEN
12891 ---help---
12892 Mark the kernel read-only data as write-protected in the pagetables,
12893 in order to catch accidental (and incorrect) writes to such const
12894@@ -111,7 +111,7 @@ config DEBUG_RODATA_TEST
12895
12896 config DEBUG_SET_MODULE_RONX
12897 bool "Set loadable kernel module data as NX and text as RO"
12898- depends on MODULES
12899+ depends on MODULES && BROKEN
12900 ---help---
12901 This option helps catch unintended modifications to loadable
12902 kernel module's text and read-only data. It also prevents execution
12903diff --git a/arch/x86/Makefile b/arch/x86/Makefile
12904index 33f71b0..c2cefa2 100644
12905--- a/arch/x86/Makefile
12906+++ b/arch/x86/Makefile
12907@@ -71,9 +71,6 @@ ifeq ($(CONFIG_X86_32),y)
12908 # CPU-specific tuning. Anything which can be shared with UML should go here.
12909 include $(srctree)/arch/x86/Makefile_32.cpu
12910 KBUILD_CFLAGS += $(cflags-y)
12911-
12912- # temporary until string.h is fixed
12913- KBUILD_CFLAGS += -ffreestanding
12914 else
12915 BITS := 64
12916 UTS_MACHINE := x86_64
12917@@ -114,6 +111,9 @@ else
12918 KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
12919 endif
12920
12921+# temporary until string.h is fixed
12922+KBUILD_CFLAGS += -ffreestanding
12923+
12924 # Make sure compiler does not have buggy stack-protector support.
12925 ifdef CONFIG_CC_STACKPROTECTOR
12926 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
12927@@ -271,3 +271,12 @@ define archhelp
12928 echo ' FDINITRD=file initrd for the booted kernel'
12929 echo ' kvmconfig - Enable additional options for guest kernel support'
12930 endef
12931+
12932+define OLD_LD
12933+
12934+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
12935+*** Please upgrade your binutils to 2.18 or newer
12936+endef
12937+
12938+archprepare:
12939+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
12940diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
12941index dbe8dd2..2f0a98f 100644
12942--- a/arch/x86/boot/Makefile
12943+++ b/arch/x86/boot/Makefile
12944@@ -52,6 +52,9 @@ $(obj)/cpustr.h: $(obj)/mkcpustr FORCE
12945 # ---------------------------------------------------------------------------
12946
12947 KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
12948+ifdef CONSTIFY_PLUGIN
12949+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12950+endif
12951 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12952 GCOV_PROFILE := n
12953
12954diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
12955index 878e4b9..20537ab 100644
12956--- a/arch/x86/boot/bitops.h
12957+++ b/arch/x86/boot/bitops.h
12958@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12959 u8 v;
12960 const u32 *p = (const u32 *)addr;
12961
12962- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12963+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12964 return v;
12965 }
12966
12967@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12968
12969 static inline void set_bit(int nr, void *addr)
12970 {
12971- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12972+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12973 }
12974
12975 #endif /* BOOT_BITOPS_H */
12976diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
12977index bd49ec6..94c7f58 100644
12978--- a/arch/x86/boot/boot.h
12979+++ b/arch/x86/boot/boot.h
12980@@ -84,7 +84,7 @@ static inline void io_delay(void)
12981 static inline u16 ds(void)
12982 {
12983 u16 seg;
12984- asm("movw %%ds,%0" : "=rm" (seg));
12985+ asm volatile("movw %%ds,%0" : "=rm" (seg));
12986 return seg;
12987 }
12988
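
A recurring boot-code fix in this patch: asm statements whose only job is a side-effecting read (segment registers here, MSRs in cpucheck.c further down) gain volatile. Without it, GCC may treat an asm with outputs and no inputs as a pure value and fold repeated uses into a single execution. A compilable x86 userspace sketch; build with -O2 and compare the two helpers' call sites:

/* Reading %ds is legal in user mode, so both helpers run as-is.  Only
 * the volatile one is guaranteed to execute once per call under
 * optimisation. */
static inline unsigned short ds_foldable(void)
{
        unsigned short seg;

        asm("movw %%ds,%0" : "=rm" (seg));
        return seg;
}

static inline unsigned short ds_forced(void)
{
        unsigned short seg;

        asm volatile("movw %%ds,%0" : "=rm" (seg));
        return seg;
}

int main(void)
{
        return (ds_foldable() ^ ds_foldable()) | (ds_forced() ^ ds_forced());
}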
12989diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
12990index 0fcd913..3bb5c42 100644
12991--- a/arch/x86/boot/compressed/Makefile
12992+++ b/arch/x86/boot/compressed/Makefile
12993@@ -16,6 +16,9 @@ KBUILD_CFLAGS += $(cflags-y)
12994 KBUILD_CFLAGS += -mno-mmx -mno-sse
12995 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
12996 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
12997+ifdef CONSTIFY_PLUGIN
12998+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12999+endif
13000
13001 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13002 GCOV_PROFILE := n
13003diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
13004index a53440e..c3dbf1e 100644
13005--- a/arch/x86/boot/compressed/efi_stub_32.S
13006+++ b/arch/x86/boot/compressed/efi_stub_32.S
13007@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
13008 * parameter 2, ..., param n. To make things easy, we save the return
13009 * address of efi_call_phys in a global variable.
13010 */
13011- popl %ecx
13012- movl %ecx, saved_return_addr(%edx)
13013- /* get the function pointer into ECX*/
13014- popl %ecx
13015- movl %ecx, efi_rt_function_ptr(%edx)
13016+ popl saved_return_addr(%edx)
13017+ popl efi_rt_function_ptr(%edx)
13018
13019 /*
13020 * 3. Call the physical function.
13021 */
13022- call *%ecx
13023+ call *efi_rt_function_ptr(%edx)
13024
13025 /*
13026 * 4. Balance the stack. And because EAX contain the return value,
13027@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
13028 1: popl %edx
13029 subl $1b, %edx
13030
13031- movl efi_rt_function_ptr(%edx), %ecx
13032- pushl %ecx
13033+ pushl efi_rt_function_ptr(%edx)
13034
13035 /*
13036 * 10. Push the saved return address onto the stack and return.
13037 */
13038- movl saved_return_addr(%edx), %ecx
13039- pushl %ecx
13040- ret
13041+ jmpl *saved_return_addr(%edx)
13042 ENDPROC(efi_call_phys)
13043 .previous
13044
13045diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
13046index cbed140..5f2ca57 100644
13047--- a/arch/x86/boot/compressed/head_32.S
13048+++ b/arch/x86/boot/compressed/head_32.S
13049@@ -140,10 +140,10 @@ preferred_addr:
13050 addl %eax, %ebx
13051 notl %eax
13052 andl %eax, %ebx
13053- cmpl $LOAD_PHYSICAL_ADDR, %ebx
13054+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
13055 jge 1f
13056 #endif
13057- movl $LOAD_PHYSICAL_ADDR, %ebx
13058+ movl $____LOAD_PHYSICAL_ADDR, %ebx
13059 1:
13060
13061 /* Target address to relocate to for decompression */
13062diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
13063index 2884e0c..904a2f7 100644
13064--- a/arch/x86/boot/compressed/head_64.S
13065+++ b/arch/x86/boot/compressed/head_64.S
13066@@ -94,10 +94,10 @@ ENTRY(startup_32)
13067 addl %eax, %ebx
13068 notl %eax
13069 andl %eax, %ebx
13070- cmpl $LOAD_PHYSICAL_ADDR, %ebx
13071+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
13072 jge 1f
13073 #endif
13074- movl $LOAD_PHYSICAL_ADDR, %ebx
13075+ movl $____LOAD_PHYSICAL_ADDR, %ebx
13076 1:
13077
13078 /* Target address to relocate to for decompression */
13079@@ -322,10 +322,10 @@ preferred_addr:
13080 addq %rax, %rbp
13081 notq %rax
13082 andq %rax, %rbp
13083- cmpq $LOAD_PHYSICAL_ADDR, %rbp
13084+ cmpq $____LOAD_PHYSICAL_ADDR, %rbp
13085 jge 1f
13086 #endif
13087- movq $LOAD_PHYSICAL_ADDR, %rbp
13088+ movq $____LOAD_PHYSICAL_ADDR, %rbp
13089 1:
13090
13091 /* Target address to relocate to for decompression */
13092@@ -431,8 +431,8 @@ gdt:
13093 .long gdt
13094 .word 0
13095 .quad 0x0000000000000000 /* NULL descriptor */
13096- .quad 0x00af9a000000ffff /* __KERNEL_CS */
13097- .quad 0x00cf92000000ffff /* __KERNEL_DS */
13098+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
13099+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
13100 .quad 0x0080890000000000 /* TS descriptor */
13101 .quad 0x0000000000000000 /* TS continued */
13102 gdt_end:
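
The GDT tweak flips the low bit of the descriptor type field: 0x9a becomes 0x9b for __KERNEL_CS and 0x92 becomes 0x93 for __KERNEL_DS, pre-setting each descriptor's accessed bit. The CPU otherwise writes that bit itself the first time the selector is loaded, which would fault if KERNEXEC later keeps the GDT in read-only memory. Decoding the access byte out of the two quadwords:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* The access byte of a segment descriptor sits in bits 40..47. */
        uint64_t old_cs = 0x00af9a000000ffffULL;
        uint64_t new_cs = 0x00af9b000000ffffULL;
        unsigned old_acc = (unsigned)(old_cs >> 40) & 0xff;
        unsigned new_acc = (unsigned)(new_cs >> 40) & 0xff;

        printf("type: %x -> %x, accessed bit: %u -> %u\n",
               old_acc & 0x0f, new_acc & 0x0f, old_acc & 1, new_acc & 1);
        return 0;
}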
13103diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
13104index 57ab74d..7c52182 100644
13105--- a/arch/x86/boot/compressed/misc.c
13106+++ b/arch/x86/boot/compressed/misc.c
13107@@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
13108 * Calculate the delta between where vmlinux was linked to load
13109 * and where it was actually loaded.
13110 */
13111- delta = min_addr - LOAD_PHYSICAL_ADDR;
13112+ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
13113 if (!delta) {
13114 debug_putstr("No relocation needed... ");
13115 return;
13116@@ -312,7 +312,7 @@ static void parse_elf(void *output)
13117 Elf32_Ehdr ehdr;
13118 Elf32_Phdr *phdrs, *phdr;
13119 #endif
13120- void *dest;
13121+ void *dest, *prev;
13122 int i;
13123
13124 memcpy(&ehdr, output, sizeof(ehdr));
13125@@ -339,13 +339,16 @@ static void parse_elf(void *output)
13126 case PT_LOAD:
13127 #ifdef CONFIG_RELOCATABLE
13128 dest = output;
13129- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
13130+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
13131 #else
13132 dest = (void *)(phdr->p_paddr);
13133 #endif
13134 memcpy(dest,
13135 output + phdr->p_offset,
13136 phdr->p_filesz);
13137+ if (i)
13138+ memset(prev, 0xff, dest - prev);
13139+ prev = dest + phdr->p_filesz;
13140 break;
13141 default: /* Ignore other PT_* */ break;
13142 }
13143@@ -395,7 +398,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
13144 error("Destination address too large");
13145 #endif
13146 #ifndef CONFIG_RELOCATABLE
13147- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
13148+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
13149 error("Wrong destination address");
13150 #endif
13151
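
The parse_elf() change tracks where the previous PT_LOAD segment ended and fills the alignment gap up to the next one with 0xff bytes, presumably so the space between loaded segments holds a known poison value rather than leftover decompressor output. Note that the "if (i)" guard only skips the very first program header, so the code relies on the first header of vmlinux being a load segment, which holds in practice. A userspace sketch of the copy-and-poison loop:

#include <stdio.h>
#include <string.h>

/* prev_end is NULL before the first segment is copied. */
static void load_segment(unsigned char *dest, const unsigned char *src,
                         size_t filesz, unsigned char **prev_end)
{
        if (*prev_end)
                memset(*prev_end, 0xff, (size_t)(dest - *prev_end));
        memcpy(dest, src, filesz);
        *prev_end = dest + filesz;
}

int main(void)
{
        unsigned char image[64] = {0}, seg[8] = "SEGMENT";
        unsigned char *prev = NULL;

        load_segment(image, seg, 8, &prev);
        load_segment(image + 32, seg, 8, &prev);        /* poisons [8,32) */
        printf("gap byte: %#x\n", image[16]);           /* prints 0xff */
        return 0;
}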
13152diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
13153index 1fd7d57..0f7d096 100644
13154--- a/arch/x86/boot/cpucheck.c
13155+++ b/arch/x86/boot/cpucheck.c
13156@@ -125,9 +125,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13157 u32 ecx = MSR_K7_HWCR;
13158 u32 eax, edx;
13159
13160- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13161+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13162 eax &= ~(1 << 15);
13163- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13164+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13165
13166 get_cpuflags(); /* Make sure it really did something */
13167 err = check_cpuflags();
13168@@ -140,9 +140,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13169 u32 ecx = MSR_VIA_FCR;
13170 u32 eax, edx;
13171
13172- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13173+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13174 eax |= (1<<1)|(1<<7);
13175- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13176+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13177
13178 set_bit(X86_FEATURE_CX8, cpu.flags);
13179 err = check_cpuflags();
13180@@ -153,12 +153,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13181 u32 eax, edx;
13182 u32 level = 1;
13183
13184- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13185- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
13186- asm("cpuid"
13187+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13188+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
13189+ asm volatile("cpuid"
13190 : "+a" (level), "=d" (cpu.flags[0])
13191 : : "ecx", "ebx");
13192- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13193+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13194
13195 err = check_cpuflags();
13196 } else if (err == 0x01 &&
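
The cpucheck.c hunks above only add `volatile` to the rdmsr/wrmsr/cpuid asm statements. Without it, GCC may treat an asm with outputs as a pure computation and reorder, merge, or delete it entirely if the outputs look unused; MSR accesses have side effects the compiler cannot see, so they must be volatile. A minimal sketch of the wrapper shape (ring-0 only, and these names are not the kernel's actual helpers):

#include <stdint.h>

/* Read an MSR. The asm has side effects beyond its outputs, so it is
 * declared volatile to keep the compiler from eliding or hoisting it. */
static inline uint64_t rdmsr_sketch(uint32_t msr)
{
	uint32_t lo, hi;

	asm volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr));
	return ((uint64_t)hi << 32) | lo;
}

static inline void wrmsr_sketch(uint32_t msr, uint64_t val)
{
	asm volatile("wrmsr" : : "c" (msr),
		     "a" ((uint32_t)val), "d" ((uint32_t)(val >> 32)));
}

int main(void)
{
	(void)rdmsr_sketch;	/* compile-only: rdmsr/wrmsr fault at CPL3 */
	(void)wrmsr_sketch;
	return 0;
}
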
13197diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
13198index 7a6d43a..edf6e40 100644
13199--- a/arch/x86/boot/header.S
13200+++ b/arch/x86/boot/header.S
13201@@ -438,10 +438,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
13202 # single linked list of
13203 # struct setup_data
13204
13205-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
13206+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
13207
13208 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
13209+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13210+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
13211+#else
13212 #define VO_INIT_SIZE (VO__end - VO__text)
13213+#endif
13214 #if ZO_INIT_SIZE > VO_INIT_SIZE
13215 #define INIT_SIZE ZO_INIT_SIZE
13216 #else
13217diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
13218index db75d07..8e6d0af 100644
13219--- a/arch/x86/boot/memory.c
13220+++ b/arch/x86/boot/memory.c
13221@@ -19,7 +19,7 @@
13222
13223 static int detect_memory_e820(void)
13224 {
13225- int count = 0;
13226+ unsigned int count = 0;
13227 struct biosregs ireg, oreg;
13228 struct e820entry *desc = boot_params.e820_map;
13229 static struct e820entry buf; /* static so it is zeroed */
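
Widening `count` from int to unsigned int is presumably part of the patch's broader signed/unsigned hygiene (the same change appears in video.c below): a counter that is only ever incremented and compared gains nothing from a sign bit, and if it ever underflows, the unsigned value fails any `< bound` check instead of becoming a negative array index. A two-line illustration:

#include <stdio.h>

int main(void)
{
	int scount = 0;
	unsigned int ucount = 0;

	scount--;	/* -1: as an array index this reads before the buffer */
	ucount--;	/* wraps to UINT_MAX: fails any `< size` bounds check */
	printf("%d %u\n", scount, ucount);
	return 0;
}
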
13230diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
13231index ba3e100..6501b8f 100644
13232--- a/arch/x86/boot/video-vesa.c
13233+++ b/arch/x86/boot/video-vesa.c
13234@@ -201,6 +201,7 @@ static void vesa_store_pm_info(void)
13235
13236 boot_params.screen_info.vesapm_seg = oreg.es;
13237 boot_params.screen_info.vesapm_off = oreg.di;
13238+ boot_params.screen_info.vesapm_size = oreg.cx;
13239 }
13240
13241 /*
13242diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
13243index 43eda28..5ab5fdb 100644
13244--- a/arch/x86/boot/video.c
13245+++ b/arch/x86/boot/video.c
13246@@ -96,7 +96,7 @@ static void store_mode_params(void)
13247 static unsigned int get_entry(void)
13248 {
13249 char entry_buf[4];
13250- int i, len = 0;
13251+ unsigned int i, len = 0;
13252 int key;
13253 unsigned int v;
13254
13255diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
13256index 9105655..41779c1 100644
13257--- a/arch/x86/crypto/aes-x86_64-asm_64.S
13258+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
13259@@ -8,6 +8,8 @@
13260 * including this sentence is retained in full.
13261 */
13262
13263+#include <asm/alternative-asm.h>
13264+
13265 .extern crypto_ft_tab
13266 .extern crypto_it_tab
13267 .extern crypto_fl_tab
13268@@ -70,6 +72,8 @@
13269 je B192; \
13270 leaq 32(r9),r9;
13271
13272+#define ret pax_force_retaddr; ret
13273+
13274 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
13275 movq r1,r2; \
13276 movq r3,r4; \
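
The `#define ret pax_force_retaddr; ret` line above is a preprocessor trick: because .S files are run through cpp, every later `ret` mnemonic in this file expands to the instrumentation macro followed by the real return, covering the whole file without touching each return site (the other crypto files in this patch instead add pax_force_retaddr by hand before each ret). pax_force_retaddr itself comes from asm/alternative-asm.h; its expansion is not shown in this hunk, so its exact behavior is outside this patch excerpt. The same token-redefinition mechanism, demonstrated in C (demonstration only; macro self-reference is not re-expanded, so the trailing keyword stays real):

#include <stdio.h>

/* Redefine a token so every later use site gains instrumentation --
 * the mechanism behind `#define ret pax_force_retaddr; ret`. */
#define return puts("returning"); return

static int twice(int x)
{
	return x * 2;	/* expands to: puts("returning"); return x * 2; */
}

int main(void)
{
	return twice(21) == 42 ? 0 : 1;		/* also instrumented */
}

The C version is fragile inside unbraced if bodies (only the puts() would be guarded), which is why it is demonstration-only; a flat assembly file has no such hazard.
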
13277diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
13278index 477e9d7..c92c7d8 100644
13279--- a/arch/x86/crypto/aesni-intel_asm.S
13280+++ b/arch/x86/crypto/aesni-intel_asm.S
13281@@ -31,6 +31,7 @@
13282
13283 #include <linux/linkage.h>
13284 #include <asm/inst.h>
13285+#include <asm/alternative-asm.h>
13286
13287 #ifdef __x86_64__
13288 .data
13289@@ -205,7 +206,7 @@ enc: .octa 0x2
13290 * num_initial_blocks = b mod 4
13291 * encrypt the initial num_initial_blocks blocks and apply ghash on
13292 * the ciphertext
13293-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13294+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13295 * are clobbered
13296 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13297 */
13298@@ -214,8 +215,8 @@ enc: .octa 0x2
13299 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13300 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13301 mov arg7, %r10 # %r10 = AAD
13302- mov arg8, %r12 # %r12 = aadLen
13303- mov %r12, %r11
13304+ mov arg8, %r15 # %r15 = aadLen
13305+ mov %r15, %r11
13306 pxor %xmm\i, %xmm\i
13307 _get_AAD_loop\num_initial_blocks\operation:
13308 movd (%r10), \TMP1
13309@@ -223,15 +224,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13310 psrldq $4, %xmm\i
13311 pxor \TMP1, %xmm\i
13312 add $4, %r10
13313- sub $4, %r12
13314+ sub $4, %r15
13315 jne _get_AAD_loop\num_initial_blocks\operation
13316 cmp $16, %r11
13317 je _get_AAD_loop2_done\num_initial_blocks\operation
13318- mov $16, %r12
13319+ mov $16, %r15
13320 _get_AAD_loop2\num_initial_blocks\operation:
13321 psrldq $4, %xmm\i
13322- sub $4, %r12
13323- cmp %r11, %r12
13324+ sub $4, %r15
13325+ cmp %r11, %r15
13326 jne _get_AAD_loop2\num_initial_blocks\operation
13327 _get_AAD_loop2_done\num_initial_blocks\operation:
13328 movdqa SHUF_MASK(%rip), %xmm14
13329@@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation:
13330 * num_initial_blocks = b mod 4
13331 * encrypt the initial num_initial_blocks blocks and apply ghash on
13332 * the ciphertext
13333-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13334+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13335 * are clobbered
13336 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13337 */
13338@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
13339 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13340 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13341 mov arg7, %r10 # %r10 = AAD
13342- mov arg8, %r12 # %r12 = aadLen
13343- mov %r12, %r11
13344+ mov arg8, %r15 # %r15 = aadLen
13345+ mov %r15, %r11
13346 pxor %xmm\i, %xmm\i
13347 _get_AAD_loop\num_initial_blocks\operation:
13348 movd (%r10), \TMP1
13349@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13350 psrldq $4, %xmm\i
13351 pxor \TMP1, %xmm\i
13352 add $4, %r10
13353- sub $4, %r12
13354+ sub $4, %r15
13355 jne _get_AAD_loop\num_initial_blocks\operation
13356 cmp $16, %r11
13357 je _get_AAD_loop2_done\num_initial_blocks\operation
13358- mov $16, %r12
13359+ mov $16, %r15
13360 _get_AAD_loop2\num_initial_blocks\operation:
13361 psrldq $4, %xmm\i
13362- sub $4, %r12
13363- cmp %r11, %r12
13364+ sub $4, %r15
13365+ cmp %r11, %r15
13366 jne _get_AAD_loop2\num_initial_blocks\operation
13367 _get_AAD_loop2_done\num_initial_blocks\operation:
13368 movdqa SHUF_MASK(%rip), %xmm14
13369@@ -1269,7 +1270,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
13370 *
13371 *****************************************************************************/
13372 ENTRY(aesni_gcm_dec)
13373- push %r12
13374+ push %r15
13375 push %r13
13376 push %r14
13377 mov %rsp, %r14
13378@@ -1279,8 +1280,8 @@ ENTRY(aesni_gcm_dec)
13379 */
13380 sub $VARIABLE_OFFSET, %rsp
13381 and $~63, %rsp # align rsp to 64 bytes
13382- mov %arg6, %r12
13383- movdqu (%r12), %xmm13 # %xmm13 = HashKey
13384+ mov %arg6, %r15
13385+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
13386 movdqa SHUF_MASK(%rip), %xmm2
13387 PSHUFB_XMM %xmm2, %xmm13
13388
13389@@ -1308,10 +1309,10 @@ ENTRY(aesni_gcm_dec)
13390 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
13391 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
13392 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
13393- mov %r13, %r12
13394- and $(3<<4), %r12
13395+ mov %r13, %r15
13396+ and $(3<<4), %r15
13397 jz _initial_num_blocks_is_0_decrypt
13398- cmp $(2<<4), %r12
13399+ cmp $(2<<4), %r15
13400 jb _initial_num_blocks_is_1_decrypt
13401 je _initial_num_blocks_is_2_decrypt
13402 _initial_num_blocks_is_3_decrypt:
13403@@ -1361,16 +1362,16 @@ _zero_cipher_left_decrypt:
13404 sub $16, %r11
13405 add %r13, %r11
13406 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
13407- lea SHIFT_MASK+16(%rip), %r12
13408- sub %r13, %r12
13409+ lea SHIFT_MASK+16(%rip), %r15
13410+ sub %r13, %r15
13411 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
13412 # (%r13 is the number of bytes in plaintext mod 16)
13413- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13414+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13415 	PSHUFB_XMM %xmm2, %xmm1		# right shift 16-%r13 bytes
13416
13417 movdqa %xmm1, %xmm2
13418 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
13419- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13420+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13421 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
13422 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
13423 pand %xmm1, %xmm2
13424@@ -1399,9 +1400,9 @@ _less_than_8_bytes_left_decrypt:
13425 sub $1, %r13
13426 jne _less_than_8_bytes_left_decrypt
13427 _multiple_of_16_bytes_decrypt:
13428- mov arg8, %r12 # %r13 = aadLen (number of bytes)
13429- shl $3, %r12 # convert into number of bits
13430- movd %r12d, %xmm15 # len(A) in %xmm15
13431+	mov	arg8, %r15		# %r15 = aadLen (number of bytes)
13432+ shl $3, %r15 # convert into number of bits
13433+ movd %r15d, %xmm15 # len(A) in %xmm15
13434 shl $3, %arg4 # len(C) in bits (*128)
13435 MOVQ_R64_XMM %arg4, %xmm1
13436 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13437@@ -1440,7 +1441,8 @@ _return_T_done_decrypt:
13438 mov %r14, %rsp
13439 pop %r14
13440 pop %r13
13441- pop %r12
13442+ pop %r15
13443+ pax_force_retaddr
13444 ret
13445 ENDPROC(aesni_gcm_dec)
13446
13447@@ -1529,7 +1531,7 @@ ENDPROC(aesni_gcm_dec)
13448 * poly = x^128 + x^127 + x^126 + x^121 + 1
13449 ***************************************************************************/
13450 ENTRY(aesni_gcm_enc)
13451- push %r12
13452+ push %r15
13453 push %r13
13454 push %r14
13455 mov %rsp, %r14
13456@@ -1539,8 +1541,8 @@ ENTRY(aesni_gcm_enc)
13457 #
13458 sub $VARIABLE_OFFSET, %rsp
13459 and $~63, %rsp
13460- mov %arg6, %r12
13461- movdqu (%r12), %xmm13
13462+ mov %arg6, %r15
13463+ movdqu (%r15), %xmm13
13464 movdqa SHUF_MASK(%rip), %xmm2
13465 PSHUFB_XMM %xmm2, %xmm13
13466
13467@@ -1564,13 +1566,13 @@ ENTRY(aesni_gcm_enc)
13468 movdqa %xmm13, HashKey(%rsp)
13469 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
13470 and $-16, %r13
13471- mov %r13, %r12
13472+ mov %r13, %r15
13473
13474 # Encrypt first few blocks
13475
13476- and $(3<<4), %r12
13477+ and $(3<<4), %r15
13478 jz _initial_num_blocks_is_0_encrypt
13479- cmp $(2<<4), %r12
13480+ cmp $(2<<4), %r15
13481 jb _initial_num_blocks_is_1_encrypt
13482 je _initial_num_blocks_is_2_encrypt
13483 _initial_num_blocks_is_3_encrypt:
13484@@ -1623,14 +1625,14 @@ _zero_cipher_left_encrypt:
13485 sub $16, %r11
13486 add %r13, %r11
13487 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
13488- lea SHIFT_MASK+16(%rip), %r12
13489- sub %r13, %r12
13490+ lea SHIFT_MASK+16(%rip), %r15
13491+ sub %r13, %r15
13492 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
13493 # (%r13 is the number of bytes in plaintext mod 16)
13494- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13495+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13496 	PSHUFB_XMM %xmm2, %xmm1		# shift right 16-r13 bytes
13497 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
13498- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13499+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13500 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
13501 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
13502 movdqa SHUF_MASK(%rip), %xmm10
13503@@ -1663,9 +1665,9 @@ _less_than_8_bytes_left_encrypt:
13504 sub $1, %r13
13505 jne _less_than_8_bytes_left_encrypt
13506 _multiple_of_16_bytes_encrypt:
13507- mov arg8, %r12 # %r12 = addLen (number of bytes)
13508- shl $3, %r12
13509- movd %r12d, %xmm15 # len(A) in %xmm15
13510+	mov	arg8, %r15		# %r15 = aadLen (number of bytes)
13511+ shl $3, %r15
13512+ movd %r15d, %xmm15 # len(A) in %xmm15
13513 shl $3, %arg4 # len(C) in bits (*128)
13514 MOVQ_R64_XMM %arg4, %xmm1
13515 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13516@@ -1704,7 +1706,8 @@ _return_T_done_encrypt:
13517 mov %r14, %rsp
13518 pop %r14
13519 pop %r13
13520- pop %r12
13521+ pop %r15
13522+ pax_force_retaddr
13523 ret
13524 ENDPROC(aesni_gcm_enc)
13525
13526@@ -1722,6 +1725,7 @@ _key_expansion_256a:
13527 pxor %xmm1, %xmm0
13528 movaps %xmm0, (TKEYP)
13529 add $0x10, TKEYP
13530+ pax_force_retaddr
13531 ret
13532 ENDPROC(_key_expansion_128)
13533 ENDPROC(_key_expansion_256a)
13534@@ -1748,6 +1752,7 @@ _key_expansion_192a:
13535 shufps $0b01001110, %xmm2, %xmm1
13536 movaps %xmm1, 0x10(TKEYP)
13537 add $0x20, TKEYP
13538+ pax_force_retaddr
13539 ret
13540 ENDPROC(_key_expansion_192a)
13541
13542@@ -1768,6 +1773,7 @@ _key_expansion_192b:
13543
13544 movaps %xmm0, (TKEYP)
13545 add $0x10, TKEYP
13546+ pax_force_retaddr
13547 ret
13548 ENDPROC(_key_expansion_192b)
13549
13550@@ -1781,6 +1787,7 @@ _key_expansion_256b:
13551 pxor %xmm1, %xmm2
13552 movaps %xmm2, (TKEYP)
13553 add $0x10, TKEYP
13554+ pax_force_retaddr
13555 ret
13556 ENDPROC(_key_expansion_256b)
13557
13558@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
13559 #ifndef __x86_64__
13560 popl KEYP
13561 #endif
13562+ pax_force_retaddr
13563 ret
13564 ENDPROC(aesni_set_key)
13565
13566@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
13567 popl KLEN
13568 popl KEYP
13569 #endif
13570+ pax_force_retaddr
13571 ret
13572 ENDPROC(aesni_enc)
13573
13574@@ -1974,6 +1983,7 @@ _aesni_enc1:
13575 AESENC KEY STATE
13576 movaps 0x70(TKEYP), KEY
13577 AESENCLAST KEY STATE
13578+ pax_force_retaddr
13579 ret
13580 ENDPROC(_aesni_enc1)
13581
13582@@ -2083,6 +2093,7 @@ _aesni_enc4:
13583 AESENCLAST KEY STATE2
13584 AESENCLAST KEY STATE3
13585 AESENCLAST KEY STATE4
13586+ pax_force_retaddr
13587 ret
13588 ENDPROC(_aesni_enc4)
13589
13590@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
13591 popl KLEN
13592 popl KEYP
13593 #endif
13594+ pax_force_retaddr
13595 ret
13596 ENDPROC(aesni_dec)
13597
13598@@ -2164,6 +2176,7 @@ _aesni_dec1:
13599 AESDEC KEY STATE
13600 movaps 0x70(TKEYP), KEY
13601 AESDECLAST KEY STATE
13602+ pax_force_retaddr
13603 ret
13604 ENDPROC(_aesni_dec1)
13605
13606@@ -2273,6 +2286,7 @@ _aesni_dec4:
13607 AESDECLAST KEY STATE2
13608 AESDECLAST KEY STATE3
13609 AESDECLAST KEY STATE4
13610+ pax_force_retaddr
13611 ret
13612 ENDPROC(_aesni_dec4)
13613
13614@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
13615 popl KEYP
13616 popl LEN
13617 #endif
13618+ pax_force_retaddr
13619 ret
13620 ENDPROC(aesni_ecb_enc)
13621
13622@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
13623 popl KEYP
13624 popl LEN
13625 #endif
13626+ pax_force_retaddr
13627 ret
13628 ENDPROC(aesni_ecb_dec)
13629
13630@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
13631 popl LEN
13632 popl IVP
13633 #endif
13634+ pax_force_retaddr
13635 ret
13636 ENDPROC(aesni_cbc_enc)
13637
13638@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
13639 popl LEN
13640 popl IVP
13641 #endif
13642+ pax_force_retaddr
13643 ret
13644 ENDPROC(aesni_cbc_dec)
13645
13646@@ -2550,6 +2568,7 @@ _aesni_inc_init:
13647 mov $1, TCTR_LOW
13648 MOVQ_R64_XMM TCTR_LOW INC
13649 MOVQ_R64_XMM CTR TCTR_LOW
13650+ pax_force_retaddr
13651 ret
13652 ENDPROC(_aesni_inc_init)
13653
13654@@ -2579,6 +2598,7 @@ _aesni_inc:
13655 .Linc_low:
13656 movaps CTR, IV
13657 PSHUFB_XMM BSWAP_MASK IV
13658+ pax_force_retaddr
13659 ret
13660 ENDPROC(_aesni_inc)
13661
13662@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
13663 .Lctr_enc_ret:
13664 movups IV, (IVP)
13665 .Lctr_enc_just_ret:
13666+ pax_force_retaddr
13667 ret
13668 ENDPROC(aesni_ctr_enc)
13669
13670@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
13671 pxor INC, STATE4
13672 movdqu STATE4, 0x70(OUTP)
13673
13674+ pax_force_retaddr
13675 ret
13676 ENDPROC(aesni_xts_crypt8)
13677
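
A pattern that recurs through this and the following crypto files: every use of %r12 as a scratch register is migrated to %r15 (or %r14). The hunks do not state the reason, but it is consistent with the rest of the patch reserving %r12 for the mask that pax_set_fptr_mask loads and pax_force_retaddr applies, so hand-written asm must not clobber it; treat that rationale as inferred, not quoted. C can fence a register off from the allocator the same way with a global register variable (GCC extension, x86-64; the name below is made up):

/* Sketch: pin %r12 for one purpose and keep the compiler's register
 * allocator away from it for this translation unit. */
register unsigned long fptr_mask asm("r12");

static unsigned long apply_mask(unsigned long addr)
{
	return addr | fptr_mask;	/* e.g. force a kernel-space address */
}

int main(void)
{
	fptr_mask = 1UL << 63;
	return (apply_mask(0x1000) >> 63) == 1 ? 0 : 1;
}

Code compiled without the reservation (e.g. other translation units, or asm like the files above) must be audited the same way, which is what the %r12 renames accomplish.
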
13678diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13679index 246c670..466e2d6 100644
13680--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
13681+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13682@@ -21,6 +21,7 @@
13683 */
13684
13685 #include <linux/linkage.h>
13686+#include <asm/alternative-asm.h>
13687
13688 .file "blowfish-x86_64-asm.S"
13689 .text
13690@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
13691 jnz .L__enc_xor;
13692
13693 write_block();
13694+ pax_force_retaddr
13695 ret;
13696 .L__enc_xor:
13697 xor_block();
13698+ pax_force_retaddr
13699 ret;
13700 ENDPROC(__blowfish_enc_blk)
13701
13702@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
13703
13704 movq %r11, %rbp;
13705
13706+ pax_force_retaddr
13707 ret;
13708 ENDPROC(blowfish_dec_blk)
13709
13710@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
13711
13712 popq %rbx;
13713 popq %rbp;
13714+ pax_force_retaddr
13715 ret;
13716
13717 .L__enc_xor4:
13718@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
13719
13720 popq %rbx;
13721 popq %rbp;
13722+ pax_force_retaddr
13723 ret;
13724 ENDPROC(__blowfish_enc_blk_4way)
13725
13726@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
13727 popq %rbx;
13728 popq %rbp;
13729
13730+ pax_force_retaddr
13731 ret;
13732 ENDPROC(blowfish_dec_blk_4way)
13733diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13734index ce71f92..1dce7ec 100644
13735--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13736+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13737@@ -16,6 +16,7 @@
13738 */
13739
13740 #include <linux/linkage.h>
13741+#include <asm/alternative-asm.h>
13742
13743 #define CAMELLIA_TABLE_BYTE_LEN 272
13744
13745@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13746 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
13747 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
13748 %rcx, (%r9));
13749+ pax_force_retaddr
13750 ret;
13751 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13752
13753@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13754 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
13755 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
13756 %rax, (%r9));
13757+ pax_force_retaddr
13758 ret;
13759 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13760
13761@@ -780,6 +783,7 @@ __camellia_enc_blk16:
13762 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13763 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
13764
13765+ pax_force_retaddr
13766 ret;
13767
13768 .align 8
13769@@ -865,6 +869,7 @@ __camellia_dec_blk16:
13770 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13771 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
13772
13773+ pax_force_retaddr
13774 ret;
13775
13776 .align 8
13777@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
13778 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13779 %xmm8, %rsi);
13780
13781+ pax_force_retaddr
13782 ret;
13783 ENDPROC(camellia_ecb_enc_16way)
13784
13785@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
13786 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13787 %xmm8, %rsi);
13788
13789+ pax_force_retaddr
13790 ret;
13791 ENDPROC(camellia_ecb_dec_16way)
13792
13793@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
13794 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13795 %xmm8, %rsi);
13796
13797+ pax_force_retaddr
13798 ret;
13799 ENDPROC(camellia_cbc_dec_16way)
13800
13801@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
13802 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13803 %xmm8, %rsi);
13804
13805+ pax_force_retaddr
13806 ret;
13807 ENDPROC(camellia_ctr_16way)
13808
13809@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
13810 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13811 %xmm8, %rsi);
13812
13813+ pax_force_retaddr
13814 ret;
13815 ENDPROC(camellia_xts_crypt_16way)
13816
13817diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13818index 0e0b886..5a3123c 100644
13819--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13820+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13821@@ -11,6 +11,7 @@
13822 */
13823
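
The one-line aout_core_dump() change above zeroes `dump` before it is filled in. struct user32 is written wholesale into the core file, so any field or padding byte the dump code does not explicitly set would otherwise carry leftover kernel stack contents into a user-readable file: a classic infoleak. A self-contained illustration of the padding problem (struct and names are made up; padding behavior shown is what happens in practice, not a language guarantee):

#include <stdio.h>
#include <string.h>

struct record {		/* 3 padding bytes hide after `flag` */
	char flag;
	int  value;
};

int main(void)
{
	unsigned char dirt[sizeof(struct record)];
	struct record r;
	size_t i;

	memset(dirt, 0xAA, sizeof(dirt));	/* simulate stale stack data */
	memcpy(&r, dirt, sizeof(r));		/* reuse "dirty" memory */

	/* The fix: memset(&r, 0, sizeof(r)); would clear the padding too. */
	r.flag = 1;
	r.value = 42;

	/* Dumping the raw struct exposes the 0xAA padding bytes. */
	for (i = 0; i < sizeof(r); i++)
		printf("%02x ", ((unsigned char *)&r)[i]);
	putchar('\n');
	return 0;
}
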
13824 #include <linux/linkage.h>
13825+#include <asm/alternative-asm.h>
13826
13827 #define CAMELLIA_TABLE_BYTE_LEN 272
13828
13829@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13830 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
13831 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
13832 %rcx, (%r9));
13833+ pax_force_retaddr
13834 ret;
13835 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13836
13837@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13838 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
13839 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
13840 %rax, (%r9));
13841+ pax_force_retaddr
13842 ret;
13843 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13844
13845@@ -820,6 +823,7 @@ __camellia_enc_blk32:
13846 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13847 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
13848
13849+ pax_force_retaddr
13850 ret;
13851
13852 .align 8
13853@@ -905,6 +909,7 @@ __camellia_dec_blk32:
13854 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13855 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
13856
13857+ pax_force_retaddr
13858 ret;
13859
13860 .align 8
13861@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
13862
13863 vzeroupper;
13864
13865+ pax_force_retaddr
13866 ret;
13867 ENDPROC(camellia_ecb_enc_32way)
13868
13869@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
13870
13871 vzeroupper;
13872
13873+ pax_force_retaddr
13874 ret;
13875 ENDPROC(camellia_ecb_dec_32way)
13876
13877@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
13878
13879 vzeroupper;
13880
13881+ pax_force_retaddr
13882 ret;
13883 ENDPROC(camellia_cbc_dec_32way)
13884
13885@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
13886
13887 vzeroupper;
13888
13889+ pax_force_retaddr
13890 ret;
13891 ENDPROC(camellia_ctr_32way)
13892
13893@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
13894
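
On the get_sigframe() change above: the comment's invariant is that the i386 ABI wants ((sp + 4) & 15) == 0 at function entry. Both the old and new formulas satisfy it; the observable difference is that ((sp - 12) & -16ul) - 4 always rounds strictly downward, placing the frame at least 16 bytes below the incoming sp, while the old formula could land exactly at sp. The patch does not spell out its motivation, so only the arithmetic is checked here:

#include <stdio.h>

/* Verify the i386 ABI invariant from the comment in get_sigframe():
 * on function entry ((sp + 4) & 15) must be 0. */
int main(void)
{
	unsigned long sp;

	for (sp = 0x1000; sp < 0x1020; sp++) {
		unsigned long old_sp = ((sp + 4) & -16ul) - 4;
		unsigned long new_sp = ((sp - 12) & -16ul) - 4;

		if (((old_sp + 4) & 15) || ((new_sp + 4) & 15))
			puts("alignment broken");	/* never triggers */
		if (new_sp > sp - 16)
			puts("unexpected");		/* never triggers */
	}
	puts("both formulas 16-align sp+4; the new one also stays");
	puts("at least 16 bytes below the incoming sp");
	return 0;
}
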
13895 vzeroupper;
13896
13897+ pax_force_retaddr
13898 ret;
13899 ENDPROC(camellia_xts_crypt_32way)
13900
13901diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
13902index 310319c..db3d7b5 100644
13903--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
13904+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
13905@@ -21,6 +21,7 @@
13906 */
13907
13908 #include <linux/linkage.h>
13909+#include <asm/alternative-asm.h>
13910
13911 .file "camellia-x86_64-asm_64.S"
13912 .text
13913@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
13914 enc_outunpack(mov, RT1);
13915
13916 movq RRBP, %rbp;
13917+ pax_force_retaddr
13918 ret;
13919
13920 .L__enc_xor:
13921 enc_outunpack(xor, RT1);
13922
13923 movq RRBP, %rbp;
13924+ pax_force_retaddr
13925 ret;
13926 ENDPROC(__camellia_enc_blk)
13927
13928@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
13929 dec_outunpack();
13930
13931 movq RRBP, %rbp;
13932+ pax_force_retaddr
13933 ret;
13934 ENDPROC(camellia_dec_blk)
13935
13936@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
13937
13938 movq RRBP, %rbp;
13939 popq %rbx;
13940+ pax_force_retaddr
13941 ret;
13942
13943 .L__enc2_xor:
13944@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
13945
13946 movq RRBP, %rbp;
13947 popq %rbx;
13948+ pax_force_retaddr
13949 ret;
13950 ENDPROC(__camellia_enc_blk_2way)
13951
13952@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
13953
13954 movq RRBP, %rbp;
13955 movq RXOR, %rbx;
13956+ pax_force_retaddr
13957 ret;
13958 ENDPROC(camellia_dec_blk_2way)
13959diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13960index c35fd5d..2d8c7db 100644
13961--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13962+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13963@@ -24,6 +24,7 @@
13964 */
13965
13966 #include <linux/linkage.h>
13967+#include <asm/alternative-asm.h>
13968
13969 .file "cast5-avx-x86_64-asm_64.S"
13970
13971@@ -281,6 +282,7 @@ __cast5_enc_blk16:
13972 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13973 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13974
13975+ pax_force_retaddr
13976 ret;
13977 ENDPROC(__cast5_enc_blk16)
13978
13979@@ -352,6 +354,7 @@ __cast5_dec_blk16:
13980 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13981 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13982
13983+ pax_force_retaddr
13984 ret;
13985
13986 .L__skip_dec:
13987@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
13988 vmovdqu RR4, (6*4*4)(%r11);
13989 vmovdqu RL4, (7*4*4)(%r11);
13990
13991+ pax_force_retaddr
13992 ret;
13993 ENDPROC(cast5_ecb_enc_16way)
13994
13995@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
13996 vmovdqu RR4, (6*4*4)(%r11);
13997 vmovdqu RL4, (7*4*4)(%r11);
13998
13999+ pax_force_retaddr
14000 ret;
14001 ENDPROC(cast5_ecb_dec_16way)
14002
14003@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
14004 * %rdx: src
14005 */
14006
14007- pushq %r12;
14008+ pushq %r14;
14009
14010 movq %rsi, %r11;
14011- movq %rdx, %r12;
14012+ movq %rdx, %r14;
14013
14014 vmovdqu (0*16)(%rdx), RL1;
14015 vmovdqu (1*16)(%rdx), RR1;
14016@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
14017 call __cast5_dec_blk16;
14018
14019 /* xor with src */
14020- vmovq (%r12), RX;
14021+ vmovq (%r14), RX;
14022 vpshufd $0x4f, RX, RX;
14023 vpxor RX, RR1, RR1;
14024- vpxor 0*16+8(%r12), RL1, RL1;
14025- vpxor 1*16+8(%r12), RR2, RR2;
14026- vpxor 2*16+8(%r12), RL2, RL2;
14027- vpxor 3*16+8(%r12), RR3, RR3;
14028- vpxor 4*16+8(%r12), RL3, RL3;
14029- vpxor 5*16+8(%r12), RR4, RR4;
14030- vpxor 6*16+8(%r12), RL4, RL4;
14031+ vpxor 0*16+8(%r14), RL1, RL1;
14032+ vpxor 1*16+8(%r14), RR2, RR2;
14033+ vpxor 2*16+8(%r14), RL2, RL2;
14034+ vpxor 3*16+8(%r14), RR3, RR3;
14035+ vpxor 4*16+8(%r14), RL3, RL3;
14036+ vpxor 5*16+8(%r14), RR4, RR4;
14037+ vpxor 6*16+8(%r14), RL4, RL4;
14038
14039 vmovdqu RR1, (0*16)(%r11);
14040 vmovdqu RL1, (1*16)(%r11);
14041@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
14042 vmovdqu RR4, (6*16)(%r11);
14043 vmovdqu RL4, (7*16)(%r11);
14044
14045- popq %r12;
14046+ popq %r14;
14047
14048+ pax_force_retaddr
14049 ret;
14050 ENDPROC(cast5_cbc_dec_16way)
14051
14052@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
14053 * %rcx: iv (big endian, 64bit)
14054 */
14055
14056- pushq %r12;
14057+ pushq %r14;
14058
14059 movq %rsi, %r11;
14060- movq %rdx, %r12;
14061+ movq %rdx, %r14;
14062
14063 vpcmpeqd RTMP, RTMP, RTMP;
14064 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
14065@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
14066 call __cast5_enc_blk16;
14067
14068 /* dst = src ^ iv */
14069- vpxor (0*16)(%r12), RR1, RR1;
14070- vpxor (1*16)(%r12), RL1, RL1;
14071- vpxor (2*16)(%r12), RR2, RR2;
14072- vpxor (3*16)(%r12), RL2, RL2;
14073- vpxor (4*16)(%r12), RR3, RR3;
14074- vpxor (5*16)(%r12), RL3, RL3;
14075- vpxor (6*16)(%r12), RR4, RR4;
14076- vpxor (7*16)(%r12), RL4, RL4;
14077+ vpxor (0*16)(%r14), RR1, RR1;
14078+ vpxor (1*16)(%r14), RL1, RL1;
14079+ vpxor (2*16)(%r14), RR2, RR2;
14080+ vpxor (3*16)(%r14), RL2, RL2;
14081+ vpxor (4*16)(%r14), RR3, RR3;
14082+ vpxor (5*16)(%r14), RL3, RL3;
14083+ vpxor (6*16)(%r14), RR4, RR4;
14084+ vpxor (7*16)(%r14), RL4, RL4;
14085 vmovdqu RR1, (0*16)(%r11);
14086 vmovdqu RL1, (1*16)(%r11);
14087 vmovdqu RR2, (2*16)(%r11);
14088@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
14089 vmovdqu RR4, (6*16)(%r11);
14090 vmovdqu RL4, (7*16)(%r11);
14091
14092- popq %r12;
14093+ popq %r14;
14094
14095+ pax_force_retaddr
14096 ret;
14097 ENDPROC(cast5_ctr_16way)
14098diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
14099index e3531f8..e123f35 100644
14100--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
14101+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
14102@@ -24,6 +24,7 @@
14103 */
14104
14105 #include <linux/linkage.h>
14106+#include <asm/alternative-asm.h>
14107 #include "glue_helper-asm-avx.S"
14108
14109 .file "cast6-avx-x86_64-asm_64.S"
14110@@ -295,6 +296,7 @@ __cast6_enc_blk8:
14111 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
14112 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
14113
14114+ pax_force_retaddr
14115 ret;
14116 ENDPROC(__cast6_enc_blk8)
14117
14118@@ -340,6 +342,7 @@ __cast6_dec_blk8:
14119 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
14120 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
14121
14122+ pax_force_retaddr
14123 ret;
14124 ENDPROC(__cast6_dec_blk8)
14125
14126@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
14127
14128 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14129
14130+ pax_force_retaddr
14131 ret;
14132 ENDPROC(cast6_ecb_enc_8way)
14133
14134@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
14135
14136 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14137
14138+ pax_force_retaddr
14139 ret;
14140 ENDPROC(cast6_ecb_dec_8way)
14141
14142@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
14143 * %rdx: src
14144 */
14145
14146- pushq %r12;
14147+ pushq %r14;
14148
14149 movq %rsi, %r11;
14150- movq %rdx, %r12;
14151+ movq %rdx, %r14;
14152
14153 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14154
14155 call __cast6_dec_blk8;
14156
14157- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14158+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14159
14160- popq %r12;
14161+ popq %r14;
14162
14163+ pax_force_retaddr
14164 ret;
14165 ENDPROC(cast6_cbc_dec_8way)
14166
14167@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
14168 * %rcx: iv (little endian, 128bit)
14169 */
14170
14171- pushq %r12;
14172+ pushq %r14;
14173
14174 movq %rsi, %r11;
14175- movq %rdx, %r12;
14176+ movq %rdx, %r14;
14177
14178 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14179 RD2, RX, RKR, RKM);
14180
14181 call __cast6_enc_blk8;
14182
14183- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14184+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14185
14186- popq %r12;
14187+ popq %r14;
14188
14189+ pax_force_retaddr
14190 ret;
14191 ENDPROC(cast6_ctr_8way)
14192
14193@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
14194 /* dst <= regs xor IVs(in dst) */
14195 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14196
14197+ pax_force_retaddr
14198 ret;
14199 ENDPROC(cast6_xts_enc_8way)
14200
14201@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
14202 /* dst <= regs xor IVs(in dst) */
14203 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14204
14205+ pax_force_retaddr
14206 ret;
14207 ENDPROC(cast6_xts_dec_8way)
14208diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14209index dbc4339..de6e120 100644
14210--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14211+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14212@@ -45,6 +45,7 @@
14213
14214 #include <asm/inst.h>
14215 #include <linux/linkage.h>
14216+#include <asm/alternative-asm.h>
14217
14218 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
14219
14220@@ -312,6 +313,7 @@ do_return:
14221 popq %rsi
14222 popq %rdi
14223 popq %rbx
14224+ pax_force_retaddr
14225 ret
14226
14227 ################################################################
14228diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14229index 5d1e007..098cb4f 100644
14230--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
14231+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14232@@ -18,6 +18,7 @@
14233
14234 #include <linux/linkage.h>
14235 #include <asm/inst.h>
14236+#include <asm/alternative-asm.h>
14237
14238 .data
14239
14240@@ -89,6 +90,7 @@ __clmul_gf128mul_ble:
14241 psrlq $1, T2
14242 pxor T2, T1
14243 pxor T1, DATA
14244+ pax_force_retaddr
14245 ret
14246 ENDPROC(__clmul_gf128mul_ble)
14247
14248@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
14249 call __clmul_gf128mul_ble
14250 PSHUFB_XMM BSWAP DATA
14251 movups DATA, (%rdi)
14252+ pax_force_retaddr
14253 ret
14254 ENDPROC(clmul_ghash_mul)
14255
14256@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update)
14257 PSHUFB_XMM BSWAP DATA
14258 movups DATA, (%rdi)
14259 .Lupdate_just_ret:
14260+ pax_force_retaddr
14261 ret
14262 ENDPROC(clmul_ghash_update)
14263diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14264index 9279e0b..c4b3d2c 100644
14265--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
14266+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14267@@ -1,4 +1,5 @@
14268 #include <linux/linkage.h>
14269+#include <asm/alternative-asm.h>
14270
14271 # enter salsa20_encrypt_bytes
14272 ENTRY(salsa20_encrypt_bytes)
14273@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
14274 add %r11,%rsp
14275 mov %rdi,%rax
14276 mov %rsi,%rdx
14277+ pax_force_retaddr
14278 ret
14279 # bytesatleast65:
14280 ._bytesatleast65:
14281@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
14282 add %r11,%rsp
14283 mov %rdi,%rax
14284 mov %rsi,%rdx
14285+ pax_force_retaddr
14286 ret
14287 ENDPROC(salsa20_keysetup)
14288
14289@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
14290 add %r11,%rsp
14291 mov %rdi,%rax
14292 mov %rsi,%rdx
14293+ pax_force_retaddr
14294 ret
14295 ENDPROC(salsa20_ivsetup)
14296diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14297index 2f202f4..d9164d6 100644
14298--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14299+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14300@@ -24,6 +24,7 @@
14301 */
14302
14303 #include <linux/linkage.h>
14304+#include <asm/alternative-asm.h>
14305 #include "glue_helper-asm-avx.S"
14306
14307 .file "serpent-avx-x86_64-asm_64.S"
14308@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
14309 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14310 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14311
14312+ pax_force_retaddr
14313 ret;
14314 ENDPROC(__serpent_enc_blk8_avx)
14315
14316@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
14317 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14318 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14319
14320+ pax_force_retaddr
14321 ret;
14322 ENDPROC(__serpent_dec_blk8_avx)
14323
14324@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
14325
14326 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14327
14328+ pax_force_retaddr
14329 ret;
14330 ENDPROC(serpent_ecb_enc_8way_avx)
14331
14332@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
14333
14334 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14335
14336+ pax_force_retaddr
14337 ret;
14338 ENDPROC(serpent_ecb_dec_8way_avx)
14339
14340@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
14341
14342 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14343
14344+ pax_force_retaddr
14345 ret;
14346 ENDPROC(serpent_cbc_dec_8way_avx)
14347
14348@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
14349
14350 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14351
14352+ pax_force_retaddr
14353 ret;
14354 ENDPROC(serpent_ctr_8way_avx)
14355
14356@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
14357 /* dst <= regs xor IVs(in dst) */
14358 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14359
14360+ pax_force_retaddr
14361 ret;
14362 ENDPROC(serpent_xts_enc_8way_avx)
14363
14364@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
14365 /* dst <= regs xor IVs(in dst) */
14366 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14367
14368+ pax_force_retaddr
14369 ret;
14370 ENDPROC(serpent_xts_dec_8way_avx)
14371diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
14372index b222085..abd483c 100644
14373--- a/arch/x86/crypto/serpent-avx2-asm_64.S
14374+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
14375@@ -15,6 +15,7 @@
14376 */
14377
14378 #include <linux/linkage.h>
14379+#include <asm/alternative-asm.h>
14380 #include "glue_helper-asm-avx2.S"
14381
14382 .file "serpent-avx2-asm_64.S"
14383@@ -610,6 +611,7 @@ __serpent_enc_blk16:
14384 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14385 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14386
14387+ pax_force_retaddr
14388 ret;
14389 ENDPROC(__serpent_enc_blk16)
14390
14391@@ -664,6 +666,7 @@ __serpent_dec_blk16:
14392 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14393 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14394
14395+ pax_force_retaddr
14396 ret;
14397 ENDPROC(__serpent_dec_blk16)
14398
14399@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
14400
14401 vzeroupper;
14402
14403+ pax_force_retaddr
14404 ret;
14405 ENDPROC(serpent_ecb_enc_16way)
14406
14407@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
14408
14409 vzeroupper;
14410
14411+ pax_force_retaddr
14412 ret;
14413 ENDPROC(serpent_ecb_dec_16way)
14414
14415@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
14416
14417 vzeroupper;
14418
14419+ pax_force_retaddr
14420 ret;
14421 ENDPROC(serpent_cbc_dec_16way)
14422
14423@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
14424
14425 vzeroupper;
14426
14427+ pax_force_retaddr
14428 ret;
14429 ENDPROC(serpent_ctr_16way)
14430
14431@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
14432
14433 vzeroupper;
14434
14435+ pax_force_retaddr
14436 ret;
14437 ENDPROC(serpent_xts_enc_16way)
14438
14439@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
14440
14441 vzeroupper;
14442
14443+ pax_force_retaddr
14444 ret;
14445 ENDPROC(serpent_xts_dec_16way)
14446diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14447index acc066c..1559cc4 100644
14448--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14449+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14450@@ -25,6 +25,7 @@
14451 */
14452
14453 #include <linux/linkage.h>
14454+#include <asm/alternative-asm.h>
14455
14456 .file "serpent-sse2-x86_64-asm_64.S"
14457 .text
14458@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
14459 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14460 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14461
14462+ pax_force_retaddr
14463 ret;
14464
14465 .L__enc_xor8:
14466 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14467 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14468
14469+ pax_force_retaddr
14470 ret;
14471 ENDPROC(__serpent_enc_blk_8way)
14472
14473@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
14474 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14475 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14476
14477+ pax_force_retaddr
14478 ret;
14479 ENDPROC(serpent_dec_blk_8way)
14480diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
14481index a410950..9dfe7ad 100644
14482--- a/arch/x86/crypto/sha1_ssse3_asm.S
14483+++ b/arch/x86/crypto/sha1_ssse3_asm.S
14484@@ -29,6 +29,7 @@
14485 */
14486
14487 #include <linux/linkage.h>
14488+#include <asm/alternative-asm.h>
14489
14490 #define CTX %rdi // arg1
14491 #define BUF %rsi // arg2
14492@@ -75,9 +76,9 @@
14493
14494 push %rbx
14495 push %rbp
14496- push %r12
14497+ push %r14
14498
14499- mov %rsp, %r12
14500+ mov %rsp, %r14
14501 sub $64, %rsp # allocate workspace
14502 and $~15, %rsp # align stack
14503
14504@@ -99,11 +100,12 @@
14505 xor %rax, %rax
14506 rep stosq
14507
14508- mov %r12, %rsp # deallocate workspace
14509+ mov %r14, %rsp # deallocate workspace
14510
14511- pop %r12
14512+ pop %r14
14513 pop %rbp
14514 pop %rbx
14515+ pax_force_retaddr
14516 ret
14517
14518 ENDPROC(\name)
14519diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
14520index 642f156..51a513c 100644
14521--- a/arch/x86/crypto/sha256-avx-asm.S
14522+++ b/arch/x86/crypto/sha256-avx-asm.S
14523@@ -49,6 +49,7 @@
14524
14525 #ifdef CONFIG_AS_AVX
14526 #include <linux/linkage.h>
14527+#include <asm/alternative-asm.h>
14528
14529 ## assume buffers not aligned
14530 #define VMOVDQ vmovdqu
14531@@ -460,6 +461,7 @@ done_hash:
14532 popq %r13
14533 popq %rbp
14534 popq %rbx
14535+ pax_force_retaddr
14536 ret
14537 ENDPROC(sha256_transform_avx)
14538
14539diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
14540index 9e86944..3795e6a 100644
14541--- a/arch/x86/crypto/sha256-avx2-asm.S
14542+++ b/arch/x86/crypto/sha256-avx2-asm.S
14543@@ -50,6 +50,7 @@
14544
14545 #ifdef CONFIG_AS_AVX2
14546 #include <linux/linkage.h>
14547+#include <asm/alternative-asm.h>
14548
14549 ## assume buffers not aligned
14550 #define VMOVDQ vmovdqu
14551@@ -720,6 +721,7 @@ done_hash:
14552 popq %r12
14553 popq %rbp
14554 popq %rbx
14555+ pax_force_retaddr
14556 ret
14557 ENDPROC(sha256_transform_rorx)
14558
14559diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
14560index f833b74..8c62a9e 100644
14561--- a/arch/x86/crypto/sha256-ssse3-asm.S
14562+++ b/arch/x86/crypto/sha256-ssse3-asm.S
14563@@ -47,6 +47,7 @@
14564 ########################################################################
14565
14566 #include <linux/linkage.h>
14567+#include <asm/alternative-asm.h>
14568
14569 ## assume buffers not aligned
14570 #define MOVDQ movdqu
14571@@ -471,6 +472,7 @@ done_hash:
14572 popq %rbp
14573 popq %rbx
14574
14575+ pax_force_retaddr
14576 ret
14577 ENDPROC(sha256_transform_ssse3)
14578
14579diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
14580index 974dde9..a823ff9 100644
14581--- a/arch/x86/crypto/sha512-avx-asm.S
14582+++ b/arch/x86/crypto/sha512-avx-asm.S
14583@@ -49,6 +49,7 @@
14584
14585 #ifdef CONFIG_AS_AVX
14586 #include <linux/linkage.h>
14587+#include <asm/alternative-asm.h>
14588
14589 .text
14590
14591@@ -364,6 +365,7 @@ updateblock:
14592 mov frame_RSPSAVE(%rsp), %rsp
14593
14594 nowork:
14595+ pax_force_retaddr
14596 ret
14597 ENDPROC(sha512_transform_avx)
14598
14599diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
14600index 568b961..ed20c37 100644
14601--- a/arch/x86/crypto/sha512-avx2-asm.S
14602+++ b/arch/x86/crypto/sha512-avx2-asm.S
14603@@ -51,6 +51,7 @@
14604
14605 #ifdef CONFIG_AS_AVX2
14606 #include <linux/linkage.h>
14607+#include <asm/alternative-asm.h>
14608
14609 .text
14610
14611@@ -678,6 +679,7 @@ done_hash:
14612
14613 # Restore Stack Pointer
14614 mov frame_RSPSAVE(%rsp), %rsp
14615+ pax_force_retaddr
14616 ret
14617 ENDPROC(sha512_transform_rorx)
14618
14619diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
14620index fb56855..6edd768 100644
14621--- a/arch/x86/crypto/sha512-ssse3-asm.S
14622+++ b/arch/x86/crypto/sha512-ssse3-asm.S
14623@@ -48,6 +48,7 @@
14624 ########################################################################
14625
14626 #include <linux/linkage.h>
14627+#include <asm/alternative-asm.h>
14628
14629 .text
14630
14631@@ -363,6 +364,7 @@ updateblock:
14632 mov frame_RSPSAVE(%rsp), %rsp
14633
14634 nowork:
14635+ pax_force_retaddr
14636 ret
14637 ENDPROC(sha512_transform_ssse3)
14638
14639diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14640index 0505813..b067311 100644
14641--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14642+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14643@@ -24,6 +24,7 @@
14644 */
14645
14646 #include <linux/linkage.h>
14647+#include <asm/alternative-asm.h>
14648 #include "glue_helper-asm-avx.S"
14649
14650 .file "twofish-avx-x86_64-asm_64.S"
14651@@ -284,6 +285,7 @@ __twofish_enc_blk8:
14652 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
14653 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
14654
14655+ pax_force_retaddr
14656 ret;
14657 ENDPROC(__twofish_enc_blk8)
14658
14659@@ -324,6 +326,7 @@ __twofish_dec_blk8:
14660 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
14661 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
14662
14663+ pax_force_retaddr
14664 ret;
14665 ENDPROC(__twofish_dec_blk8)
14666
14667@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
14668
14669 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14670
14671+ pax_force_retaddr
14672 ret;
14673 ENDPROC(twofish_ecb_enc_8way)
14674
14675@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
14676
14677 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14678
14679+ pax_force_retaddr
14680 ret;
14681 ENDPROC(twofish_ecb_dec_8way)
14682
14683@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
14684 * %rdx: src
14685 */
14686
14687- pushq %r12;
14688+ pushq %r14;
14689
14690 movq %rsi, %r11;
14691- movq %rdx, %r12;
14692+ movq %rdx, %r14;
14693
14694 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14695
14696 call __twofish_dec_blk8;
14697
14698- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14699+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14700
14701- popq %r12;
14702+ popq %r14;
14703
14704+ pax_force_retaddr
14705 ret;
14706 ENDPROC(twofish_cbc_dec_8way)
14707
14708@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
14709 * %rcx: iv (little endian, 128bit)
14710 */
14711
14712- pushq %r12;
14713+ pushq %r14;
14714
14715 movq %rsi, %r11;
14716- movq %rdx, %r12;
14717+ movq %rdx, %r14;
14718
14719 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14720 RD2, RX0, RX1, RY0);
14721
14722 call __twofish_enc_blk8;
14723
14724- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14725+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14726
14727- popq %r12;
14728+ popq %r14;
14729
14730+ pax_force_retaddr
14731 ret;
14732 ENDPROC(twofish_ctr_8way)
14733
14734@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
14735 /* dst <= regs xor IVs(in dst) */
14736 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14737
14738+ pax_force_retaddr
14739 ret;
14740 ENDPROC(twofish_xts_enc_8way)
14741
14742@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
14743 /* dst <= regs xor IVs(in dst) */
14744 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14745
14746+ pax_force_retaddr
14747 ret;
14748 ENDPROC(twofish_xts_dec_8way)
14749diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14750index 1c3b7ce..02f578d 100644
14751--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14752+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14753@@ -21,6 +21,7 @@
14754 */
14755
14756 #include <linux/linkage.h>
14757+#include <asm/alternative-asm.h>
14758
14759 .file "twofish-x86_64-asm-3way.S"
14760 .text
14761@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
14762 popq %r13;
14763 popq %r14;
14764 popq %r15;
14765+ pax_force_retaddr
14766 ret;
14767
14768 .L__enc_xor3:
14769@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
14770 popq %r13;
14771 popq %r14;
14772 popq %r15;
14773+ pax_force_retaddr
14774 ret;
14775 ENDPROC(__twofish_enc_blk_3way)
14776
14777@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
14778 popq %r13;
14779 popq %r14;
14780 popq %r15;
14781+ pax_force_retaddr
14782 ret;
14783 ENDPROC(twofish_dec_blk_3way)
14784diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
14785index a039d21..524b8b2 100644
14786--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
14787+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
14788@@ -22,6 +22,7 @@
14789
14790 #include <linux/linkage.h>
14791 #include <asm/asm-offsets.h>
14792+#include <asm/alternative-asm.h>
14793
14794 #define a_offset 0
14795 #define b_offset 4
14796@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
14797
14798 popq R1
14799 movq $1,%rax
14800+ pax_force_retaddr
14801 ret
14802 ENDPROC(twofish_enc_blk)
14803
14804@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
14805
14806 popq R1
14807 movq $1,%rax
14808+ pax_force_retaddr
14809 ret
14810 ENDPROC(twofish_dec_blk)
14811diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
14812index d21ff89..6da8e6e 100644
14813--- a/arch/x86/ia32/ia32_aout.c
14814+++ b/arch/x86/ia32/ia32_aout.c
14815@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
14816 unsigned long dump_start, dump_size;
14817 struct user32 dump;
14818
14819+ memset(&dump, 0, sizeof(dump));
14820+
14821 fs = get_fs();
14822 set_fs(KERNEL_DS);
14823 has_dumped = 1;
14824diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
14825index f9e181a..b0df8b3 100644
14826--- a/arch/x86/ia32/ia32_signal.c
14827+++ b/arch/x86/ia32/ia32_signal.c
14828@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
14829 if (__get_user(set.sig[0], &frame->sc.oldmask)
14830 || (_COMPAT_NSIG_WORDS > 1
14831 && __copy_from_user((((char *) &set.sig) + 4),
14832- &frame->extramask,
14833+ frame->extramask,
14834 sizeof(frame->extramask))))
14835 goto badframe;
14836
14837@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
14838 sp -= frame_size;
14839 /* Align the stack pointer according to the i386 ABI,
14840 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
14841- sp = ((sp + 4) & -16ul) - 4;
14842+ sp = ((sp - 12) & -16ul) - 4;
14843 return (void __user *) sp;
14844 }
14845
14846@@ -386,7 +386,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14847 restorer = current->mm->context.vdso +
14848 selected_vdso32->sym___kernel_sigreturn;
14849 else
14850- restorer = &frame->retcode;
14851+ restorer = frame->retcode;
14852 }
14853
14854 put_user_try {
14855@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14856 * These are actually not used anymore, but left because some
14857 * gdb versions depend on them as a marker.
14858 */
14859- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14860+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14861 } put_user_catch(err);
14862
14863 if (err)
14864@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14865 0xb8,
14866 __NR_ia32_rt_sigreturn,
14867 0x80cd,
14868- 0,
14869+ 0
14870 };
14871
14872 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
14873@@ -461,16 +461,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14874
14875 if (ksig->ka.sa.sa_flags & SA_RESTORER)
14876 restorer = ksig->ka.sa.sa_restorer;
14877- else
14878+ else if (current->mm->context.vdso)
14879+ /* Return stub is in 32bit vsyscall page */
14880 restorer = current->mm->context.vdso +
14881 selected_vdso32->sym___kernel_rt_sigreturn;
14882+ else
14883+ restorer = frame->retcode;
14884 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
14885
14886 /*
14887 * Not actually used anymore, but left because some gdb
14888 * versions need it.
14889 */
14890- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14891+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14892 } put_user_catch(err);
14893
14894 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
14895diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
14896index 4299eb0..fefe70e 100644
14897--- a/arch/x86/ia32/ia32entry.S
14898+++ b/arch/x86/ia32/ia32entry.S
14899@@ -15,8 +15,10 @@
14900 #include <asm/irqflags.h>
14901 #include <asm/asm.h>
14902 #include <asm/smap.h>
14903+#include <asm/pgtable.h>
14904 #include <linux/linkage.h>
14905 #include <linux/err.h>
14906+#include <asm/alternative-asm.h>
14907
14908 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
14909 #include <linux/elf-em.h>
14910@@ -62,12 +64,12 @@
14911 */
14912 .macro LOAD_ARGS32 offset, _r9=0
14913 .if \_r9
14914- movl \offset+16(%rsp),%r9d
14915+ movl \offset+R9(%rsp),%r9d
14916 .endif
14917- movl \offset+40(%rsp),%ecx
14918- movl \offset+48(%rsp),%edx
14919- movl \offset+56(%rsp),%esi
14920- movl \offset+64(%rsp),%edi
14921+ movl \offset+RCX(%rsp),%ecx
14922+ movl \offset+RDX(%rsp),%edx
14923+ movl \offset+RSI(%rsp),%esi
14924+ movl \offset+RDI(%rsp),%edi
14925 movl %eax,%eax /* zero extension */
14926 .endm
14927
14928@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
14929 ENDPROC(native_irq_enable_sysexit)
14930 #endif
14931
14932+ .macro pax_enter_kernel_user
14933+ pax_set_fptr_mask
14934+#ifdef CONFIG_PAX_MEMORY_UDEREF
14935+ call pax_enter_kernel_user
14936+#endif
14937+ .endm
14938+
14939+ .macro pax_exit_kernel_user
14940+#ifdef CONFIG_PAX_MEMORY_UDEREF
14941+ call pax_exit_kernel_user
14942+#endif
14943+#ifdef CONFIG_PAX_RANDKSTACK
14944+ pushq %rax
14945+ pushq %r11
14946+ call pax_randomize_kstack
14947+ popq %r11
14948+ popq %rax
14949+#endif
14950+ .endm
14951+
14952+ .macro pax_erase_kstack
14953+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14954+ call pax_erase_kstack
14955+#endif
14956+ .endm
14957+
14958 /*
14959 * 32bit SYSENTER instruction entry.
14960 *
14961@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
14962 CFI_REGISTER rsp,rbp
14963 SWAPGS_UNSAFE_STACK
14964 movq PER_CPU_VAR(kernel_stack), %rsp
14965- addq $(KERNEL_STACK_OFFSET),%rsp
14966- /*
14967- * No need to follow this irqs on/off section: the syscall
14968- * disabled irqs, here we enable it straight after entry:
14969- */
14970- ENABLE_INTERRUPTS(CLBR_NONE)
14971 movl %ebp,%ebp /* zero extension */
14972 pushq_cfi $__USER32_DS
14973 /*CFI_REL_OFFSET ss,0*/
14974@@ -135,24 +157,49 @@ ENTRY(ia32_sysenter_target)
14975 CFI_REL_OFFSET rsp,0
14976 pushfq_cfi
14977 /*CFI_REL_OFFSET rflags,0*/
14978- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
14979- CFI_REGISTER rip,r10
14980+ orl $X86_EFLAGS_IF,(%rsp)
14981+ GET_THREAD_INFO(%r11)
14982+ movl TI_sysenter_return(%r11), %r11d
14983+ CFI_REGISTER rip,r11
14984 pushq_cfi $__USER32_CS
14985 /*CFI_REL_OFFSET cs,0*/
14986 movl %eax, %eax
14987- pushq_cfi %r10
14988+ pushq_cfi %r11
14989 CFI_REL_OFFSET rip,0
14990 pushq_cfi %rax
14991 cld
14992 SAVE_ARGS 0,1,0
14993+ pax_enter_kernel_user
14994+
14995+#ifdef CONFIG_PAX_RANDKSTACK
14996+ pax_erase_kstack
14997+#endif
14998+
14999+ /*
15000+ * No need to follow this irqs on/off section: the syscall
15001+ * disabled irqs, here we enable it straight after entry:
15002+ */
15003+ ENABLE_INTERRUPTS(CLBR_NONE)
15004 /* no need to do an access_ok check here because rbp has been
15005 32bit zero extended */
15006+
15007+#ifdef CONFIG_PAX_MEMORY_UDEREF
15008+ addq pax_user_shadow_base,%rbp
15009+ ASM_PAX_OPEN_USERLAND
15010+#endif
15011+
15012 ASM_STAC
15013 1: movl (%rbp),%ebp
15014 _ASM_EXTABLE(1b,ia32_badarg)
15015 ASM_CLAC
15016- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15017- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15018+
15019+#ifdef CONFIG_PAX_MEMORY_UDEREF
15020+ ASM_PAX_CLOSE_USERLAND
15021+#endif
15022+
15023+ GET_THREAD_INFO(%r11)
15024+ orl $TS_COMPAT,TI_status(%r11)
15025+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15026 CFI_REMEMBER_STATE
15027 jnz sysenter_tracesys
15028 cmpq $(IA32_NR_syscalls-1),%rax
15029@@ -162,15 +209,18 @@ sysenter_do_call:
15030 sysenter_dispatch:
15031 call *ia32_sys_call_table(,%rax,8)
15032 movq %rax,RAX-ARGOFFSET(%rsp)
15033+ GET_THREAD_INFO(%r11)
15034 DISABLE_INTERRUPTS(CLBR_NONE)
15035 TRACE_IRQS_OFF
15036- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15037+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
15038 jnz sysexit_audit
15039 sysexit_from_sys_call:
15040- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15041+ pax_exit_kernel_user
15042+ pax_erase_kstack
15043+ andl $~TS_COMPAT,TI_status(%r11)
15044 /* clear IF, that popfq doesn't enable interrupts early */
15045- andl $~0x200,EFLAGS-R11(%rsp)
15046- movl RIP-R11(%rsp),%edx /* User %eip */
15047+ andl $~X86_EFLAGS_IF,EFLAGS(%rsp)
15048+ movl RIP(%rsp),%edx /* User %eip */
15049 CFI_REGISTER rip,rdx
15050 RESTORE_ARGS 0,24,0,0,0,0
15051 xorq %r8,%r8
15052@@ -193,6 +243,9 @@ sysexit_from_sys_call:
15053 movl %eax,%esi /* 2nd arg: syscall number */
15054 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
15055 call __audit_syscall_entry
15056+
15057+ pax_erase_kstack
15058+
15059 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
15060 cmpq $(IA32_NR_syscalls-1),%rax
15061 ja ia32_badsys
15062@@ -204,7 +257,7 @@ sysexit_from_sys_call:
15063 .endm
15064
15065 .macro auditsys_exit exit
15066- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15067+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
15068 jnz ia32_ret_from_sys_call
15069 TRACE_IRQS_ON
15070 ENABLE_INTERRUPTS(CLBR_NONE)
15071@@ -215,11 +268,12 @@ sysexit_from_sys_call:
15072 1: setbe %al /* 1 if error, 0 if not */
15073 movzbl %al,%edi /* zero-extend that into %edi */
15074 call __audit_syscall_exit
15075+ GET_THREAD_INFO(%r11)
15076 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
15077 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
15078 DISABLE_INTERRUPTS(CLBR_NONE)
15079 TRACE_IRQS_OFF
15080- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15081+ testl %edi,TI_flags(%r11)
15082 jz \exit
15083 CLEAR_RREGS -ARGOFFSET
15084 jmp int_with_check
15085@@ -237,7 +291,7 @@ sysexit_audit:
15086
15087 sysenter_tracesys:
15088 #ifdef CONFIG_AUDITSYSCALL
15089- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15090+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
15091 jz sysenter_auditsys
15092 #endif
15093 SAVE_REST
15094@@ -249,6 +303,9 @@ sysenter_tracesys:
15095 RESTORE_REST
15096 cmpq $(IA32_NR_syscalls-1),%rax
15097 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
15098+
15099+ pax_erase_kstack
15100+
15101 jmp sysenter_do_call
15102 CFI_ENDPROC
15103 ENDPROC(ia32_sysenter_target)
15104@@ -276,19 +333,25 @@ ENDPROC(ia32_sysenter_target)
15105 ENTRY(ia32_cstar_target)
15106 CFI_STARTPROC32 simple
15107 CFI_SIGNAL_FRAME
15108- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
15109+ CFI_DEF_CFA rsp,0
15110 CFI_REGISTER rip,rcx
15111 /*CFI_REGISTER rflags,r11*/
15112 SWAPGS_UNSAFE_STACK
15113 movl %esp,%r8d
15114 CFI_REGISTER rsp,r8
15115 movq PER_CPU_VAR(kernel_stack),%rsp
15116+ SAVE_ARGS 8*6,0,0
15117+ pax_enter_kernel_user
15118+
15119+#ifdef CONFIG_PAX_RANDKSTACK
15120+ pax_erase_kstack
15121+#endif
15122+
15123 /*
15124 * No need to follow this irqs on/off section: the syscall
15125 * disabled irqs and here we enable it straight after entry:
15126 */
15127 ENABLE_INTERRUPTS(CLBR_NONE)
15128- SAVE_ARGS 8,0,0
15129 movl %eax,%eax /* zero extension */
15130 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
15131 movq %rcx,RIP-ARGOFFSET(%rsp)
15132@@ -304,12 +367,25 @@ ENTRY(ia32_cstar_target)
15133 /* no need to do an access_ok check here because r8 has been
15134 32bit zero extended */
15135 /* hardware stack frame is complete now */
15136+
15137+#ifdef CONFIG_PAX_MEMORY_UDEREF
15138+ ASM_PAX_OPEN_USERLAND
15139+ movq pax_user_shadow_base,%r8
15140+ addq RSP-ARGOFFSET(%rsp),%r8
15141+#endif
15142+
15143 ASM_STAC
15144 1: movl (%r8),%r9d
15145 _ASM_EXTABLE(1b,ia32_badarg)
15146 ASM_CLAC
15147- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15148- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15149+
15150+#ifdef CONFIG_PAX_MEMORY_UDEREF
15151+ ASM_PAX_CLOSE_USERLAND
15152+#endif
15153+
15154+ GET_THREAD_INFO(%r11)
15155+ orl $TS_COMPAT,TI_status(%r11)
15156+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15157 CFI_REMEMBER_STATE
15158 jnz cstar_tracesys
15159 cmpq $IA32_NR_syscalls-1,%rax
15160@@ -319,13 +395,16 @@ cstar_do_call:
15161 cstar_dispatch:
15162 call *ia32_sys_call_table(,%rax,8)
15163 movq %rax,RAX-ARGOFFSET(%rsp)
15164+ GET_THREAD_INFO(%r11)
15165 DISABLE_INTERRUPTS(CLBR_NONE)
15166 TRACE_IRQS_OFF
15167- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15168+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
15169 jnz sysretl_audit
15170 sysretl_from_sys_call:
15171- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15172- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
15173+ pax_exit_kernel_user
15174+ pax_erase_kstack
15175+ andl $~TS_COMPAT,TI_status(%r11)
15176+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
15177 movl RIP-ARGOFFSET(%rsp),%ecx
15178 CFI_REGISTER rip,rcx
15179 movl EFLAGS-ARGOFFSET(%rsp),%r11d
15180@@ -352,7 +431,7 @@ sysretl_audit:
15181
15182 cstar_tracesys:
15183 #ifdef CONFIG_AUDITSYSCALL
15184- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15185+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
15186 jz cstar_auditsys
15187 #endif
15188 xchgl %r9d,%ebp
15189@@ -366,11 +445,19 @@ cstar_tracesys:
15190 xchgl %ebp,%r9d
15191 cmpq $(IA32_NR_syscalls-1),%rax
15192 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
15193+
15194+ pax_erase_kstack
15195+
15196 jmp cstar_do_call
15197 END(ia32_cstar_target)
15198
15199 ia32_badarg:
15200 ASM_CLAC
15201+
15202+#ifdef CONFIG_PAX_MEMORY_UDEREF
15203+ ASM_PAX_CLOSE_USERLAND
15204+#endif
15205+
15206 movq $-EFAULT,%rax
15207 jmp ia32_sysret
15208 CFI_ENDPROC
15209@@ -407,19 +494,26 @@ ENTRY(ia32_syscall)
15210 CFI_REL_OFFSET rip,RIP-RIP
15211 PARAVIRT_ADJUST_EXCEPTION_FRAME
15212 SWAPGS
15213- /*
15214- * No need to follow this irqs on/off section: the syscall
15215- * disabled irqs and here we enable it straight after entry:
15216- */
15217- ENABLE_INTERRUPTS(CLBR_NONE)
15218 movl %eax,%eax
15219 pushq_cfi %rax
15220 cld
15221 /* note the registers are not zero extended to the sf.
15222 this could be a problem. */
15223 SAVE_ARGS 0,1,0
15224- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15225- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15226+ pax_enter_kernel_user
15227+
15228+#ifdef CONFIG_PAX_RANDKSTACK
15229+ pax_erase_kstack
15230+#endif
15231+
15232+ /*
15233+ * No need to follow this irqs on/off section: the syscall
15234+ * disabled irqs and here we enable it straight after entry:
15235+ */
15236+ ENABLE_INTERRUPTS(CLBR_NONE)
15237+ GET_THREAD_INFO(%r11)
15238+ orl $TS_COMPAT,TI_status(%r11)
15239+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15240 jnz ia32_tracesys
15241 cmpq $(IA32_NR_syscalls-1),%rax
15242 ja ia32_badsys
15243@@ -442,6 +536,9 @@ ia32_tracesys:
15244 RESTORE_REST
15245 cmpq $(IA32_NR_syscalls-1),%rax
15246 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
15247+
15248+ pax_erase_kstack
15249+
15250 jmp ia32_do_call
15251 END(ia32_syscall)
15252
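
The UDEREF hunks above rebase user pointers before the kernel dereferences them: under CONFIG_PAX_MEMORY_UDEREF, userland is reachable from kernel mode only through a shadow alias at the original address plus pax_user_shadow_base, and each such access is bracketed by ASM_PAX_OPEN_USERLAND/ASM_PAX_CLOSE_USERLAND. A user-space sketch of just the rebasing arithmetic follows; the base value is illustrative, not the one the kernel computes.

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative stand-in for the kernel's pax_user_shadow_base;
     * the real value depends on the kernel's memory layout. */
    static const uint64_t pax_user_shadow_base = 0x1000000000000ULL;

    /* Model of the "addq pax_user_shadow_base,%rbp" rebasing above:
     * the kernel touches userland only via the shifted alias. */
    static uint64_t uderef_rebase(uint64_t user_addr)
    {
        return user_addr + pax_user_shadow_base;
    }

    int main(void)
    {
        uint64_t user_ptr = 0x00007fffffffe000ULL;
        printf("%#llx -> %#llx\n",
               (unsigned long long)user_ptr,
               (unsigned long long)uderef_rebase(user_ptr));
        return 0;
    }
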
15253diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
15254index 8e0ceec..af13504 100644
15255--- a/arch/x86/ia32/sys_ia32.c
15256+++ b/arch/x86/ia32/sys_ia32.c
15257@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
15258 */
15259 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
15260 {
15261- typeof(ubuf->st_uid) uid = 0;
15262- typeof(ubuf->st_gid) gid = 0;
15263+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
15264+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
15265 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
15266 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
15267 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
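
The cp_stat64() hunk above replaces typeof(ubuf->st_uid) with typeof(((struct stat64 *)0)->st_uid). typeof is evaluated entirely at compile time, so the cast null pointer is never dereferenced; the expression names the member's type without even nominally going through the __user argument. A standalone sketch of the idiom (the struct and the member_type macro are illustrative, not kernel names):

    #include <stdio.h>

    struct stat_like {
        unsigned short st_uid;   /* stand-in for the stat64 field */
    };

    /* typeof on a member of a cast null pointer is resolved by the
     * compiler; no load is ever generated, so this is safe. */
    #define member_type(type, member) typeof(((type *)0)->member)

    int main(void)
    {
        member_type(struct stat_like, st_uid) uid = 0; /* unsigned short */
        printf("sizeof(uid) = %zu\n", sizeof(uid));
        return 0;
    }
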
15268diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
15269index 372231c..51b537d 100644
15270--- a/arch/x86/include/asm/alternative-asm.h
15271+++ b/arch/x86/include/asm/alternative-asm.h
15272@@ -18,6 +18,45 @@
15273 .endm
15274 #endif
15275
15276+#ifdef KERNEXEC_PLUGIN
15277+ .macro pax_force_retaddr_bts rip=0
15278+ btsq $63,\rip(%rsp)
15279+ .endm
15280+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
15281+ .macro pax_force_retaddr rip=0, reload=0
15282+ btsq $63,\rip(%rsp)
15283+ .endm
15284+ .macro pax_force_fptr ptr
15285+ btsq $63,\ptr
15286+ .endm
15287+ .macro pax_set_fptr_mask
15288+ .endm
15289+#endif
15290+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15291+ .macro pax_force_retaddr rip=0, reload=0
15292+ .if \reload
15293+ pax_set_fptr_mask
15294+ .endif
15295+ orq %r12,\rip(%rsp)
15296+ .endm
15297+ .macro pax_force_fptr ptr
15298+ orq %r12,\ptr
15299+ .endm
15300+ .macro pax_set_fptr_mask
15301+ movabs $0x8000000000000000,%r12
15302+ .endm
15303+#endif
15304+#else
15305+ .macro pax_force_retaddr rip=0, reload=0
15306+ .endm
15307+ .macro pax_force_fptr ptr
15308+ .endm
15309+ .macro pax_force_retaddr_bts rip=0
15310+ .endm
15311+ .macro pax_set_fptr_mask
15312+ .endm
15313+#endif
15314+
15315 .macro altinstruction_entry orig alt feature orig_len alt_len
15316 .long \orig - .
15317 .long \alt - .
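
The macro block above is the runtime half of the KERNEXEC plugin's return-address protection, in two variants: the BTS method sets bit 63 of the saved return address in place (btsq $63,\rip(%rsp)), while the OR method keeps the same mask resident in %r12 and ORs it in. For a genuine kernel address the top bits are already set, so the operation is a no-op; a return address overwritten with a userland value becomes non-canonical and faults on return instead of executing attacker-chosen code. A user-space sketch of the bit-63 arithmetic only:

    #include <stdint.h>
    #include <stdio.h>

    /* Same effect as "btsq $63,addr", shown on a plain value; the
     * macros above apply it to the saved %rip slot on the stack. */
    static uint64_t force_retaddr(uint64_t addr)
    {
        return addr | (1ULL << 63);
    }

    int main(void)
    {
        uint64_t kernel_ra = 0xffffffff81000000ULL; /* stays unchanged */
        uint64_t forged_ra = 0x00007fffdeadbeefULL; /* goes non-canonical */
        printf("%#llx -> %#llx\n", (unsigned long long)kernel_ra,
               (unsigned long long)force_retaddr(kernel_ra));
        printf("%#llx -> %#llx\n", (unsigned long long)forged_ra,
               (unsigned long long)force_retaddr(forged_ra));
        return 0;
    }
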
15318diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
15319index 0a3f9c9..c9d081d 100644
15320--- a/arch/x86/include/asm/alternative.h
15321+++ b/arch/x86/include/asm/alternative.h
15322@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15323 ".pushsection .discard,\"aw\",@progbits\n" \
15324 DISCARD_ENTRY(1) \
15325 ".popsection\n" \
15326- ".pushsection .altinstr_replacement, \"ax\"\n" \
15327+ ".pushsection .altinstr_replacement, \"a\"\n" \
15328 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
15329 ".popsection"
15330
15331@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15332 DISCARD_ENTRY(1) \
15333 DISCARD_ENTRY(2) \
15334 ".popsection\n" \
15335- ".pushsection .altinstr_replacement, \"ax\"\n" \
15336+ ".pushsection .altinstr_replacement, \"a\"\n" \
15337 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
15338 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
15339 ".popsection"
15340diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
15341index 19b0eba..12254cd 100644
15342--- a/arch/x86/include/asm/apic.h
15343+++ b/arch/x86/include/asm/apic.h
15344@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
15345
15346 #ifdef CONFIG_X86_LOCAL_APIC
15347
15348-extern unsigned int apic_verbosity;
15349+extern int apic_verbosity;
15350 extern int local_apic_timer_c2_ok;
15351
15352 extern int disable_apic;
15353diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
15354index 20370c6..a2eb9b0 100644
15355--- a/arch/x86/include/asm/apm.h
15356+++ b/arch/x86/include/asm/apm.h
15357@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
15358 __asm__ __volatile__(APM_DO_ZERO_SEGS
15359 "pushl %%edi\n\t"
15360 "pushl %%ebp\n\t"
15361- "lcall *%%cs:apm_bios_entry\n\t"
15362+ "lcall *%%ss:apm_bios_entry\n\t"
15363 "setc %%al\n\t"
15364 "popl %%ebp\n\t"
15365 "popl %%edi\n\t"
15366@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
15367 __asm__ __volatile__(APM_DO_ZERO_SEGS
15368 "pushl %%edi\n\t"
15369 "pushl %%ebp\n\t"
15370- "lcall *%%cs:apm_bios_entry\n\t"
15371+ "lcall *%%ss:apm_bios_entry\n\t"
15372 "setc %%bl\n\t"
15373 "popl %%ebp\n\t"
15374 "popl %%edi\n\t"
15375diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
15376index 6dd1c7dd..2edd216 100644
15377--- a/arch/x86/include/asm/atomic.h
15378+++ b/arch/x86/include/asm/atomic.h
15379@@ -24,7 +24,18 @@
15380 */
15381 static inline int atomic_read(const atomic_t *v)
15382 {
15383- return (*(volatile int *)&(v)->counter);
15384+ return (*(volatile const int *)&(v)->counter);
15385+}
15386+
15387+/**
15388+ * atomic_read_unchecked - read atomic variable
15389+ * @v: pointer of type atomic_unchecked_t
15390+ *
15391+ * Atomically reads the value of @v.
15392+ */
15393+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
15394+{
15395+ return (*(volatile const int *)&(v)->counter);
15396 }
15397
15398 /**
15399@@ -40,6 +51,18 @@ static inline void atomic_set(atomic_t *v, int i)
15400 }
15401
15402 /**
15403+ * atomic_set_unchecked - set atomic variable
15404+ * @v: pointer of type atomic_unchecked_t
15405+ * @i: required value
15406+ *
15407+ * Atomically sets the value of @v to @i.
15408+ */
15409+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
15410+{
15411+ v->counter = i;
15412+}
15413+
15414+/**
15415 * atomic_add - add integer to atomic variable
15416 * @i: integer value to add
15417 * @v: pointer of type atomic_t
15418@@ -48,7 +71,29 @@ static inline void atomic_set(atomic_t *v, int i)
15419 */
15420 static inline void atomic_add(int i, atomic_t *v)
15421 {
15422- asm volatile(LOCK_PREFIX "addl %1,%0"
15423+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15424+
15425+#ifdef CONFIG_PAX_REFCOUNT
15426+ "jno 0f\n"
15427+ LOCK_PREFIX "subl %1,%0\n"
15428+ "int $4\n0:\n"
15429+ _ASM_EXTABLE(0b, 0b)
15430+#endif
15431+
15432+ : "+m" (v->counter)
15433+ : "ir" (i));
15434+}
15435+
15436+/**
15437+ * atomic_add_unchecked - add integer to atomic variable
15438+ * @i: integer value to add
15439+ * @v: pointer of type atomic_unchecked_t
15440+ *
15441+ * Atomically adds @i to @v.
15442+ */
15443+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
15444+{
15445+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15446 : "+m" (v->counter)
15447 : "ir" (i));
15448 }
15449@@ -62,7 +107,29 @@ static inline void atomic_add(int i, atomic_t *v)
15450 */
15451 static inline void atomic_sub(int i, atomic_t *v)
15452 {
15453- asm volatile(LOCK_PREFIX "subl %1,%0"
15454+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15455+
15456+#ifdef CONFIG_PAX_REFCOUNT
15457+ "jno 0f\n"
15458+ LOCK_PREFIX "addl %1,%0\n"
15459+ "int $4\n0:\n"
15460+ _ASM_EXTABLE(0b, 0b)
15461+#endif
15462+
15463+ : "+m" (v->counter)
15464+ : "ir" (i));
15465+}
15466+
15467+/**
15468+ * atomic_sub_unchecked - subtract integer from atomic variable
15469+ * @i: integer value to subtract
15470+ * @v: pointer of type atomic_unchecked_t
15471+ *
15472+ * Atomically subtracts @i from @v.
15473+ */
15474+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
15475+{
15476+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15477 : "+m" (v->counter)
15478 : "ir" (i));
15479 }
15480@@ -78,7 +145,7 @@ static inline void atomic_sub(int i, atomic_t *v)
15481 */
15482 static inline int atomic_sub_and_test(int i, atomic_t *v)
15483 {
15484- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
15485+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
15486 }
15487
15488 /**
15489@@ -89,7 +156,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
15490 */
15491 static inline void atomic_inc(atomic_t *v)
15492 {
15493- asm volatile(LOCK_PREFIX "incl %0"
15494+ asm volatile(LOCK_PREFIX "incl %0\n"
15495+
15496+#ifdef CONFIG_PAX_REFCOUNT
15497+ "jno 0f\n"
15498+ LOCK_PREFIX "decl %0\n"
15499+ "int $4\n0:\n"
15500+ _ASM_EXTABLE(0b, 0b)
15501+#endif
15502+
15503+ : "+m" (v->counter));
15504+}
15505+
15506+/**
15507+ * atomic_inc_unchecked - increment atomic variable
15508+ * @v: pointer of type atomic_unchecked_t
15509+ *
15510+ * Atomically increments @v by 1.
15511+ */
15512+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
15513+{
15514+ asm volatile(LOCK_PREFIX "incl %0\n"
15515 : "+m" (v->counter));
15516 }
15517
15518@@ -101,7 +188,27 @@ static inline void atomic_inc(atomic_t *v)
15519 */
15520 static inline void atomic_dec(atomic_t *v)
15521 {
15522- asm volatile(LOCK_PREFIX "decl %0"
15523+ asm volatile(LOCK_PREFIX "decl %0\n"
15524+
15525+#ifdef CONFIG_PAX_REFCOUNT
15526+ "jno 0f\n"
15527+ LOCK_PREFIX "incl %0\n"
15528+ "int $4\n0:\n"
15529+ _ASM_EXTABLE(0b, 0b)
15530+#endif
15531+
15532+ : "+m" (v->counter));
15533+}
15534+
15535+/**
15536+ * atomic_dec_unchecked - decrement atomic variable
15537+ * @v: pointer of type atomic_unchecked_t
15538+ *
15539+ * Atomically decrements @v by 1.
15540+ */
15541+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
15542+{
15543+ asm volatile(LOCK_PREFIX "decl %0\n"
15544 : "+m" (v->counter));
15545 }
15546
15547@@ -115,7 +222,7 @@ static inline void atomic_dec(atomic_t *v)
15548 */
15549 static inline int atomic_dec_and_test(atomic_t *v)
15550 {
15551- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
15552+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
15553 }
15554
15555 /**
15556@@ -128,7 +235,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
15557 */
15558 static inline int atomic_inc_and_test(atomic_t *v)
15559 {
15560- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
15561+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
15562+}
15563+
15564+/**
15565+ * atomic_inc_and_test_unchecked - increment and test
15566+ * @v: pointer of type atomic_unchecked_t
15567+ *
15568+ * Atomically increments @v by 1
15569+ * and returns true if the result is zero, or false for all
15570+ * other cases.
15571+ */
15572+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
15573+{
15574+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
15575 }
15576
15577 /**
15578@@ -142,7 +262,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
15579 */
15580 static inline int atomic_add_negative(int i, atomic_t *v)
15581 {
15582- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
15583+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
15584 }
15585
15586 /**
15587@@ -154,6 +274,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
15588 */
15589 static inline int atomic_add_return(int i, atomic_t *v)
15590 {
15591+ return i + xadd_check_overflow(&v->counter, i);
15592+}
15593+
15594+/**
15595+ * atomic_add_return_unchecked - add integer and return
15596+ * @i: integer value to add
15597+ * @v: pointer of type atomic_unchecked_t
15598+ *
15599+ * Atomically adds @i to @v and returns @i + *@v
15600+ */
15601+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
15602+{
15603 return i + xadd(&v->counter, i);
15604 }
15605
15606@@ -170,9 +302,18 @@ static inline int atomic_sub_return(int i, atomic_t *v)
15607 }
15608
15609 #define atomic_inc_return(v) (atomic_add_return(1, v))
15610+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
15611+{
15612+ return atomic_add_return_unchecked(1, v);
15613+}
15614 #define atomic_dec_return(v) (atomic_sub_return(1, v))
15615
15616-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
15617+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
15618+{
15619+ return cmpxchg(&v->counter, old, new);
15620+}
15621+
15622+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
15623 {
15624 return cmpxchg(&v->counter, old, new);
15625 }
15626@@ -182,6 +323,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
15627 return xchg(&v->counter, new);
15628 }
15629
15630+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
15631+{
15632+ return xchg(&v->counter, new);
15633+}
15634+
15635 /**
15636 * __atomic_add_unless - add unless the number is already a given value
15637 * @v: pointer of type atomic_t
15638@@ -191,14 +337,27 @@ static inline int atomic_xchg(atomic_t *v, int new)
15639 * Atomically adds @a to @v, so long as @v was not already @u.
15640 * Returns the old value of @v.
15641 */
15642-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15643+static inline int __intentional_overflow(-1) __atomic_add_unless(atomic_t *v, int a, int u)
15644 {
15645- int c, old;
15646+ int c, old, new;
15647 c = atomic_read(v);
15648 for (;;) {
15649- if (unlikely(c == (u)))
15650+ if (unlikely(c == u))
15651 break;
15652- old = atomic_cmpxchg((v), c, c + (a));
15653+
15654+ asm volatile("addl %2,%0\n"
15655+
15656+#ifdef CONFIG_PAX_REFCOUNT
15657+ "jno 0f\n"
15658+ "subl %2,%0\n"
15659+ "int $4\n0:\n"
15660+ _ASM_EXTABLE(0b, 0b)
15661+#endif
15662+
15663+ : "=r" (new)
15664+ : "0" (c), "ir" (a));
15665+
15666+ old = atomic_cmpxchg(v, c, new);
15667 if (likely(old == c))
15668 break;
15669 c = old;
15670@@ -207,6 +366,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15671 }
15672
15673 /**
15674+ * atomic_inc_not_zero_hint - increment if not null
15675+ * @v: pointer of type atomic_t
15676+ * @hint: probable value of the atomic before the increment
15677+ *
15678+ * This version of atomic_inc_not_zero() gives a hint of the probable
15679+ * value of the atomic. This helps the processor avoid reading the memory
15680+ * before doing the atomic read/modify/write cycle, lowering the
15681+ * number of bus transactions on some arches.
15682+ *
15683+ * Returns: 0 if increment was not done, 1 otherwise.
15684+ */
15685+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
15686+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
15687+{
15688+ int val, c = hint, new;
15689+
15690+ /* sanity test, should be removed by compiler if hint is a constant */
15691+ if (!hint)
15692+ return __atomic_add_unless(v, 1, 0);
15693+
15694+ do {
15695+ asm volatile("incl %0\n"
15696+
15697+#ifdef CONFIG_PAX_REFCOUNT
15698+ "jno 0f\n"
15699+ "decl %0\n"
15700+ "int $4\n0:\n"
15701+ _ASM_EXTABLE(0b, 0b)
15702+#endif
15703+
15704+ : "=r" (new)
15705+ : "0" (c));
15706+
15707+ val = atomic_cmpxchg(v, c, new);
15708+ if (val == c)
15709+ return 1;
15710+ c = val;
15711+ } while (c);
15712+
15713+ return 0;
15714+}
15715+
15716+/**
15717 * atomic_inc_short - increment of a short integer
15718 * @v: pointer to type int
15719 *
15720@@ -235,14 +437,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
15721 #endif
15722
15723 /* These are x86-specific, used by some header files */
15724-#define atomic_clear_mask(mask, addr) \
15725- asm volatile(LOCK_PREFIX "andl %0,%1" \
15726- : : "r" (~(mask)), "m" (*(addr)) : "memory")
15727+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
15728+{
15729+ asm volatile(LOCK_PREFIX "andl %1,%0"
15730+ : "+m" (v->counter)
15731+ : "r" (~(mask))
15732+ : "memory");
15733+}
15734
15735-#define atomic_set_mask(mask, addr) \
15736- asm volatile(LOCK_PREFIX "orl %0,%1" \
15737- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
15738- : "memory")
15739+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15740+{
15741+ asm volatile(LOCK_PREFIX "andl %1,%0"
15742+ : "+m" (v->counter)
15743+ : "r" (~(mask))
15744+ : "memory");
15745+}
15746+
15747+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
15748+{
15749+ asm volatile(LOCK_PREFIX "orl %1,%0"
15750+ : "+m" (v->counter)
15751+ : "r" (mask)
15752+ : "memory");
15753+}
15754+
15755+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15756+{
15757+ asm volatile(LOCK_PREFIX "orl %1,%0"
15758+ : "+m" (v->counter)
15759+ : "r" (mask)
15760+ : "memory");
15761+}
15762
15763 #ifdef CONFIG_X86_32
15764 # include <asm/atomic64_32.h>
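
The pattern repeated through the atomic.h hunks above is PAX_REFCOUNT's overflow check: each armed operation is followed by jno (skip if no signed overflow), an undo of the operation, and int $4, so an overflowing increment is rolled back and raises the x86 overflow trap, whose handler can kill the task before a wrapped reference count matures into a use-after-free. A portable, deliberately non-atomic sketch of the detection logic (refcount_inc_checked and the abort() are stand-ins for the real asm and the trap handler):

    #include <stdio.h>
    #include <stdlib.h>
    #include <limits.h>

    /* Sketch only: the kernel does this with a lock-prefixed asm
     * sequence ("jno 0f" / undo / "int $4"); this version is not
     * atomic and exists to show the rollback-then-trap behaviour. */
    static int refcount_inc_checked(int *counter)
    {
        int old = *counter;
        int new;

        if (__builtin_add_overflow(old, 1, &new)) {
            /* counter is left at its old value, as the undo does */
            fprintf(stderr, "refcount overflow detected\n");
            abort();   /* stand-in for the kernel's overflow trap */
        }
        *counter = new;
        return new;
    }

    int main(void)
    {
        int refs = INT_MAX - 1;
        refcount_inc_checked(&refs);   /* fine: reaches INT_MAX */
        refcount_inc_checked(&refs);   /* overflows: aborts */
        return 0;
    }

The same jno/undo/int $4 sequence guards add, sub, inc, dec and, via xadd_check_overflow() in the cmpxchg.h hunk further down, the *_return variants; the *_unchecked siblings keep the original wrapping behaviour for counters that are not object lifetimes.
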
15765diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
15766index b154de7..bf18a5a 100644
15767--- a/arch/x86/include/asm/atomic64_32.h
15768+++ b/arch/x86/include/asm/atomic64_32.h
15769@@ -12,6 +12,14 @@ typedef struct {
15770 u64 __aligned(8) counter;
15771 } atomic64_t;
15772
15773+#ifdef CONFIG_PAX_REFCOUNT
15774+typedef struct {
15775+ u64 __aligned(8) counter;
15776+} atomic64_unchecked_t;
15777+#else
15778+typedef atomic64_t atomic64_unchecked_t;
15779+#endif
15780+
15781 #define ATOMIC64_INIT(val) { (val) }
15782
15783 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
15784@@ -37,21 +45,31 @@ typedef struct {
15785 ATOMIC64_DECL_ONE(sym##_386)
15786
15787 ATOMIC64_DECL_ONE(add_386);
15788+ATOMIC64_DECL_ONE(add_unchecked_386);
15789 ATOMIC64_DECL_ONE(sub_386);
15790+ATOMIC64_DECL_ONE(sub_unchecked_386);
15791 ATOMIC64_DECL_ONE(inc_386);
15792+ATOMIC64_DECL_ONE(inc_unchecked_386);
15793 ATOMIC64_DECL_ONE(dec_386);
15794+ATOMIC64_DECL_ONE(dec_unchecked_386);
15795 #endif
15796
15797 #define alternative_atomic64(f, out, in...) \
15798 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
15799
15800 ATOMIC64_DECL(read);
15801+ATOMIC64_DECL(read_unchecked);
15802 ATOMIC64_DECL(set);
15803+ATOMIC64_DECL(set_unchecked);
15804 ATOMIC64_DECL(xchg);
15805 ATOMIC64_DECL(add_return);
15806+ATOMIC64_DECL(add_return_unchecked);
15807 ATOMIC64_DECL(sub_return);
15808+ATOMIC64_DECL(sub_return_unchecked);
15809 ATOMIC64_DECL(inc_return);
15810+ATOMIC64_DECL(inc_return_unchecked);
15811 ATOMIC64_DECL(dec_return);
15812+ATOMIC64_DECL(dec_return_unchecked);
15813 ATOMIC64_DECL(dec_if_positive);
15814 ATOMIC64_DECL(inc_not_zero);
15815 ATOMIC64_DECL(add_unless);
15816@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
15817 }
15818
15819 /**
15820+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
15821+ * @v: pointer to type atomic64_unchecked_t
15822+ * @o: expected value
15823+ * @n: new value
15824+ *
15825+ * Atomically sets @v to @n if it was equal to @o and returns
15826+ * the old value.
15827+ */
15828+
15829+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
15830+{
15831+ return cmpxchg64(&v->counter, o, n);
15832+}
15833+
15834+/**
15835 * atomic64_xchg - xchg atomic64 variable
15836 * @v: pointer to type atomic64_t
15837 * @n: value to assign
15838@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
15839 }
15840
15841 /**
15842+ * atomic64_set_unchecked - set atomic64 variable
15843+ * @v: pointer to type atomic64_unchecked_t
15844+ * @n: value to assign
15845+ *
15846+ * Atomically sets the value of @v to @n.
15847+ */
15848+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
15849+{
15850+ unsigned high = (unsigned)(i >> 32);
15851+ unsigned low = (unsigned)i;
15852+ alternative_atomic64(set, /* no output */,
15853+ "S" (v), "b" (low), "c" (high)
15854+ : "eax", "edx", "memory");
15855+}
15856+
15857+/**
15858 * atomic64_read - read atomic64 variable
15859 * @v: pointer to type atomic64_t
15860 *
15861@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
15862 }
15863
15864 /**
15865+ * atomic64_read_unchecked - read atomic64 variable
15866+ * @v: pointer to type atomic64_unchecked_t
15867+ *
15868+ * Atomically reads the value of @v and returns it.
15869+ */
15870+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
15871+{
15872+ long long r;
15873+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
15874+ return r;
15875+}
15876+
15877+/**
15878 * atomic64_add_return - add and return
15879 * @i: integer value to add
15880 * @v: pointer to type atomic64_t
15881@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
15882 return i;
15883 }
15884
15885+/**
15886+ * atomic64_add_return_unchecked - add and return
15887+ * @i: integer value to add
15888+ * @v: pointer to type atomic64_unchecked_t
15889+ *
15890+ * Atomically adds @i to @v and returns @i + *@v
15891+ */
15892+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
15893+{
15894+ alternative_atomic64(add_return_unchecked,
15895+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15896+ ASM_NO_INPUT_CLOBBER("memory"));
15897+ return i;
15898+}
15899+
15900 /*
15901 * Other variants with different arithmetic operators:
15902 */
15903@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
15904 return a;
15905 }
15906
15907+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15908+{
15909+ long long a;
15910+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
15911+ "S" (v) : "memory", "ecx");
15912+ return a;
15913+}
15914+
15915 static inline long long atomic64_dec_return(atomic64_t *v)
15916 {
15917 long long a;
15918@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
15919 }
15920
15921 /**
15922+ * atomic64_add_unchecked - add integer to atomic64 variable
15923+ * @i: integer value to add
15924+ * @v: pointer to type atomic64_unchecked_t
15925+ *
15926+ * Atomically adds @i to @v.
15927+ */
15928+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
15929+{
15930+ __alternative_atomic64(add_unchecked, add_return_unchecked,
15931+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15932+ ASM_NO_INPUT_CLOBBER("memory"));
15933+ return i;
15934+}
15935+
15936+/**
15937 * atomic64_sub - subtract the atomic64 variable
15938 * @i: integer value to subtract
15939 * @v: pointer to type atomic64_t
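
The atomic64_unchecked_t typedef at the top of this hunk set is the type-system half of PAX_REFCOUNT: with the feature enabled, checked and unchecked counters are distinct struct types, so a refcount-carrying atomic64_t cannot silently flow into the *_unchecked helpers. A compile-time sketch of that separation (the types and helper below are simplified stand-ins, not the kernel definitions):

    #include <stdint.h>
    #include <stdio.h>

    #define CONFIG_PAX_REFCOUNT   /* model the checked configuration */

    typedef struct { uint64_t counter; } atomic64_like_t;

    #ifdef CONFIG_PAX_REFCOUNT
    typedef struct { uint64_t counter; } atomic64_unchecked_like_t;
    #else
    typedef atomic64_like_t atomic64_unchecked_like_t;
    #endif

    static void inc_unchecked(atomic64_unchecked_like_t *v)
    {
        v->counter++;   /* wrapping is acceptable for plain statistics */
    }

    int main(void)
    {
        atomic64_unchecked_like_t stats = { 0 };
        inc_unchecked(&stats);          /* fine */

        atomic64_like_t refs = { 0 };
        /* inc_unchecked(&refs); */     /* rejected by the compiler:
                                           distinct struct types */
        (void)refs;
        printf("%llu\n", (unsigned long long)stats.counter);
        return 0;
    }
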
15940diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
15941index 46e9052..ae45136 100644
15942--- a/arch/x86/include/asm/atomic64_64.h
15943+++ b/arch/x86/include/asm/atomic64_64.h
15944@@ -18,7 +18,19 @@
15945 */
15946 static inline long atomic64_read(const atomic64_t *v)
15947 {
15948- return (*(volatile long *)&(v)->counter);
15949+ return (*(volatile const long *)&(v)->counter);
15950+}
15951+
15952+/**
15953+ * atomic64_read_unchecked - read atomic64 variable
15954+ * @v: pointer of type atomic64_unchecked_t
15955+ *
15956+ * Atomically reads the value of @v.
15957+ * Doesn't imply a read memory barrier.
15958+ */
15959+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
15960+{
15961+ return (*(volatile const long *)&(v)->counter);
15962 }
15963
15964 /**
15965@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
15966 }
15967
15968 /**
15969+ * atomic64_set_unchecked - set atomic64 variable
15970+ * @v: pointer to type atomic64_unchecked_t
15971+ * @i: required value
15972+ *
15973+ * Atomically sets the value of @v to @i.
15974+ */
15975+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
15976+{
15977+ v->counter = i;
15978+}
15979+
15980+/**
15981 * atomic64_add - add integer to atomic64 variable
15982 * @i: integer value to add
15983 * @v: pointer to type atomic64_t
15984@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
15985 */
15986 static inline void atomic64_add(long i, atomic64_t *v)
15987 {
15988+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
15989+
15990+#ifdef CONFIG_PAX_REFCOUNT
15991+ "jno 0f\n"
15992+ LOCK_PREFIX "subq %1,%0\n"
15993+ "int $4\n0:\n"
15994+ _ASM_EXTABLE(0b, 0b)
15995+#endif
15996+
15997+ : "=m" (v->counter)
15998+ : "er" (i), "m" (v->counter));
15999+}
16000+
16001+/**
16002+ * atomic64_add_unchecked - add integer to atomic64 variable
16003+ * @i: integer value to add
16004+ * @v: pointer to type atomic64_unchecked_t
16005+ *
16006+ * Atomically adds @i to @v.
16007+ */
16008+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
16009+{
16010 asm volatile(LOCK_PREFIX "addq %1,%0"
16011 : "=m" (v->counter)
16012 : "er" (i), "m" (v->counter));
16013@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
16014 */
16015 static inline void atomic64_sub(long i, atomic64_t *v)
16016 {
16017- asm volatile(LOCK_PREFIX "subq %1,%0"
16018+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
16019+
16020+#ifdef CONFIG_PAX_REFCOUNT
16021+ "jno 0f\n"
16022+ LOCK_PREFIX "addq %1,%0\n"
16023+ "int $4\n0:\n"
16024+ _ASM_EXTABLE(0b, 0b)
16025+#endif
16026+
16027+ : "=m" (v->counter)
16028+ : "er" (i), "m" (v->counter));
16029+}
16030+
16031+/**
16032+ * atomic64_sub_unchecked - subtract the atomic64 variable
16033+ * @i: integer value to subtract
16034+ * @v: pointer to type atomic64_unchecked_t
16035+ *
16036+ * Atomically subtracts @i from @v.
16037+ */
16038+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
16039+{
16040+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
16041 : "=m" (v->counter)
16042 : "er" (i), "m" (v->counter));
16043 }
16044@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
16045 */
16046 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
16047 {
16048- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
16049+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
16050 }
16051
16052 /**
16053@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
16054 */
16055 static inline void atomic64_inc(atomic64_t *v)
16056 {
16057+ asm volatile(LOCK_PREFIX "incq %0\n"
16058+
16059+#ifdef CONFIG_PAX_REFCOUNT
16060+ "jno 0f\n"
16061+ LOCK_PREFIX "decq %0\n"
16062+ "int $4\n0:\n"
16063+ _ASM_EXTABLE(0b, 0b)
16064+#endif
16065+
16066+ : "=m" (v->counter)
16067+ : "m" (v->counter));
16068+}
16069+
16070+/**
16071+ * atomic64_inc_unchecked - increment atomic64 variable
16072+ * @v: pointer to type atomic64_unchecked_t
16073+ *
16074+ * Atomically increments @v by 1.
16075+ */
16076+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
16077+{
16078 asm volatile(LOCK_PREFIX "incq %0"
16079 : "=m" (v->counter)
16080 : "m" (v->counter));
16081@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
16082 */
16083 static inline void atomic64_dec(atomic64_t *v)
16084 {
16085- asm volatile(LOCK_PREFIX "decq %0"
16086+ asm volatile(LOCK_PREFIX "decq %0\n"
16087+
16088+#ifdef CONFIG_PAX_REFCOUNT
16089+ "jno 0f\n"
16090+ LOCK_PREFIX "incq %0\n"
16091+ "int $4\n0:\n"
16092+ _ASM_EXTABLE(0b, 0b)
16093+#endif
16094+
16095+ : "=m" (v->counter)
16096+ : "m" (v->counter));
16097+}
16098+
16099+/**
16100+ * atomic64_dec_unchecked - decrement atomic64 variable
16101+ * @v: pointer to type atomic64_unchecked_t
16102+ *
16103+ * Atomically decrements @v by 1.
16104+ */
16105+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
16106+{
16107+ asm volatile(LOCK_PREFIX "decq %0\n"
16108 : "=m" (v->counter)
16109 : "m" (v->counter));
16110 }
16111@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
16112 */
16113 static inline int atomic64_dec_and_test(atomic64_t *v)
16114 {
16115- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
16116+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
16117 }
16118
16119 /**
16120@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
16121 */
16122 static inline int atomic64_inc_and_test(atomic64_t *v)
16123 {
16124- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
16125+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
16126 }
16127
16128 /**
16129@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
16130 */
16131 static inline int atomic64_add_negative(long i, atomic64_t *v)
16132 {
16133- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
16134+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
16135 }
16136
16137 /**
16138@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
16139 */
16140 static inline long atomic64_add_return(long i, atomic64_t *v)
16141 {
16142+ return i + xadd_check_overflow(&v->counter, i);
16143+}
16144+
16145+/**
16146+ * atomic64_add_return_unchecked - add and return
16147+ * @i: integer value to add
16148+ * @v: pointer to type atomic64_unchecked_t
16149+ *
16150+ * Atomically adds @i to @v and returns @i + *@v
16151+ */
16152+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
16153+{
16154 return i + xadd(&v->counter, i);
16155 }
16156
16157@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
16158 }
16159
16160 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
16161+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
16162+{
16163+ return atomic64_add_return_unchecked(1, v);
16164+}
16165 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
16166
16167 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
16168@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
16169 return cmpxchg(&v->counter, old, new);
16170 }
16171
16172+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
16173+{
16174+ return cmpxchg(&v->counter, old, new);
16175+}
16176+
16177 static inline long atomic64_xchg(atomic64_t *v, long new)
16178 {
16179 return xchg(&v->counter, new);
16180@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
16181 */
16182 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
16183 {
16184- long c, old;
16185+ long c, old, new;
16186 c = atomic64_read(v);
16187 for (;;) {
16188- if (unlikely(c == (u)))
16189+ if (unlikely(c == u))
16190 break;
16191- old = atomic64_cmpxchg((v), c, c + (a));
16192+
16193+ asm volatile("add %2,%0\n"
16194+
16195+#ifdef CONFIG_PAX_REFCOUNT
16196+ "jno 0f\n"
16197+ "sub %2,%0\n"
16198+ "int $4\n0:\n"
16199+ _ASM_EXTABLE(0b, 0b)
16200+#endif
16201+
16202+ : "=r" (new)
16203+ : "0" (c), "ir" (a));
16204+
16205+ old = atomic64_cmpxchg(v, c, new);
16206 if (likely(old == c))
16207 break;
16208 c = old;
16209 }
16210- return c != (u);
16211+ return c != u;
16212 }
16213
16214 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
16215diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
16216index 5c7198c..44180b5 100644
16217--- a/arch/x86/include/asm/barrier.h
16218+++ b/arch/x86/include/asm/barrier.h
16219@@ -107,7 +107,7 @@
16220 do { \
16221 compiletime_assert_atomic_type(*p); \
16222 smp_mb(); \
16223- ACCESS_ONCE(*p) = (v); \
16224+ ACCESS_ONCE_RW(*p) = (v); \
16225 } while (0)
16226
16227 #define smp_load_acquire(p) \
16228@@ -124,7 +124,7 @@ do { \
16229 do { \
16230 compiletime_assert_atomic_type(*p); \
16231 barrier(); \
16232- ACCESS_ONCE(*p) = (v); \
16233+ ACCESS_ONCE_RW(*p) = (v); \
16234 } while (0)
16235
16236 #define smp_load_acquire(p) \
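
The barrier.h hunks switch the stores in smp_store_release() from ACCESS_ONCE() to ACCESS_ONCE_RW(). Elsewhere in this patch ACCESS_ONCE() is redefined to go through a const volatile lvalue, which makes it load-only under the constify changes, so write sites need the _RW spelling. A minimal model of the distinction (macro bodies written from my reading of the patch; treat them as a sketch):

    #include <stdio.h>

    /* Sketch of the split: the const form can only be read through,
     * the RW form can also be assigned through. */
    #define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

    int main(void)
    {
        int flag = 0;

        ACCESS_ONCE_RW(flag) = 1;           /* store: RW form required */
        printf("%d\n", ACCESS_ONCE(flag));  /* load: const form is fine */

        /* ACCESS_ONCE(flag) = 2; */  /* would not compile: assignment
                                         to a const-qualified lvalue */
        return 0;
    }
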
16237diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
16238index afcd35d..141b32d 100644
16239--- a/arch/x86/include/asm/bitops.h
16240+++ b/arch/x86/include/asm/bitops.h
16241@@ -50,7 +50,7 @@
16242 * a mask operation on a byte.
16243 */
16244 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
16245-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
16246+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
16247 #define CONST_MASK(nr) (1 << ((nr) & 7))
16248
16249 /**
16250@@ -203,7 +203,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
16251 */
16252 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
16253 {
16254- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16255+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16256 }
16257
16258 /**
16259@@ -249,7 +249,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
16260 */
16261 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
16262 {
16263- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16264+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16265 }
16266
16267 /**
16268@@ -302,7 +302,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
16269 */
16270 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
16271 {
16272- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16273+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16274 }
16275
16276 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
16277@@ -343,7 +343,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
16278 *
16279 * Undefined if no bit exists, so code should check against 0 first.
16280 */
16281-static inline unsigned long __ffs(unsigned long word)
16282+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
16283 {
16284 asm("rep; bsf %1,%0"
16285 : "=r" (word)
16286@@ -357,7 +357,7 @@ static inline unsigned long __ffs(unsigned long word)
16287 *
16288 * Undefined if no zero exists, so code should check against ~0UL first.
16289 */
16290-static inline unsigned long ffz(unsigned long word)
16291+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
16292 {
16293 asm("rep; bsf %1,%0"
16294 : "=r" (word)
16295@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
16296 *
16297 * Undefined if no set bit exists, so code should check against 0 first.
16298 */
16299-static inline unsigned long __fls(unsigned long word)
16300+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
16301 {
16302 asm("bsr %1,%0"
16303 : "=r" (word)
16304@@ -434,7 +434,7 @@ static inline int ffs(int x)
16305 * set bit if value is nonzero. The last (most significant) bit is
16306 * at position 32.
16307 */
16308-static inline int fls(int x)
16309+static inline int __intentional_overflow(-1) fls(int x)
16310 {
16311 int r;
16312
16313@@ -476,7 +476,7 @@ static inline int fls(int x)
16314 * at position 64.
16315 */
16316 #ifdef CONFIG_X86_64
16317-static __always_inline int fls64(__u64 x)
16318+static __always_inline __intentional_overflow(-1) int fls64(__u64 x)
16319 {
16320 int bitpos = -1;
16321 /*
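
The bitops.h changes above mostly add __intentional_overflow(-1): these bit-scan helpers legitimately depend on wraparound and undefined-on-zero arithmetic, and the annotation tells the size_overflow plugin to leave them uninstrumented. As a reminder of what they compute, a small stand-in built on a compiler builtin instead of rep; bsf (my_ffs/my_ffz are made-up names):

    #include <stdio.h>

    /* __ffs(): index of the lowest set bit; undefined for 0.
     * ffz(): index of the lowest clear bit; undefined for ~0UL. */
    static unsigned long my_ffs(unsigned long word)
    {
        return (unsigned long)__builtin_ctzl(word);  /* like "rep; bsf" */
    }

    static unsigned long my_ffz(unsigned long word)
    {
        return my_ffs(~word);  /* lowest zero = lowest set bit of ~word */
    }

    int main(void)
    {
        printf("my_ffs(0x28) = %lu\n", my_ffs(0x28UL)); /* 0b101000 -> 3 */
        printf("my_ffz(0x07) = %lu\n", my_ffz(0x07UL)); /* 0b000111 -> 3 */
        return 0;
    }
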
16322diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
16323index 4fa687a..60f2d39 100644
16324--- a/arch/x86/include/asm/boot.h
16325+++ b/arch/x86/include/asm/boot.h
16326@@ -6,10 +6,15 @@
16327 #include <uapi/asm/boot.h>
16328
16329 /* Physical address where kernel should be loaded. */
16330-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16331+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16332 + (CONFIG_PHYSICAL_ALIGN - 1)) \
16333 & ~(CONFIG_PHYSICAL_ALIGN - 1))
16334
16335+#ifndef __ASSEMBLY__
16336+extern unsigned char __LOAD_PHYSICAL_ADDR[];
16337+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
16338+#endif
16339+
16340 /* Minimum kernel alignment, as a power of two */
16341 #ifdef CONFIG_X86_64
16342 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
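
The boot.h hunk demotes LOAD_PHYSICAL_ADDR from a preprocessor computation to the address of a linker-provided symbol (the assembly side keeps the arithmetic as ____LOAD_PHYSICAL_ADDR), so C code picks the value up at link time rather than baking it in at compile time. A user-space model of the extern-array idiom, with a stand-in definition replacing what the kernel's linker script would supply:

    #include <stdio.h>

    /* The address of an extern array is a link-time constant: the
     * value comes from wherever the symbol is defined, not from
     * this translation unit. */
    extern unsigned char __LOAD_PHYSICAL_ADDR[];
    #define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)

    /* Stand-in definition; in the kernel the linker script provides it. */
    unsigned char __LOAD_PHYSICAL_ADDR[1];

    int main(void)
    {
        printf("LOAD_PHYSICAL_ADDR = %#lx\n", LOAD_PHYSICAL_ADDR);
        return 0;
    }
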
16343diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
16344index 48f99f1..d78ebf9 100644
16345--- a/arch/x86/include/asm/cache.h
16346+++ b/arch/x86/include/asm/cache.h
16347@@ -5,12 +5,13 @@
16348
16349 /* L1 cache line size */
16350 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
16351-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
16352+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
16353
16354 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
16355+#define __read_only __attribute__((__section__(".data..read_only")))
16356
16357 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
16358-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
16359+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
16360
16361 #ifdef CONFIG_X86_VSMP
16362 #ifdef CONFIG_SMP
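
Two things happen in the cache.h hunk: the cache-size constants become _AC(1,UL) shifts so they are typed unsigned long in C yet still usable from assembly, and a __read_only attribute is introduced that files data into a dedicated .data..read_only section; the enforcement (write-protecting that section after boot) lives elsewhere in the patch. The section-placement half is plain GCC and can be sketched standalone:

    #include <stdio.h>

    /* Same shape as the macro above: placement only; making the
     * section actually read-only is done by the kernel at runtime. */
    #define __read_only __attribute__((__section__(".data..read_only")))

    static int boot_tunable __read_only = 42;

    int main(void)
    {
        printf("boot_tunable = %d\n", boot_tunable);
        return 0;
    }
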
16363diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
16364index 9863ee3..4a1f8e1 100644
16365--- a/arch/x86/include/asm/cacheflush.h
16366+++ b/arch/x86/include/asm/cacheflush.h
16367@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
16368 unsigned long pg_flags = pg->flags & _PGMT_MASK;
16369
16370 if (pg_flags == _PGMT_DEFAULT)
16371- return -1;
16372+ return ~0UL;
16373 else if (pg_flags == _PGMT_WC)
16374 return _PAGE_CACHE_WC;
16375 else if (pg_flags == _PGMT_UC_MINUS)
16376diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
16377index cb4c73b..c473c29 100644
16378--- a/arch/x86/include/asm/calling.h
16379+++ b/arch/x86/include/asm/calling.h
16380@@ -82,103 +82,113 @@ For 32-bit we have the following conventions - kernel is built with
16381 #define RSP 152
16382 #define SS 160
16383
16384-#define ARGOFFSET R11
16385-#define SWFRAME ORIG_RAX
16386+#define ARGOFFSET R15
16387
16388 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1
16389- subq $9*8+\addskip, %rsp
16390- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
16391- movq_cfi rdi, 8*8
16392- movq_cfi rsi, 7*8
16393- movq_cfi rdx, 6*8
16394+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
16395+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
16396+ movq_cfi rdi, RDI
16397+ movq_cfi rsi, RSI
16398+ movq_cfi rdx, RDX
16399
16400 .if \save_rcx
16401- movq_cfi rcx, 5*8
16402+ movq_cfi rcx, RCX
16403 .endif
16404
16405- movq_cfi rax, 4*8
16406+ movq_cfi rax, RAX
16407
16408 .if \save_r891011
16409- movq_cfi r8, 3*8
16410- movq_cfi r9, 2*8
16411- movq_cfi r10, 1*8
16412- movq_cfi r11, 0*8
16413+ movq_cfi r8, R8
16414+ movq_cfi r9, R9
16415+ movq_cfi r10, R10
16416+ movq_cfi r11, R11
16417 .endif
16418
16419+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16420+ movq_cfi r12, R12
16421+#endif
16422+
16423 .endm
16424
16425-#define ARG_SKIP (9*8)
16426+#define ARG_SKIP ORIG_RAX
16427
16428 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
16429 rstor_r8910=1, rstor_rdx=1
16430+
16431+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16432+ movq_cfi_restore R12, r12
16433+#endif
16434+
16435 .if \rstor_r11
16436- movq_cfi_restore 0*8, r11
16437+ movq_cfi_restore R11, r11
16438 .endif
16439
16440 .if \rstor_r8910
16441- movq_cfi_restore 1*8, r10
16442- movq_cfi_restore 2*8, r9
16443- movq_cfi_restore 3*8, r8
16444+ movq_cfi_restore R10, r10
16445+ movq_cfi_restore R9, r9
16446+ movq_cfi_restore R8, r8
16447 .endif
16448
16449 .if \rstor_rax
16450- movq_cfi_restore 4*8, rax
16451+ movq_cfi_restore RAX, rax
16452 .endif
16453
16454 .if \rstor_rcx
16455- movq_cfi_restore 5*8, rcx
16456+ movq_cfi_restore RCX, rcx
16457 .endif
16458
16459 .if \rstor_rdx
16460- movq_cfi_restore 6*8, rdx
16461+ movq_cfi_restore RDX, rdx
16462 .endif
16463
16464- movq_cfi_restore 7*8, rsi
16465- movq_cfi_restore 8*8, rdi
16466+ movq_cfi_restore RSI, rsi
16467+ movq_cfi_restore RDI, rdi
16468
16469- .if ARG_SKIP+\addskip > 0
16470- addq $ARG_SKIP+\addskip, %rsp
16471- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
16472+ .if ORIG_RAX+\addskip > 0
16473+ addq $ORIG_RAX+\addskip, %rsp
16474+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
16475 .endif
16476 .endm
16477
16478- .macro LOAD_ARGS offset, skiprax=0
16479- movq \offset(%rsp), %r11
16480- movq \offset+8(%rsp), %r10
16481- movq \offset+16(%rsp), %r9
16482- movq \offset+24(%rsp), %r8
16483- movq \offset+40(%rsp), %rcx
16484- movq \offset+48(%rsp), %rdx
16485- movq \offset+56(%rsp), %rsi
16486- movq \offset+64(%rsp), %rdi
16487+ .macro LOAD_ARGS skiprax=0
16488+ movq R11(%rsp), %r11
16489+ movq R10(%rsp), %r10
16490+ movq R9(%rsp), %r9
16491+ movq R8(%rsp), %r8
16492+ movq RCX(%rsp), %rcx
16493+ movq RDX(%rsp), %rdx
16494+ movq RSI(%rsp), %rsi
16495+ movq RDI(%rsp), %rdi
16496 .if \skiprax
16497 .else
16498- movq \offset+72(%rsp), %rax
16499+ movq RAX(%rsp), %rax
16500 .endif
16501 .endm
16502
16503-#define REST_SKIP (6*8)
16504-
16505 .macro SAVE_REST
16506- subq $REST_SKIP, %rsp
16507- CFI_ADJUST_CFA_OFFSET REST_SKIP
16508- movq_cfi rbx, 5*8
16509- movq_cfi rbp, 4*8
16510- movq_cfi r12, 3*8
16511- movq_cfi r13, 2*8
16512- movq_cfi r14, 1*8
16513- movq_cfi r15, 0*8
16514+ movq_cfi rbx, RBX
16515+ movq_cfi rbp, RBP
16516+
16517+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16518+ movq_cfi r12, R12
16519+#endif
16520+
16521+ movq_cfi r13, R13
16522+ movq_cfi r14, R14
16523+ movq_cfi r15, R15
16524 .endm
16525
16526 .macro RESTORE_REST
16527- movq_cfi_restore 0*8, r15
16528- movq_cfi_restore 1*8, r14
16529- movq_cfi_restore 2*8, r13
16530- movq_cfi_restore 3*8, r12
16531- movq_cfi_restore 4*8, rbp
16532- movq_cfi_restore 5*8, rbx
16533- addq $REST_SKIP, %rsp
16534- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
16535+ movq_cfi_restore R15, r15
16536+ movq_cfi_restore R14, r14
16537+ movq_cfi_restore R13, r13
16538+
16539+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16540+ movq_cfi_restore R12, r12
16541+#endif
16542+
16543+ movq_cfi_restore RBP, rbp
16544+ movq_cfi_restore RBX, rbx
16545 .endm
16546
16547 .macro SAVE_ALL
16548diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
16549index f50de69..2b0a458 100644
16550--- a/arch/x86/include/asm/checksum_32.h
16551+++ b/arch/x86/include/asm/checksum_32.h
16552@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
16553 int len, __wsum sum,
16554 int *src_err_ptr, int *dst_err_ptr);
16555
16556+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
16557+ int len, __wsum sum,
16558+ int *src_err_ptr, int *dst_err_ptr);
16559+
16560+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
16561+ int len, __wsum sum,
16562+ int *src_err_ptr, int *dst_err_ptr);
16563+
16564 /*
16565 * Note: when you get a NULL pointer exception here this means someone
16566 * passed in an incorrect kernel address to one of these functions.
16567@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
16568
16569 might_sleep();
16570 stac();
16571- ret = csum_partial_copy_generic((__force void *)src, dst,
16572+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
16573 len, sum, err_ptr, NULL);
16574 clac();
16575
16576@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
16577 might_sleep();
16578 if (access_ok(VERIFY_WRITE, dst, len)) {
16579 stac();
16580- ret = csum_partial_copy_generic(src, (__force void *)dst,
16581+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
16582 len, sum, NULL, err_ptr);
16583 clac();
16584 return ret;
16585diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
16586index d47786a..2d8883e 100644
16587--- a/arch/x86/include/asm/cmpxchg.h
16588+++ b/arch/x86/include/asm/cmpxchg.h
16589@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
16590 __compiletime_error("Bad argument size for cmpxchg");
16591 extern void __xadd_wrong_size(void)
16592 __compiletime_error("Bad argument size for xadd");
16593+extern void __xadd_check_overflow_wrong_size(void)
16594+ __compiletime_error("Bad argument size for xadd_check_overflow");
16595 extern void __add_wrong_size(void)
16596 __compiletime_error("Bad argument size for add");
16597+extern void __add_check_overflow_wrong_size(void)
16598+ __compiletime_error("Bad argument size for add_check_overflow");
16599
16600 /*
16601 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
16602@@ -67,6 +71,38 @@ extern void __add_wrong_size(void)
16603 __ret; \
16604 })
16605
16606+#ifdef CONFIG_PAX_REFCOUNT
16607+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
16608+ ({ \
16609+ __typeof__ (*(ptr)) __ret = (arg); \
16610+ switch (sizeof(*(ptr))) { \
16611+ case __X86_CASE_L: \
16612+ asm volatile (lock #op "l %0, %1\n" \
16613+ "jno 0f\n" \
16614+ "mov %0,%1\n" \
16615+ "int $4\n0:\n" \
16616+ _ASM_EXTABLE(0b, 0b) \
16617+ : "+r" (__ret), "+m" (*(ptr)) \
16618+ : : "memory", "cc"); \
16619+ break; \
16620+ case __X86_CASE_Q: \
16621+ asm volatile (lock #op "q %q0, %1\n" \
16622+ "jno 0f\n" \
16623+ "mov %0,%1\n" \
16624+ "int $4\n0:\n" \
16625+ _ASM_EXTABLE(0b, 0b) \
16626+ : "+r" (__ret), "+m" (*(ptr)) \
16627+ : : "memory", "cc"); \
16628+ break; \
16629+ default: \
16630+ __ ## op ## _check_overflow_wrong_size(); \
16631+ } \
16632+ __ret; \
16633+ })
16634+#else
16635+#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
16636+#endif
16637+
16638 /*
16639 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
16640 * Since this is generally used to protect other memory information, we
16641@@ -167,6 +203,9 @@ extern void __add_wrong_size(void)
16642 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
16643 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
16644
16645+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
16646+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
16647+
16648 #define __add(ptr, inc, lock) \
16649 ({ \
16650 __typeof__ (*(ptr)) __ret = (inc); \
16651diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
16652index 59c6c40..5e0b22c 100644
16653--- a/arch/x86/include/asm/compat.h
16654+++ b/arch/x86/include/asm/compat.h
16655@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
16656 typedef u32 compat_uint_t;
16657 typedef u32 compat_ulong_t;
16658 typedef u64 __attribute__((aligned(4))) compat_u64;
16659-typedef u32 compat_uptr_t;
16660+typedef u32 __user compat_uptr_t;
16661
16662 struct compat_timespec {
16663 compat_time_t tv_sec;
16664diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
16665index e265ff9..72c253b 100644
16666--- a/arch/x86/include/asm/cpufeature.h
16667+++ b/arch/x86/include/asm/cpufeature.h
16668@@ -203,7 +203,7 @@
16669 #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
16670 #define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */
16671 #define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */
16672-
16673+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
16674
16675 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
16676 #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
16677@@ -211,7 +211,7 @@
16678 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
16679 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
16680 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
16681-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
16682+#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
16683 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
16684 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
16685 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
16686@@ -359,6 +359,7 @@ extern const char * const x86_power_flags[32];
16687 #undef cpu_has_centaur_mcr
16688 #define cpu_has_centaur_mcr 0
16689
16690+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
16691 #endif /* CONFIG_X86_64 */
16692
16693 #if __GNUC__ >= 4
16694@@ -411,7 +412,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16695
16696 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
16697 t_warn:
16698- warn_pre_alternatives();
16699+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
16700+ warn_pre_alternatives();
16701 return false;
16702 #endif
16703
16704@@ -431,7 +433,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16705 ".section .discard,\"aw\",@progbits\n"
16706 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16707 ".previous\n"
16708- ".section .altinstr_replacement,\"ax\"\n"
16709+ ".section .altinstr_replacement,\"a\"\n"
16710 "3: movb $1,%0\n"
16711 "4:\n"
16712 ".previous\n"
16713@@ -468,7 +470,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16714 " .byte 2b - 1b\n" /* src len */
16715 " .byte 4f - 3f\n" /* repl len */
16716 ".previous\n"
16717- ".section .altinstr_replacement,\"ax\"\n"
16718+ ".section .altinstr_replacement,\"a\"\n"
16719 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
16720 "4:\n"
16721 ".previous\n"
16722@@ -501,7 +503,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16723 ".section .discard,\"aw\",@progbits\n"
16724 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16725 ".previous\n"
16726- ".section .altinstr_replacement,\"ax\"\n"
16727+ ".section .altinstr_replacement,\"a\"\n"
16728 "3: movb $0,%0\n"
16729 "4:\n"
16730 ".previous\n"
16731@@ -515,7 +517,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16732 ".section .discard,\"aw\",@progbits\n"
16733 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
16734 ".previous\n"
16735- ".section .altinstr_replacement,\"ax\"\n"
16736+ ".section .altinstr_replacement,\"a\"\n"
16737 "5: movb $1,%0\n"
16738 "6:\n"
16739 ".previous\n"
16740diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
16741index 50d033a..37deb26 100644
16742--- a/arch/x86/include/asm/desc.h
16743+++ b/arch/x86/include/asm/desc.h
16744@@ -4,6 +4,7 @@
16745 #include <asm/desc_defs.h>
16746 #include <asm/ldt.h>
16747 #include <asm/mmu.h>
16748+#include <asm/pgtable.h>
16749
16750 #include <linux/smp.h>
16751 #include <linux/percpu.h>
16752@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16753
16754 desc->type = (info->read_exec_only ^ 1) << 1;
16755 desc->type |= info->contents << 2;
16756+ desc->type |= info->seg_not_present ^ 1;
16757
16758 desc->s = 1;
16759 desc->dpl = 0x3;
16760@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16761 }
16762
16763 extern struct desc_ptr idt_descr;
16764-extern gate_desc idt_table[];
16765-extern struct desc_ptr debug_idt_descr;
16766-extern gate_desc debug_idt_table[];
16767-
16768-struct gdt_page {
16769- struct desc_struct gdt[GDT_ENTRIES];
16770-} __attribute__((aligned(PAGE_SIZE)));
16771-
16772-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
16773+extern gate_desc idt_table[IDT_ENTRIES];
16774+extern const struct desc_ptr debug_idt_descr;
16775+extern gate_desc debug_idt_table[IDT_ENTRIES];
16776
16777+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
16778 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
16779 {
16780- return per_cpu(gdt_page, cpu).gdt;
16781+ return cpu_gdt_table[cpu];
16782 }
16783
16784 #ifdef CONFIG_X86_64
16785@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
16786 unsigned long base, unsigned dpl, unsigned flags,
16787 unsigned short seg)
16788 {
16789- gate->a = (seg << 16) | (base & 0xffff);
16790- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
16791+ gate->gate.offset_low = base;
16792+ gate->gate.seg = seg;
16793+ gate->gate.reserved = 0;
16794+ gate->gate.type = type;
16795+ gate->gate.s = 0;
16796+ gate->gate.dpl = dpl;
16797+ gate->gate.p = 1;
16798+ gate->gate.offset_high = base >> 16;
16799 }
16800
16801 #endif
16802@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
16803
16804 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
16805 {
16806+ pax_open_kernel();
16807 memcpy(&idt[entry], gate, sizeof(*gate));
16808+ pax_close_kernel();
16809 }
16810
16811 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
16812 {
16813+ pax_open_kernel();
16814 memcpy(&ldt[entry], desc, 8);
16815+ pax_close_kernel();
16816 }
16817
16818 static inline void
16819@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
16820 default: size = sizeof(*gdt); break;
16821 }
16822
16823+ pax_open_kernel();
16824 memcpy(&gdt[entry], desc, size);
16825+ pax_close_kernel();
16826 }
16827
16828 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
16829@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
16830
16831 static inline void native_load_tr_desc(void)
16832 {
16833+ pax_open_kernel();
16834 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
16835+ pax_close_kernel();
16836 }
16837
16838 static inline void native_load_gdt(const struct desc_ptr *dtr)
16839@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
16840 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
16841 unsigned int i;
16842
16843+ pax_open_kernel();
16844 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
16845 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
16846+ pax_close_kernel();
16847 }
16848
16849 #define _LDT_empty(info) \
16850@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
16851 preempt_enable();
16852 }
16853
16854-static inline unsigned long get_desc_base(const struct desc_struct *desc)
16855+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
16856 {
16857 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
16858 }
16859@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
16860 }
16861
16862 #ifdef CONFIG_X86_64
16863-static inline void set_nmi_gate(int gate, void *addr)
16864+static inline void set_nmi_gate(int gate, const void *addr)
16865 {
16866 gate_desc s;
16867
16868@@ -321,14 +334,14 @@ static inline void set_nmi_gate(int gate, void *addr)
16869 #endif
16870
16871 #ifdef CONFIG_TRACING
16872-extern struct desc_ptr trace_idt_descr;
16873-extern gate_desc trace_idt_table[];
16874+extern const struct desc_ptr trace_idt_descr;
16875+extern gate_desc trace_idt_table[IDT_ENTRIES];
16876 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16877 {
16878 write_idt_entry(trace_idt_table, entry, gate);
16879 }
16880
16881-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
16882+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
16883 unsigned dpl, unsigned ist, unsigned seg)
16884 {
16885 gate_desc s;
16886@@ -348,7 +361,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16887 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
16888 #endif
16889
16890-static inline void _set_gate(int gate, unsigned type, void *addr,
16891+static inline void _set_gate(int gate, unsigned type, const void *addr,
16892 unsigned dpl, unsigned ist, unsigned seg)
16893 {
16894 gate_desc s;
16895@@ -371,9 +384,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
16896 #define set_intr_gate(n, addr) \
16897 do { \
16898 BUG_ON((unsigned)n > 0xFF); \
16899- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
16900+ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
16901 __KERNEL_CS); \
16902- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
16903+ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
16904 0, 0, __KERNEL_CS); \
16905 } while (0)
16906
16907@@ -401,19 +414,19 @@ static inline void alloc_system_vector(int vector)
16908 /*
16909 * This routine sets up an interrupt gate at descriptor privilege level 3.
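16909 *
16909 * (DPL 3 means the gate may be reached from user mode via int $n.)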
16910 */
16911-static inline void set_system_intr_gate(unsigned int n, void *addr)
16912+static inline void set_system_intr_gate(unsigned int n, const void *addr)
16913 {
16914 BUG_ON((unsigned)n > 0xFF);
16915 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
16916 }
16917
16918-static inline void set_system_trap_gate(unsigned int n, void *addr)
16919+static inline void set_system_trap_gate(unsigned int n, const void *addr)
16920 {
16921 BUG_ON((unsigned)n > 0xFF);
16922 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
16923 }
16924
16925-static inline void set_trap_gate(unsigned int n, void *addr)
16926+static inline void set_trap_gate(unsigned int n, const void *addr)
16927 {
16928 BUG_ON((unsigned)n > 0xFF);
16929 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
16930@@ -422,16 +435,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
16931 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
16932 {
16933 BUG_ON((unsigned)n > 0xFF);
16934- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
16935+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
16936 }
16937
16938-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
16939+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
16940 {
16941 BUG_ON((unsigned)n > 0xFF);
16942 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
16943 }
16944
16945-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
16946+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
16947 {
16948 BUG_ON((unsigned)n > 0xFF);
16949 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
16950@@ -503,4 +516,17 @@ static inline void load_current_idt(void)
16951 else
16952 load_idt((const struct desc_ptr *)&idt_descr);
16953 }
16954+
16955+#ifdef CONFIG_X86_32
16956+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
16957+{
16958+ struct desc_struct d;
16959+
16960+ if (likely(limit))
16961+ limit = (limit - 1UL) >> PAGE_SHIFT;
16962+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
16963+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
16964+}
16965+#endif
16966+
16967 #endif /* _ASM_X86_DESC_H */
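
Most of the desc.h hunks above share one pattern: the GDT, IDT and LDT become fixed-size arrays living in read-only memory, and every writer is bracketed by pax_open_kernel()/pax_close_kernel(). A compilable sketch of that bracket, with no-op stand-ins for the two primitives (their real CR0.WP-toggling bodies appear in the pgtable.h hunk further down):

#include <string.h>

/* Stand-ins for the patch's primitives; the real ones flip CR0.WP. */
static unsigned long pax_open_kernel(void)  { /* clear CR0.WP   */ return 0; }
static unsigned long pax_close_kernel(void) { /* restore CR0.WP */ return 0; }

/* The shape of native_write_idt_entry()/native_write_gdt_entry() above:
 * open the write window, store into the read-only table, close it again. */
static void write_protected_desc(void *table_slot, const void *desc, size_t len)
{
    pax_open_kernel();
    memcpy(table_slot, desc, len);
    pax_close_kernel();
}
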
16968diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
16969index 278441f..b95a174 100644
16970--- a/arch/x86/include/asm/desc_defs.h
16971+++ b/arch/x86/include/asm/desc_defs.h
16972@@ -31,6 +31,12 @@ struct desc_struct {
16973 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
16974 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
16975 };
16976+ struct {
16977+ u16 offset_low;
16978+ u16 seg;
16979+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
16980+ unsigned offset_high: 16;
16981+ } gate;
16982 };
16983 } __attribute__((packed));
16984
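
The new gate member of the desc_struct union lets pack_gate() fill named bitfields instead of the old open-coded a/b word arithmetic. A standalone check — with hypothetical field values — that the two encodings agree under x86's little-endian bitfield layout:

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct gate_bits {                       /* mirror of the new `gate` view */
    uint16_t offset_low;
    uint16_t seg;
    unsigned reserved : 8, type : 4, s : 1, dpl : 2, p : 1;
    unsigned offset_high : 16;
} __attribute__((packed));

int main(void)
{
    unsigned long base = 0x12345678, type = 0xE /* interrupt gate */, dpl = 0;
    uint16_t seg = 0x60;

    /* the old pack_gate() computation */
    uint32_t a = (seg << 16) | (base & 0xffff);
    uint32_t b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);

    /* the new bitfield computation */
    struct gate_bits g = {
        .offset_low = base, .seg = seg, .reserved = 0,
        .type = type, .s = 0, .dpl = dpl, .p = 1,
        .offset_high = base >> 16,
    };
    uint32_t w[2];
    memcpy(w, &g, sizeof(w));
    assert(w[0] == a && w[1] == b);      /* identical 8-byte descriptor */
    return 0;
}
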
16985diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
16986index ced283a..ffe04cc 100644
16987--- a/arch/x86/include/asm/div64.h
16988+++ b/arch/x86/include/asm/div64.h
16989@@ -39,7 +39,7 @@
16990 __mod; \
16991 })
16992
16993-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16994+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16995 {
16996 union {
16997 u64 v64;
16998diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
16999index 1a055c8..a1701de 100644
17000--- a/arch/x86/include/asm/elf.h
17001+++ b/arch/x86/include/asm/elf.h
17002@@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
17003
17004 #include <asm/vdso.h>
17005
17006-#ifdef CONFIG_X86_64
17007-extern unsigned int vdso64_enabled;
17008-#endif
17009 #if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
17010 extern unsigned int vdso32_enabled;
17011 #endif
17012@@ -248,7 +245,25 @@ extern int force_personality32;
17013 the loader. We need to make sure that it is out of the way of the program
17014 that it will "exec", and that there is sufficient room for the brk. */
17015
17016+#ifdef CONFIG_PAX_SEGMEXEC
17017+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
17018+#else
17019 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
17020+#endif
17021+
17022+#ifdef CONFIG_PAX_ASLR
17023+#ifdef CONFIG_X86_32
17024+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
17025+
17026+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
17027+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
17028+#else
17029+#define PAX_ELF_ET_DYN_BASE 0x400000UL
17030+
17031+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
17032+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
17033+#endif
17034+#endif
17035
17036 /* This yields a mask that user programs can use to figure out what
17037 instruction set this CPU supports. This could be done in user space,
17038@@ -297,17 +312,13 @@ do { \
17039
17040 #define ARCH_DLINFO \
17041 do { \
17042- if (vdso64_enabled) \
17043- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
17044- (unsigned long __force)current->mm->context.vdso); \
17045+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
17046 } while (0)
17047
17048 /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
17049 #define ARCH_DLINFO_X32 \
17050 do { \
17051- if (vdso64_enabled) \
17052- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
17053- (unsigned long __force)current->mm->context.vdso); \
17054+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
17055 } while (0)
17056
17057 #define AT_SYSINFO 32
17058@@ -322,10 +333,10 @@ else \
17059
17060 #endif /* !CONFIG_X86_32 */
17061
17062-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
17063+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
17064
17065 #define VDSO_ENTRY \
17066- ((unsigned long)current->mm->context.vdso + \
17067+ (current->mm->context.vdso + \
17068 selected_vdso32->sym___kernel_vsyscall)
17069
17070 struct linux_binprm;
17071@@ -337,9 +348,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
17072 int uses_interp);
17073 #define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
17074
17075-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
17076-#define arch_randomize_brk arch_randomize_brk
17077-
17078 /*
17079 * True on X86_32 or when emulating IA32 on X86_64
17080 */
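
PAX_DELTA_MMAP_LEN/PAX_DELTA_STACK_LEN above size the ASLR entropy in bits; for 64-bit non-compat tasks that works out to TASK_SIZE_MAX_SHIFT(47) - PAGE_SHIFT(12) - 3 = 32 bits. A 64-bit userspace sketch of how a bit count becomes a page-aligned randomization offset — rand() is only a stand-in for the kernel's entropy source, and the whole helper is hypothetical:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12

/* N random bits, shifted past the page offset: the resulting delta is
 * page-aligned and spans 2^N pages. */
static unsigned long pax_delta(unsigned int len_bits)
{
    unsigned long r = ((unsigned long)rand() << 16) ^ (unsigned long)rand();
    return (r & ((1UL << len_bits) - 1)) << PAGE_SHIFT;
}

int main(void)
{
    printf("mmap delta: %#lx\n", pax_delta(32));  /* 64-bit case above */
    return 0;
}
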
17081diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
17082index 77a99ac..39ff7f5 100644
17083--- a/arch/x86/include/asm/emergency-restart.h
17084+++ b/arch/x86/include/asm/emergency-restart.h
17085@@ -1,6 +1,6 @@
17086 #ifndef _ASM_X86_EMERGENCY_RESTART_H
17087 #define _ASM_X86_EMERGENCY_RESTART_H
17088
17089-extern void machine_emergency_restart(void);
17090+extern void machine_emergency_restart(void) __noreturn;
17091
17092 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
17093diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
17094index 1c7eefe..d0e4702 100644
17095--- a/arch/x86/include/asm/floppy.h
17096+++ b/arch/x86/include/asm/floppy.h
17097@@ -229,18 +229,18 @@ static struct fd_routine_l {
17098 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
17099 } fd_routine[] = {
17100 {
17101- request_dma,
17102- free_dma,
17103- get_dma_residue,
17104- dma_mem_alloc,
17105- hard_dma_setup
17106+ ._request_dma = request_dma,
17107+ ._free_dma = free_dma,
17108+ ._get_dma_residue = get_dma_residue,
17109+ ._dma_mem_alloc = dma_mem_alloc,
17110+ ._dma_setup = hard_dma_setup
17111 },
17112 {
17113- vdma_request_dma,
17114- vdma_nop,
17115- vdma_get_dma_residue,
17116- vdma_mem_alloc,
17117- vdma_dma_setup
17118+ ._request_dma = vdma_request_dma,
17119+ ._free_dma = vdma_nop,
17120+ ._get_dma_residue = vdma_get_dma_residue,
17121+ ._dma_mem_alloc = vdma_mem_alloc,
17122+ ._dma_setup = vdma_dma_setup
17123 }
17124 };
17125
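
The fd_routine[] hunk above converts positional initializers to designated ones, so the bindings survive any reordering of struct fd_routine_l's members. The same idiom in miniature (all names hypothetical):

#include <stdio.h>

struct ops {
    int  (*open)(void);
    void (*close)(void);
};

static int  my_open(void)  { return 1; }
static void my_close(void) { }

/* With .field = syntax, swapping open/close inside struct ops no longer
 * silently misbinds the two pointers the way positional init would. */
static const struct ops fd_ops = {
    .open  = my_open,
    .close = my_close,
};

int main(void) { printf("%d\n", fd_ops.open()); return 0; }
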
17126diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
17127index 115e368..76ecf6c 100644
17128--- a/arch/x86/include/asm/fpu-internal.h
17129+++ b/arch/x86/include/asm/fpu-internal.h
17130@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
17131 #define user_insn(insn, output, input...) \
17132 ({ \
17133 int err; \
17134+ pax_open_userland(); \
17135 asm volatile(ASM_STAC "\n" \
17136- "1:" #insn "\n\t" \
17137+ "1:" \
17138+ __copyuser_seg \
17139+ #insn "\n\t" \
17140 "2: " ASM_CLAC "\n" \
17141 ".section .fixup,\"ax\"\n" \
17142 "3: movl $-1,%[err]\n" \
17143@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
17144 _ASM_EXTABLE(1b, 3b) \
17145 : [err] "=r" (err), output \
17146 : "0"(0), input); \
17147+ pax_close_userland(); \
17148 err; \
17149 })
17150
17151@@ -298,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
17152 "fnclex\n\t"
17153 "emms\n\t"
17154 "fildl %P[addr]" /* set F?P to defined value */
17155- : : [addr] "m" (tsk->thread.fpu.has_fpu));
17156+ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
17157 }
17158
17159 return fpu_restore_checking(&tsk->thread.fpu);
17160diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
17161index b4c1f54..e290c08 100644
17162--- a/arch/x86/include/asm/futex.h
17163+++ b/arch/x86/include/asm/futex.h
17164@@ -12,6 +12,7 @@
17165 #include <asm/smap.h>
17166
17167 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
17168+ typecheck(u32 __user *, uaddr); \
17169 asm volatile("\t" ASM_STAC "\n" \
17170 "1:\t" insn "\n" \
17171 "2:\t" ASM_CLAC "\n" \
17172@@ -20,15 +21,16 @@
17173 "\tjmp\t2b\n" \
17174 "\t.previous\n" \
17175 _ASM_EXTABLE(1b, 3b) \
17176- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
17177+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
17178 : "i" (-EFAULT), "0" (oparg), "1" (0))
17179
17180 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
17181+ typecheck(u32 __user *, uaddr); \
17182 asm volatile("\t" ASM_STAC "\n" \
17183 "1:\tmovl %2, %0\n" \
17184 "\tmovl\t%0, %3\n" \
17185 "\t" insn "\n" \
17186- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
17187+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
17188 "\tjnz\t1b\n" \
17189 "3:\t" ASM_CLAC "\n" \
17190 "\t.section .fixup,\"ax\"\n" \
17191@@ -38,7 +40,7 @@
17192 _ASM_EXTABLE(1b, 4b) \
17193 _ASM_EXTABLE(2b, 4b) \
17194 : "=&a" (oldval), "=&r" (ret), \
17195- "+m" (*uaddr), "=&r" (tem) \
17196+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
17197 : "r" (oparg), "i" (-EFAULT), "1" (0))
17198
17199 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17200@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17201
17202 pagefault_disable();
17203
17204+ pax_open_userland();
17205 switch (op) {
17206 case FUTEX_OP_SET:
17207- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
17208+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
17209 break;
17210 case FUTEX_OP_ADD:
17211- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
17212+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
17213 uaddr, oparg);
17214 break;
17215 case FUTEX_OP_OR:
17216@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17217 default:
17218 ret = -ENOSYS;
17219 }
17220+ pax_close_userland();
17221
17222 pagefault_enable();
17223
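
Both __futex_atomic_op macros above now start with typecheck(u32 __user *, uaddr), so a caller passing the wrong pointer type gets a compile-time warning before the ____m()-wrapped userland access is ever emitted. typecheck() is the stock kernel helper from <linux/typecheck.h>; its entire trick is comparing the addresses of two dummies:

#define typecheck(type, x)             \
({      type __dummy;                  \
        typeof(x) __dummy2;            \
        (void)(&__dummy == &__dummy2); \
        1;                             \
})

/* Quiet only when p really is an unsigned int *; any other pointer type
 * trips -Wcompare-distinct-pointer-types at the == above. */
static int demo(unsigned int *p)
{
    return typecheck(unsigned int *, p);   /* evaluates to 1 */
}
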
17224diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
17225index 4615906..788c817 100644
17226--- a/arch/x86/include/asm/hw_irq.h
17227+++ b/arch/x86/include/asm/hw_irq.h
17228@@ -164,8 +164,8 @@ extern void setup_ioapic_dest(void);
17229 extern void enable_IO_APIC(void);
17230
17231 /* Statistics */
17232-extern atomic_t irq_err_count;
17233-extern atomic_t irq_mis_count;
17234+extern atomic_unchecked_t irq_err_count;
17235+extern atomic_unchecked_t irq_mis_count;
17236
17237 /* EISA */
17238 extern void eisa_set_level_irq(unsigned int irq);
17239diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
17240index a203659..9889f1c 100644
17241--- a/arch/x86/include/asm/i8259.h
17242+++ b/arch/x86/include/asm/i8259.h
17243@@ -62,7 +62,7 @@ struct legacy_pic {
17244 void (*init)(int auto_eoi);
17245 int (*irq_pending)(unsigned int irq);
17246 void (*make_irq)(unsigned int irq);
17247-};
17248+} __do_const;
17249
17250 extern struct legacy_pic *legacy_pic;
17251 extern struct legacy_pic null_legacy_pic;
17252diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
17253index b8237d8..3e8864e 100644
17254--- a/arch/x86/include/asm/io.h
17255+++ b/arch/x86/include/asm/io.h
17256@@ -52,12 +52,12 @@ static inline void name(type val, volatile void __iomem *addr) \
17257 "m" (*(volatile type __force *)addr) barrier); }
17258
17259 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
17260-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
17261-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
17262+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
17263+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
17264
17265 build_mmio_read(__readb, "b", unsigned char, "=q", )
17266-build_mmio_read(__readw, "w", unsigned short, "=r", )
17267-build_mmio_read(__readl, "l", unsigned int, "=r", )
17268+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
17269+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
17270
17271 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
17272 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
17273@@ -109,7 +109,7 @@ build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
17274 * this function
17275 */
17276
17277-static inline phys_addr_t virt_to_phys(volatile void *address)
17278+static inline phys_addr_t __intentional_overflow(-1) virt_to_phys(volatile void *address)
17279 {
17280 return __pa(address);
17281 }
17282@@ -185,7 +185,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
17283 return ioremap_nocache(offset, size);
17284 }
17285
17286-extern void iounmap(volatile void __iomem *addr);
17287+extern void iounmap(const volatile void __iomem *addr);
17288
17289 extern void set_iounmap_nonlazy(void);
17290
17291@@ -195,6 +195,17 @@ extern void set_iounmap_nonlazy(void);
17292
17293 #include <linux/vmalloc.h>
17294
17295+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
17296+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
17297+{
17298+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17299+}
17300+
17301+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
17302+{
17303+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17304+}
17305+
17306 /*
17307 * Convert a virtual cached pointer to an uncached pointer
17308 */
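
The valid_phys_addr_range()/valid_mmap_phys_addr_range() helpers added above cap /dev/mem-style accesses at the CPU's reported physical address width rather than permitting arbitrary physical offsets. A worked example of the arithmetic (x86_phys_bits = 36 is a hypothetical PAE-class value):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
    unsigned int x86_phys_bits = 36;    /* hypothetical CPU */
    unsigned long long limit_pfn = 1ULL << (x86_phys_bits - PAGE_SHIFT);

    /* an access passes only if its last page frame lies below this */
    printf("pfn limit: %#llx (%llu GiB of physical space)\n",
           limit_pfn, (limit_pfn << PAGE_SHIFT) >> 30);
    return 0;
}
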
17309diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
17310index 0a8b519..80e7d5b 100644
17311--- a/arch/x86/include/asm/irqflags.h
17312+++ b/arch/x86/include/asm/irqflags.h
17313@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
17314 sti; \
17315 sysexit
17316
17317+#define GET_CR0_INTO_RDI mov %cr0, %rdi
17318+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
17319+#define GET_CR3_INTO_RDI mov %cr3, %rdi
17320+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
17321+
17322 #else
17323 #define INTERRUPT_RETURN iret
17324 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
17325diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
17326index 53cdfb2..d1369e6 100644
17327--- a/arch/x86/include/asm/kprobes.h
17328+++ b/arch/x86/include/asm/kprobes.h
17329@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
17330 #define RELATIVEJUMP_SIZE 5
17331 #define RELATIVECALL_OPCODE 0xe8
17332 #define RELATIVE_ADDR_SIZE 4
17333-#define MAX_STACK_SIZE 64
17334-#define MIN_STACK_SIZE(ADDR) \
17335- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
17336- THREAD_SIZE - (unsigned long)(ADDR))) \
17337- ? (MAX_STACK_SIZE) \
17338- : (((unsigned long)current_thread_info()) + \
17339- THREAD_SIZE - (unsigned long)(ADDR)))
17340+#define MAX_STACK_SIZE 64UL
17341+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
17342
17343 #define flush_insn_slot(p) do { } while (0)
17344
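
The rewritten MIN_STACK_SIZE above swaps the open-coded ternary for min(), and measures remaining stack from current->thread.sp0 — the top of the kernel stack — instead of deriving it from the thread_info base. The clamp itself, as a plain-C sketch:

/* Bytes kprobes may save: whatever lies between ADDR and the stack top
 * (sp0), capped at MAX_STACK_SIZE (64UL in the hunk above). */
static unsigned long min_stack_size(unsigned long sp0, unsigned long addr)
{
    unsigned long left = sp0 - addr;
    return left < 64UL ? left : 64UL;
}
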
17345diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
17346index 4ad6560..75c7bdd 100644
17347--- a/arch/x86/include/asm/local.h
17348+++ b/arch/x86/include/asm/local.h
17349@@ -10,33 +10,97 @@ typedef struct {
17350 atomic_long_t a;
17351 } local_t;
17352
17353+typedef struct {
17354+ atomic_long_unchecked_t a;
17355+} local_unchecked_t;
17356+
17357 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
17358
17359 #define local_read(l) atomic_long_read(&(l)->a)
17360+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
17361 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
17362+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
17363
17364 static inline void local_inc(local_t *l)
17365 {
17366- asm volatile(_ASM_INC "%0"
17367+ asm volatile(_ASM_INC "%0\n"
17368+
17369+#ifdef CONFIG_PAX_REFCOUNT
17370+ "jno 0f\n"
17371+ _ASM_DEC "%0\n"
17372+ "int $4\n0:\n"
17373+ _ASM_EXTABLE(0b, 0b)
17374+#endif
17375+
17376+ : "+m" (l->a.counter));
17377+}
17378+
17379+static inline void local_inc_unchecked(local_unchecked_t *l)
17380+{
17381+ asm volatile(_ASM_INC "%0\n"
17382 : "+m" (l->a.counter));
17383 }
17384
17385 static inline void local_dec(local_t *l)
17386 {
17387- asm volatile(_ASM_DEC "%0"
17388+ asm volatile(_ASM_DEC "%0\n"
17389+
17390+#ifdef CONFIG_PAX_REFCOUNT
17391+ "jno 0f\n"
17392+ _ASM_INC "%0\n"
17393+ "int $4\n0:\n"
17394+ _ASM_EXTABLE(0b, 0b)
17395+#endif
17396+
17397+ : "+m" (l->a.counter));
17398+}
17399+
17400+static inline void local_dec_unchecked(local_unchecked_t *l)
17401+{
17402+ asm volatile(_ASM_DEC "%0\n"
17403 : "+m" (l->a.counter));
17404 }
17405
17406 static inline void local_add(long i, local_t *l)
17407 {
17408- asm volatile(_ASM_ADD "%1,%0"
17409+ asm volatile(_ASM_ADD "%1,%0\n"
17410+
17411+#ifdef CONFIG_PAX_REFCOUNT
17412+ "jno 0f\n"
17413+ _ASM_SUB "%1,%0\n"
17414+ "int $4\n0:\n"
17415+ _ASM_EXTABLE(0b, 0b)
17416+#endif
17417+
17418+ : "+m" (l->a.counter)
17419+ : "ir" (i));
17420+}
17421+
17422+static inline void local_add_unchecked(long i, local_unchecked_t *l)
17423+{
17424+ asm volatile(_ASM_ADD "%1,%0\n"
17425 : "+m" (l->a.counter)
17426 : "ir" (i));
17427 }
17428
17429 static inline void local_sub(long i, local_t *l)
17430 {
17431- asm volatile(_ASM_SUB "%1,%0"
17432+ asm volatile(_ASM_SUB "%1,%0\n"
17433+
17434+#ifdef CONFIG_PAX_REFCOUNT
17435+ "jno 0f\n"
17436+ _ASM_ADD "%1,%0\n"
17437+ "int $4\n0:\n"
17438+ _ASM_EXTABLE(0b, 0b)
17439+#endif
17440+
17441+ : "+m" (l->a.counter)
17442+ : "ir" (i));
17443+}
17444+
17445+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
17446+{
17447+ asm volatile(_ASM_SUB "%1,%0\n"
17448 : "+m" (l->a.counter)
17449 : "ir" (i));
17450 }
17451@@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
17452 */
17453 static inline int local_sub_and_test(long i, local_t *l)
17454 {
17455- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
17456+ GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
17457 }
17458
17459 /**
17460@@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
17461 */
17462 static inline int local_dec_and_test(local_t *l)
17463 {
17464- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
17465+ GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
17466 }
17467
17468 /**
17469@@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
17470 */
17471 static inline int local_inc_and_test(local_t *l)
17472 {
17473- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
17474+ GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
17475 }
17476
17477 /**
17478@@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
17479 */
17480 static inline int local_add_negative(long i, local_t *l)
17481 {
17482- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
17483+ GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
17484 }
17485
17486 /**
17487@@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l)
17488 static inline long local_add_return(long i, local_t *l)
17489 {
17490 long __i = i;
17491+ asm volatile(_ASM_XADD "%0, %1\n"
17492+
17493+#ifdef CONFIG_PAX_REFCOUNT
17494+ "jno 0f\n"
17495+ _ASM_MOV "%0,%1\n"
17496+ "int $4\n0:\n"
17497+ _ASM_EXTABLE(0b, 0b)
17498+#endif
17499+
17500+ : "+r" (i), "+m" (l->a.counter)
17501+ : : "memory");
17502+ return i + __i;
17503+}
17504+
17505+/**
17506+ * local_add_return_unchecked - add and return
17507+ * @i: integer value to add
17508+ * @l: pointer to type local_unchecked_t
17509+ *
17510+ * Atomically adds @i to @l and returns @i + @l
17511+ */
17512+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
17513+{
17514+ long __i = i;
17515 asm volatile(_ASM_XADD "%0, %1;"
17516 : "+r" (i), "+m" (l->a.counter)
17517 : : "memory");
17518@@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
17519
17520 #define local_cmpxchg(l, o, n) \
17521 (cmpxchg_local(&((l)->a.counter), (o), (n)))
17522+#define local_cmpxchg_unchecked(l, o, n) \
17523+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
17524 /* Always has a lock prefix */
17525 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
17526
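
Each PAX_REFCOUNT block above instruments a local_t update the same way: perform the operation, jno past the fixup when the overflow flag is clear, otherwise undo the operation and execute int $4 so the #OF handler can report the overflow; the *_unchecked variants keep the raw behaviour for counters that may legitimately wrap. The same detect-and-undo idea in plain C — a sketch using __builtin_add_overflow (GCC 5+) in place of the flags register:

#include <limits.h>
#include <stdio.h>

static long checked_add(long *counter, long i)
{
    long result;
    if (__builtin_add_overflow(*counter, i, &result)) {
        /* the asm version applies the op, undoes it, then traps via int $4 */
        fprintf(stderr, "refcount overflow detected\n");
        return *counter;           /* counter left unmodified */
    }
    *counter = result;
    return result;
}

int main(void)
{
    long c = LONG_MAX;
    checked_add(&c, 1);            /* caught; c still LONG_MAX */
    return 0;
}
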
17527diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
17528new file mode 100644
17529index 0000000..2bfd3ba
17530--- /dev/null
17531+++ b/arch/x86/include/asm/mman.h
17532@@ -0,0 +1,15 @@
17533+#ifndef _X86_MMAN_H
17534+#define _X86_MMAN_H
17535+
17536+#include <uapi/asm/mman.h>
17537+
17538+#ifdef __KERNEL__
17539+#ifndef __ASSEMBLY__
17540+#ifdef CONFIG_X86_32
17541+#define arch_mmap_check i386_mmap_check
17542+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
17543+#endif
17544+#endif
17545+#endif
17546+
17547+#endif /* X86_MMAN_H */
17548diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
17549index 876e74e..e20bfb1 100644
17550--- a/arch/x86/include/asm/mmu.h
17551+++ b/arch/x86/include/asm/mmu.h
17552@@ -9,7 +9,7 @@
17553 * we put the segment information here.
17554 */
17555 typedef struct {
17556- void *ldt;
17557+ struct desc_struct *ldt;
17558 int size;
17559
17560 #ifdef CONFIG_X86_64
17561@@ -18,7 +18,19 @@ typedef struct {
17562 #endif
17563
17564 struct mutex lock;
17565- void __user *vdso;
17566+ unsigned long vdso;
17567+
17568+#ifdef CONFIG_X86_32
17569+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17570+ unsigned long user_cs_base;
17571+ unsigned long user_cs_limit;
17572+
17573+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17574+ cpumask_t cpu_user_cs_mask;
17575+#endif
17576+
17577+#endif
17578+#endif
17579 } mm_context_t;
17580
17581 #ifdef CONFIG_SMP
17582diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
17583index be12c53..07fd3ca 100644
17584--- a/arch/x86/include/asm/mmu_context.h
17585+++ b/arch/x86/include/asm/mmu_context.h
17586@@ -24,6 +24,20 @@ void destroy_context(struct mm_struct *mm);
17587
17588 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
17589 {
17590+
17591+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17592+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
17593+ unsigned int i;
17594+ pgd_t *pgd;
17595+
17596+ pax_open_kernel();
17597+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
17598+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
17599+ set_pgd_batched(pgd+i, native_make_pgd(0));
17600+ pax_close_kernel();
17601+ }
17602+#endif
17603+
17604 #ifdef CONFIG_SMP
17605 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
17606 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
17607@@ -34,16 +48,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17608 struct task_struct *tsk)
17609 {
17610 unsigned cpu = smp_processor_id();
17611+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17612+ int tlbstate = TLBSTATE_OK;
17613+#endif
17614
17615 if (likely(prev != next)) {
17616 #ifdef CONFIG_SMP
17617+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17618+ tlbstate = this_cpu_read(cpu_tlbstate.state);
17619+#endif
17620 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17621 this_cpu_write(cpu_tlbstate.active_mm, next);
17622 #endif
17623 cpumask_set_cpu(cpu, mm_cpumask(next));
17624
17625 /* Re-load page tables */
17626+#ifdef CONFIG_PAX_PER_CPU_PGD
17627+ pax_open_kernel();
17628+
17629+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17630+ if (static_cpu_has(X86_FEATURE_PCID))
17631+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17632+ else
17633+#endif
17634+
17635+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17636+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17637+ pax_close_kernel();
17638+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17639+
17640+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17641+ if (static_cpu_has(X86_FEATURE_PCID)) {
17642+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17643+ u64 descriptor[2];
17644+ descriptor[0] = PCID_USER;
17645+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17646+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17647+ descriptor[0] = PCID_KERNEL;
17648+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17649+ }
17650+ } else {
17651+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17652+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17653+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17654+ else
17655+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17656+ }
17657+ } else
17658+#endif
17659+
17660+ load_cr3(get_cpu_pgd(cpu, kernel));
17661+#else
17662 load_cr3(next->pgd);
17663+#endif
17664
17665 /* Stop flush ipis for the previous mm */
17666 cpumask_clear_cpu(cpu, mm_cpumask(prev));
17667@@ -51,9 +108,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17668 /* Load the LDT, if the LDT is different: */
17669 if (unlikely(prev->context.ldt != next->context.ldt))
17670 load_LDT_nolock(&next->context);
17671+
17672+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17673+ if (!(__supported_pte_mask & _PAGE_NX)) {
17674+ smp_mb__before_atomic();
17675+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
17676+ smp_mb__after_atomic();
17677+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17678+ }
17679+#endif
17680+
17681+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17682+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
17683+ prev->context.user_cs_limit != next->context.user_cs_limit))
17684+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17685+#ifdef CONFIG_SMP
17686+ else if (unlikely(tlbstate != TLBSTATE_OK))
17687+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17688+#endif
17689+#endif
17690+
17691 }
17692+ else {
17693+
17694+#ifdef CONFIG_PAX_PER_CPU_PGD
17695+ pax_open_kernel();
17696+
17697+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17698+ if (static_cpu_has(X86_FEATURE_PCID))
17699+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17700+ else
17701+#endif
17702+
17703+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17704+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17705+ pax_close_kernel();
17706+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17707+
17708+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17709+ if (static_cpu_has(X86_FEATURE_PCID)) {
17710+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17711+ u64 descriptor[2];
17712+ descriptor[0] = PCID_USER;
17713+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17714+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17715+ descriptor[0] = PCID_KERNEL;
17716+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17717+ }
17718+ } else {
17719+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17720+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17721+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17722+ else
17723+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17724+ }
17725+ } else
17726+#endif
17727+
17728+ load_cr3(get_cpu_pgd(cpu, kernel));
17729+#endif
17730+
17731 #ifdef CONFIG_SMP
17732- else {
17733 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17734 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
17735
17736@@ -70,11 +185,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17737 * tlb flush IPI delivery. We must reload CR3
17738 * to make sure to use no freed page tables.
17739 */
17740+
17741+#ifndef CONFIG_PAX_PER_CPU_PGD
17742 load_cr3(next->pgd);
17743+#endif
17744+
17745 load_LDT_nolock(&next->context);
17746+
17747+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17748+ if (!(__supported_pte_mask & _PAGE_NX))
17749+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17750+#endif
17751+
17752+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17753+#ifdef CONFIG_PAX_PAGEEXEC
17754+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
17755+#endif
17756+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17757+#endif
17758+
17759 }
17760+#endif
17761 }
17762-#endif
17763 }
17764
17765 #define activate_mm(prev, next) \
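
In the switch_mm() hunks above, the PCID path invalidates the user PCID (and, without STRONGUDEREF, the kernel PCID too) using INVPCID type 1, "single-context": flush every TLB entry tagged with one PCID. A sketch of what each __ASM_INVPCID sequence amounts to, written with the invpcid mnemonic — the patch emits raw bytes via __ASM_INVPCID because assemblers of that era did not know the instruction:

#include <stdint.h>

#define INVPCID_SINGLE_CONTEXT 1UL   /* type 1: one PCID, all addresses */

static inline void invpcid_single_context(uint64_t pcid)
{
    /* 16-byte descriptor: PCID in bits 0-11 of the first qword, linear
     * address (unused for type 1) in the second */
    struct { uint64_t pcid, addr; } desc = { pcid, 0 };

    asm volatile("invpcid %0, %1"
                 : : "m" (desc), "r" (INVPCID_SINGLE_CONTEXT) : "memory");
}
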
17766diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
17767index e3b7819..b257c64 100644
17768--- a/arch/x86/include/asm/module.h
17769+++ b/arch/x86/include/asm/module.h
17770@@ -5,6 +5,7 @@
17771
17772 #ifdef CONFIG_X86_64
17773 /* X86_64 does not define MODULE_PROC_FAMILY */
17774+#define MODULE_PROC_FAMILY ""
17775 #elif defined CONFIG_M486
17776 #define MODULE_PROC_FAMILY "486 "
17777 #elif defined CONFIG_M586
17778@@ -57,8 +58,20 @@
17779 #error unknown processor family
17780 #endif
17781
17782-#ifdef CONFIG_X86_32
17783-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
17784+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
17785+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
17786+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
17787+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
17788+#else
17789+#define MODULE_PAX_KERNEXEC ""
17790 #endif
17791
17792+#ifdef CONFIG_PAX_MEMORY_UDEREF
17793+#define MODULE_PAX_UDEREF "UDEREF "
17794+#else
17795+#define MODULE_PAX_UDEREF ""
17796+#endif
17797+
17798+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
17799+
17800 #endif /* _ASM_X86_MODULE_H */
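
MODULE_ARCH_VERMAGIC above is assembled by plain string-literal concatenation, so a kernel built with, say, KERNEXEC(OR) and UDEREF stamps its modules accordingly and refuses modules whose vermagic lacks the same tags. A tiny demonstration with one hypothetical configuration:

#include <stdio.h>

#define MODULE_PROC_FAMILY  ""              /* x86_64 case above */
#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "  /* hypothetical .config */
#define MODULE_PAX_UDEREF   "UDEREF "

#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF

int main(void)
{
    printf("\"%s\"\n", MODULE_ARCH_VERMAGIC);   /* "KERNEXEC_OR UDEREF " */
    return 0;
}
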
17801diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
17802index 5f2fc44..106caa6 100644
17803--- a/arch/x86/include/asm/nmi.h
17804+++ b/arch/x86/include/asm/nmi.h
17805@@ -36,26 +36,35 @@ enum {
17806
17807 typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
17808
17809+struct nmiaction;
17810+
17811+struct nmiwork {
17812+ const struct nmiaction *action;
17813+ u64 max_duration;
17814+ struct irq_work irq_work;
17815+};
17816+
17817 struct nmiaction {
17818 struct list_head list;
17819 nmi_handler_t handler;
17820- u64 max_duration;
17821- struct irq_work irq_work;
17822 unsigned long flags;
17823 const char *name;
17824-};
17825+ struct nmiwork *work;
17826+} __do_const;
17827
17828 #define register_nmi_handler(t, fn, fg, n, init...) \
17829 ({ \
17830- static struct nmiaction init fn##_na = { \
17831+ static struct nmiwork fn##_nw; \
17832+ static const struct nmiaction init fn##_na = { \
17833 .handler = (fn), \
17834 .name = (n), \
17835 .flags = (fg), \
17836+ .work = &fn##_nw, \
17837 }; \
17838 __register_nmi_handler((t), &fn##_na); \
17839 })
17840
17841-int __register_nmi_handler(unsigned int, struct nmiaction *);
17842+int __register_nmi_handler(unsigned int, const struct nmiaction *);
17843
17844 void unregister_nmi_handler(unsigned int, const char *);
17845
17846diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
17847index 775873d..04cd306 100644
17848--- a/arch/x86/include/asm/page.h
17849+++ b/arch/x86/include/asm/page.h
17850@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17851 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
17852
17853 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
17854+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
17855
17856 #define __boot_va(x) __va(x)
17857 #define __boot_pa(x) __pa(x)
17858@@ -60,11 +61,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17859 * virt_to_page(kaddr) returns a valid pointer if and only if
17860 * virt_addr_valid(kaddr) returns true.
17861 */
17862-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17863 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
17864 extern bool __virt_addr_valid(unsigned long kaddr);
17865 #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
17866
17867+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
17868+#define virt_to_page(kaddr) \
17869+ ({ \
17870+ const void *__kaddr = (const void *)(kaddr); \
17871+ BUG_ON(!virt_addr_valid(__kaddr)); \
17872+ pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \
17873+ })
17874+#else
17875+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17876+#endif
17877+
17878 #endif /* __ASSEMBLY__ */
17879
17880 #include <asm-generic/memory_model.h>
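
Under GRKERNSEC_KSTACKOVERFLOW, virt_to_page() above becomes a GCC statement expression: the argument is captured once into __kaddr, validated with BUG_ON(), and only then used, so side effects are not doubled. The same single-evaluation-plus-check idiom in userspace, with assert() standing in for BUG_ON() (names hypothetical):

#include <assert.h>
#include <stdio.h>

#define checked_deref(p)              \
    ({                                \
        const int *__p = (p);         \
        assert(__p != NULL);          \
        *__p;                         \
    })

int main(void)
{
    int x = 42;
    /* (p) is evaluated exactly once, even though __p is used twice above */
    printf("%d\n", checked_deref(&x));
    return 0;
}
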
17881diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
17882index 0f1ddee..e2fc3d1 100644
17883--- a/arch/x86/include/asm/page_64.h
17884+++ b/arch/x86/include/asm/page_64.h
17885@@ -7,9 +7,9 @@
17886
17887 /* duplicated to the one in bootmem.h */
17888 extern unsigned long max_pfn;
17889-extern unsigned long phys_base;
17890+extern const unsigned long phys_base;
17891
17892-static inline unsigned long __phys_addr_nodebug(unsigned long x)
17893+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
17894 {
17895 unsigned long y = x - __START_KERNEL_map;
17896
17897diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
17898index cd6e1610..70f4418 100644
17899--- a/arch/x86/include/asm/paravirt.h
17900+++ b/arch/x86/include/asm/paravirt.h
17901@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
17902 return (pmd_t) { ret };
17903 }
17904
17905-static inline pmdval_t pmd_val(pmd_t pmd)
17906+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
17907 {
17908 pmdval_t ret;
17909
17910@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
17911 val);
17912 }
17913
17914+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
17915+{
17916+ pgdval_t val = native_pgd_val(pgd);
17917+
17918+ if (sizeof(pgdval_t) > sizeof(long))
17919+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
17920+ val, (u64)val >> 32);
17921+ else
17922+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
17923+ val);
17924+}
17925+
17926 static inline void pgd_clear(pgd_t *pgdp)
17927 {
17928 set_pgd(pgdp, __pgd(0));
17929@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
17930 pv_mmu_ops.set_fixmap(idx, phys, flags);
17931 }
17932
17933+#ifdef CONFIG_PAX_KERNEXEC
17934+static inline unsigned long pax_open_kernel(void)
17935+{
17936+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
17937+}
17938+
17939+static inline unsigned long pax_close_kernel(void)
17940+{
17941+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
17942+}
17943+#else
17944+static inline unsigned long pax_open_kernel(void) { return 0; }
17945+static inline unsigned long pax_close_kernel(void) { return 0; }
17946+#endif
17947+
17948 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
17949
17950 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
17951@@ -906,7 +933,7 @@ extern void default_banner(void);
17952
17953 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
17954 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
17955-#define PARA_INDIRECT(addr) *%cs:addr
17956+#define PARA_INDIRECT(addr) *%ss:addr
17957 #endif
17958
17959 #define INTERRUPT_RETURN \
17960@@ -981,6 +1008,21 @@ extern void default_banner(void);
17961 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
17962 CLBR_NONE, \
17963 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
17964+
17965+#define GET_CR0_INTO_RDI \
17966+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
17967+ mov %rax,%rdi
17968+
17969+#define SET_RDI_INTO_CR0 \
17970+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
17971+
17972+#define GET_CR3_INTO_RDI \
17973+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
17974+ mov %rax,%rdi
17975+
17976+#define SET_RDI_INTO_CR3 \
17977+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
17978+
17979 #endif /* CONFIG_X86_32 */
17980
17981 #endif /* __ASSEMBLY__ */
17982diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
17983index 7549b8b..f0edfda 100644
17984--- a/arch/x86/include/asm/paravirt_types.h
17985+++ b/arch/x86/include/asm/paravirt_types.h
17986@@ -84,7 +84,7 @@ struct pv_init_ops {
17987 */
17988 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
17989 unsigned long addr, unsigned len);
17990-};
17991+} __no_const __no_randomize_layout;
17992
17993
17994 struct pv_lazy_ops {
17995@@ -92,13 +92,13 @@ struct pv_lazy_ops {
17996 void (*enter)(void);
17997 void (*leave)(void);
17998 void (*flush)(void);
17999-};
18000+} __no_randomize_layout;
18001
18002 struct pv_time_ops {
18003 unsigned long long (*sched_clock)(void);
18004 unsigned long long (*steal_clock)(int cpu);
18005 unsigned long (*get_tsc_khz)(void);
18006-};
18007+} __no_const __no_randomize_layout;
18008
18009 struct pv_cpu_ops {
18010 /* hooks for various privileged instructions */
18011@@ -192,7 +192,7 @@ struct pv_cpu_ops {
18012
18013 void (*start_context_switch)(struct task_struct *prev);
18014 void (*end_context_switch)(struct task_struct *next);
18015-};
18016+} __no_const __no_randomize_layout;
18017
18018 struct pv_irq_ops {
18019 /*
18020@@ -215,7 +215,7 @@ struct pv_irq_ops {
18021 #ifdef CONFIG_X86_64
18022 void (*adjust_exception_frame)(void);
18023 #endif
18024-};
18025+} __no_randomize_layout;
18026
18027 struct pv_apic_ops {
18028 #ifdef CONFIG_X86_LOCAL_APIC
18029@@ -223,7 +223,7 @@ struct pv_apic_ops {
18030 unsigned long start_eip,
18031 unsigned long start_esp);
18032 #endif
18033-};
18034+} __no_const __no_randomize_layout;
18035
18036 struct pv_mmu_ops {
18037 unsigned long (*read_cr2)(void);
18038@@ -313,6 +313,7 @@ struct pv_mmu_ops {
18039 struct paravirt_callee_save make_pud;
18040
18041 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
18042+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
18043 #endif /* PAGETABLE_LEVELS == 4 */
18044 #endif /* PAGETABLE_LEVELS >= 3 */
18045
18046@@ -324,7 +325,13 @@ struct pv_mmu_ops {
18047 an mfn. We can tell which is which from the index. */
18048 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
18049 phys_addr_t phys, pgprot_t flags);
18050-};
18051+
18052+#ifdef CONFIG_PAX_KERNEXEC
18053+ unsigned long (*pax_open_kernel)(void);
18054+ unsigned long (*pax_close_kernel)(void);
18055+#endif
18056+
18057+} __no_randomize_layout;
18058
18059 struct arch_spinlock;
18060 #ifdef CONFIG_SMP
18061@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
18062 struct pv_lock_ops {
18063 struct paravirt_callee_save lock_spinning;
18064 void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
18065-};
18066+} __no_randomize_layout;
18067
18068 /* This contains all the paravirt structures: we get a convenient
18069 * number for each function using the offset which we use to indicate
18070- * what to patch. */
18071+ * what to patch.
18072+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
18073+ */
18074+
18075 struct paravirt_patch_template {
18076 struct pv_init_ops pv_init_ops;
18077 struct pv_time_ops pv_time_ops;
18078@@ -349,7 +359,7 @@ struct paravirt_patch_template {
18079 struct pv_apic_ops pv_apic_ops;
18080 struct pv_mmu_ops pv_mmu_ops;
18081 struct pv_lock_ops pv_lock_ops;
18082-};
18083+} __no_randomize_layout;
18084
18085 extern struct pv_info pv_info;
18086 extern struct pv_init_ops pv_init_ops;
18087diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
18088index c4412e9..90e88c5 100644
18089--- a/arch/x86/include/asm/pgalloc.h
18090+++ b/arch/x86/include/asm/pgalloc.h
18091@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
18092 pmd_t *pmd, pte_t *pte)
18093 {
18094 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
18095+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
18096+}
18097+
18098+static inline void pmd_populate_user(struct mm_struct *mm,
18099+ pmd_t *pmd, pte_t *pte)
18100+{
18101+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
18102 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
18103 }
18104
18105@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
18106
18107 #ifdef CONFIG_X86_PAE
18108 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
18109+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
18110+{
18111+ pud_populate(mm, pudp, pmd);
18112+}
18113 #else /* !CONFIG_X86_PAE */
18114 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
18115 {
18116 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
18117 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
18118 }
18119+
18120+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
18121+{
18122+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
18123+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
18124+}
18125 #endif /* CONFIG_X86_PAE */
18126
18127 #if PAGETABLE_LEVELS > 3
18128@@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
18129 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
18130 }
18131
18132+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
18133+{
18134+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
18135+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
18136+}
18137+
18138 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
18139 {
18140 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
18141diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
18142index 206a87f..1623b06 100644
18143--- a/arch/x86/include/asm/pgtable-2level.h
18144+++ b/arch/x86/include/asm/pgtable-2level.h
18145@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
18146
18147 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18148 {
18149+ pax_open_kernel();
18150 *pmdp = pmd;
18151+ pax_close_kernel();
18152 }
18153
18154 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18155diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
18156index 81bb91b..9392125 100644
18157--- a/arch/x86/include/asm/pgtable-3level.h
18158+++ b/arch/x86/include/asm/pgtable-3level.h
18159@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18160
18161 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18162 {
18163+ pax_open_kernel();
18164 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
18165+ pax_close_kernel();
18166 }
18167
18168 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18169 {
18170+ pax_open_kernel();
18171 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
18172+ pax_close_kernel();
18173 }
18174
18175 /*
18176diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
18177index aa97a07..f169e5b 100644
18178--- a/arch/x86/include/asm/pgtable.h
18179+++ b/arch/x86/include/asm/pgtable.h
18180@@ -46,6 +46,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
18181
18182 #ifndef __PAGETABLE_PUD_FOLDED
18183 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
18184+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
18185 #define pgd_clear(pgd) native_pgd_clear(pgd)
18186 #endif
18187
18188@@ -83,12 +84,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
18189
18190 #define arch_end_context_switch(prev) do {} while(0)
18191
18192+#define pax_open_kernel() native_pax_open_kernel()
18193+#define pax_close_kernel() native_pax_close_kernel()
18194 #endif /* CONFIG_PARAVIRT */
18195
18196+#define __HAVE_ARCH_PAX_OPEN_KERNEL
18197+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
18198+
18199+#ifdef CONFIG_PAX_KERNEXEC
18200+static inline unsigned long native_pax_open_kernel(void)
18201+{
18202+ unsigned long cr0;
18203+
18204+ preempt_disable();
18205+ barrier();
18206+ cr0 = read_cr0() ^ X86_CR0_WP;
18207+ BUG_ON(cr0 & X86_CR0_WP);
18208+ write_cr0(cr0);
18209+ return cr0 ^ X86_CR0_WP;
18210+}
18211+
18212+static inline unsigned long native_pax_close_kernel(void)
18213+{
18214+ unsigned long cr0;
18215+
18216+ cr0 = read_cr0() ^ X86_CR0_WP;
18217+ BUG_ON(!(cr0 & X86_CR0_WP));
18218+ write_cr0(cr0);
18219+ barrier();
18220+ preempt_enable_no_resched();
18221+ return cr0 ^ X86_CR0_WP;
18222+}
18223+#else
18224+static inline unsigned long native_pax_open_kernel(void) { return 0; }
18225+static inline unsigned long native_pax_close_kernel(void) { return 0; }
18226+#endif
18227+
18228 /*
18229 * The following only work if pte_present() is true.
18230 * Undefined behaviour if not..
18231 */
18232+static inline int pte_user(pte_t pte)
18233+{
18234+ return pte_val(pte) & _PAGE_USER;
18235+}
18236+
18237 static inline int pte_dirty(pte_t pte)
18238 {
18239 return pte_flags(pte) & _PAGE_DIRTY;
18240@@ -155,6 +195,11 @@ static inline unsigned long pud_pfn(pud_t pud)
18241 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
18242 }
18243
18244+static inline unsigned long pgd_pfn(pgd_t pgd)
18245+{
18246+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
18247+}
18248+
18249 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
18250
18251 static inline int pmd_large(pmd_t pte)
18252@@ -208,9 +253,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
18253 return pte_clear_flags(pte, _PAGE_RW);
18254 }
18255
18256+static inline pte_t pte_mkread(pte_t pte)
18257+{
18258+ return __pte(pte_val(pte) | _PAGE_USER);
18259+}
18260+
18261 static inline pte_t pte_mkexec(pte_t pte)
18262 {
18263- return pte_clear_flags(pte, _PAGE_NX);
18264+#ifdef CONFIG_X86_PAE
18265+ if (__supported_pte_mask & _PAGE_NX)
18266+ return pte_clear_flags(pte, _PAGE_NX);
18267+ else
18268+#endif
18269+ return pte_set_flags(pte, _PAGE_USER);
18270+}
18271+
18272+static inline pte_t pte_exprotect(pte_t pte)
18273+{
18274+#ifdef CONFIG_X86_PAE
18275+ if (__supported_pte_mask & _PAGE_NX)
18276+ return pte_set_flags(pte, _PAGE_NX);
18277+ else
18278+#endif
18279+ return pte_clear_flags(pte, _PAGE_USER);
18280 }
18281
18282 static inline pte_t pte_mkdirty(pte_t pte)
18283@@ -440,6 +505,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
18284 #endif
18285
18286 #ifndef __ASSEMBLY__
18287+
18288+#ifdef CONFIG_PAX_PER_CPU_PGD
18289+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
18290+enum cpu_pgd_type {kernel = 0, user = 1};
18291+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
18292+{
18293+ return cpu_pgd[cpu][type];
18294+}
18295+#endif
18296+
18297 #include <linux/mm_types.h>
18298 #include <linux/mmdebug.h>
18299 #include <linux/log2.h>
18300@@ -586,7 +661,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
18301 * Currently stuck as a macro due to indirect forward reference to
18302 * linux/mmzone.h's __section_mem_map_addr() definition:
18303 */
18304-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
18305+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
18306
18307 /* Find an entry in the second-level page table.. */
18308 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
18309@@ -626,7 +701,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
18310 * Currently stuck as a macro due to indirect forward reference to
18311 * linux/mmzone.h's __section_mem_map_addr() definition:
18312 */
18313-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
18314+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
18315
18316 /* to find an entry in a page-table-directory. */
18317 static inline unsigned long pud_index(unsigned long address)
18318@@ -641,7 +716,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
18319
18320 static inline int pgd_bad(pgd_t pgd)
18321 {
18322- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
18323+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
18324 }
18325
18326 static inline int pgd_none(pgd_t pgd)
18327@@ -664,7 +739,12 @@ static inline int pgd_none(pgd_t pgd)
18328 * pgd_offset() returns a (pgd_t *)
18329 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
18330 */
18331-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
18332+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
18333+
18334+#ifdef CONFIG_PAX_PER_CPU_PGD
18335+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
18336+#endif
18337+
18338 /*
18339 * a shortcut which implies the use of the kernel's pgd, instead
18340 * of a process's
18341@@ -675,6 +755,23 @@ static inline int pgd_none(pgd_t pgd)
18342 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
18343 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
18344
18345+#ifdef CONFIG_X86_32
18346+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
18347+#else
18348+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
18349+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
18350+
18351+#ifdef CONFIG_PAX_MEMORY_UDEREF
18352+#ifdef __ASSEMBLY__
18353+#define pax_user_shadow_base pax_user_shadow_base(%rip)
18354+#else
18355+extern unsigned long pax_user_shadow_base;
18356+extern pgdval_t clone_pgd_mask;
18357+#endif
18358+#endif
18359+
18360+#endif
18361+
18362 #ifndef __ASSEMBLY__
18363
18364 extern int direct_gbpages;
18365@@ -841,11 +938,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
18366 * dst and src can be on the same page, but the range must not overlap,
18367 * and must not cross a page boundary.
18368 */
18369-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
18370+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
18371 {
18372- memcpy(dst, src, count * sizeof(pgd_t));
18373+ pax_open_kernel();
18374+ while (count--)
18375+ *dst++ = *src++;
18376+ pax_close_kernel();
18377 }
18378
18379+#ifdef CONFIG_PAX_PER_CPU_PGD
18380+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
18381+#endif
18382+
18383+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18384+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
18385+#else
18386+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
18387+#endif
18388+
18389 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
18390 static inline int page_level_shift(enum pg_level level)
18391 {
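
The pgtable.h changes above funnel every page-table store through pax_open_kernel()/pax_close_kernel() — see the rewritten clone_pgd_range(), which copies entry by entry inside one open/close window instead of calling memcpy(). Under KERNEXEC the tables stay read-only outside that window. A minimal user-space analogy of the pattern, with mprotect() standing in for the PaX primitives (illustrative only, not kernel code):

/*
 * User-space analogy: keep a table read-only and open a short write
 * window only around updates, as clone_pgd_range() does with
 * pax_open_kernel()/pax_close_kernel() above.
 */
#include <assert.h>
#include <string.h>
#include <sys/mman.h>

static void *table;

static void open_window(void)  { mprotect(table, 4096, PROT_READ | PROT_WRITE); }
static void close_window(void) { mprotect(table, 4096, PROT_READ); }

int main(void)
{
    unsigned long src[4] = { 1, 2, 3, 4 };

    table = mmap(0, 4096, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (table == MAP_FAILED)
        return 1;
    close_window();                      /* table is normally read-only */

    open_window();                       /* cf. pax_open_kernel() */
    memcpy(table, src, sizeof(src));     /* the actual update */
    close_window();                      /* cf. pax_close_kernel() */

    assert(((unsigned long *)table)[3] == 4);
    return 0;
}
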
18392diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
18393index 9ee3221..b979c6b 100644
18394--- a/arch/x86/include/asm/pgtable_32.h
18395+++ b/arch/x86/include/asm/pgtable_32.h
18396@@ -25,9 +25,6 @@
18397 struct mm_struct;
18398 struct vm_area_struct;
18399
18400-extern pgd_t swapper_pg_dir[1024];
18401-extern pgd_t initial_page_table[1024];
18402-
18403 static inline void pgtable_cache_init(void) { }
18404 static inline void check_pgt_cache(void) { }
18405 void paging_init(void);
18406@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
18407 # include <asm/pgtable-2level.h>
18408 #endif
18409
18410+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
18411+extern pgd_t initial_page_table[PTRS_PER_PGD];
18412+#ifdef CONFIG_X86_PAE
18413+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
18414+#endif
18415+
18416 #if defined(CONFIG_HIGHPTE)
18417 #define pte_offset_map(dir, address) \
18418 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
18419@@ -62,12 +65,17 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
18420 /* Clear a kernel PTE and flush it from the TLB */
18421 #define kpte_clear_flush(ptep, vaddr) \
18422 do { \
18423+ pax_open_kernel(); \
18424 pte_clear(&init_mm, (vaddr), (ptep)); \
18425+ pax_close_kernel(); \
18426 __flush_tlb_one((vaddr)); \
18427 } while (0)
18428
18429 #endif /* !__ASSEMBLY__ */
18430
18431+#define HAVE_ARCH_UNMAPPED_AREA
18432+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
18433+
18434 /*
18435 * kern_addr_valid() is (1) for FLATMEM and (0) for
18436 * SPARSEMEM and DISCONTIGMEM
18437diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
18438index ed5903b..c7fe163 100644
18439--- a/arch/x86/include/asm/pgtable_32_types.h
18440+++ b/arch/x86/include/asm/pgtable_32_types.h
18441@@ -8,7 +8,7 @@
18442 */
18443 #ifdef CONFIG_X86_PAE
18444 # include <asm/pgtable-3level_types.h>
18445-# define PMD_SIZE (1UL << PMD_SHIFT)
18446+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
18447 # define PMD_MASK (~(PMD_SIZE - 1))
18448 #else
18449 # include <asm/pgtable-2level_types.h>
18450@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
18451 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
18452 #endif
18453
18454+#ifdef CONFIG_PAX_KERNEXEC
18455+#ifndef __ASSEMBLY__
18456+extern unsigned char MODULES_EXEC_VADDR[];
18457+extern unsigned char MODULES_EXEC_END[];
18458+#endif
18459+#include <asm/boot.h>
18460+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
18461+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
18462+#else
18463+#define ktla_ktva(addr) (addr)
18464+#define ktva_ktla(addr) (addr)
18465+#endif
18466+
18467 #define MODULES_VADDR VMALLOC_START
18468 #define MODULES_END VMALLOC_END
18469 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
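
Under CONFIG_PAX_KERNEXEC the 32-bit kernel text is remapped, and the ktla_ktva()/ktva_ktla() pair added above converts between the two address views by a constant offset (identity otherwise). A stand-alone round-trip check — the constants below are assumed, typical 32-bit values:

/* Round-trip check for the ktla_ktva()/ktva_ktla() pair defined above.
 * PAGE_OFFSET and LOAD_PHYSICAL_ADDR are placeholder values; the point
 * is only that the two macros are exact inverses. */
#include <assert.h>

#define PAGE_OFFSET        0xc0000000UL
#define LOAD_PHYSICAL_ADDR 0x01000000UL   /* assumed CONFIG value */

#define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

int main(void)
{
    unsigned long addr = 0x00123456UL;

    assert(ktva_ktla(ktla_ktva(addr)) == addr);
    assert(ktla_ktva(ktva_ktla(addr)) == addr);
    return 0;
}
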
18470diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
18471index 5be9063..d62185b 100644
18472--- a/arch/x86/include/asm/pgtable_64.h
18473+++ b/arch/x86/include/asm/pgtable_64.h
18474@@ -16,10 +16,14 @@
18475
18476 extern pud_t level3_kernel_pgt[512];
18477 extern pud_t level3_ident_pgt[512];
18478+extern pud_t level3_vmalloc_start_pgt[512];
18479+extern pud_t level3_vmalloc_end_pgt[512];
18480+extern pud_t level3_vmemmap_pgt[512];
18481+extern pud_t level2_vmemmap_pgt[512];
18482 extern pmd_t level2_kernel_pgt[512];
18483 extern pmd_t level2_fixmap_pgt[512];
18484-extern pmd_t level2_ident_pgt[512];
18485-extern pgd_t init_level4_pgt[];
18486+extern pmd_t level2_ident_pgt[512*2];
18487+extern pgd_t init_level4_pgt[512];
18488
18489 #define swapper_pg_dir init_level4_pgt
18490
18491@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18492
18493 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18494 {
18495+ pax_open_kernel();
18496 *pmdp = pmd;
18497+ pax_close_kernel();
18498 }
18499
18500 static inline void native_pmd_clear(pmd_t *pmd)
18501@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
18502
18503 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18504 {
18505+ pax_open_kernel();
18506 *pudp = pud;
18507+ pax_close_kernel();
18508 }
18509
18510 static inline void native_pud_clear(pud_t *pud)
18511@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
18512
18513 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
18514 {
18515+ pax_open_kernel();
18516+ *pgdp = pgd;
18517+ pax_close_kernel();
18518+}
18519+
18520+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
18521+{
18522 *pgdp = pgd;
18523 }
18524
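
pgtable_64.h now offers two flavors of PGD store: native_set_pgd() brackets the write with pax_open_kernel()/pax_close_kernel(), while native_set_pgd_batched() performs the bare store, presumably for callers that hold the window open across a whole batch. The pattern, sketched with stand-in helpers (nothing below is kernel code):

/* Per-store window vs. caller-held window, cf. native_set_pgd() and
 * native_set_pgd_batched() above. open_kernel()/close_kernel() are
 * stand-ins for the PaX primitives. */
#include <stdio.h>

static int window_open;

static void open_kernel(void)  { window_open = 1; }
static void close_kernel(void) { window_open = 0; }

static unsigned long pgd[8];

static void set_pgd(int i, unsigned long v)          /* per-store window */
{
    open_kernel();
    pgd[i] = v;
    close_kernel();
}

static void set_pgd_batched(int i, unsigned long v)  /* caller holds window */
{
    pgd[i] = v;
}

int main(void)
{
    set_pgd(0, 0x1000);          /* one store, one open/close */

    open_kernel();               /* many stores, one open/close */
    for (int i = 1; i < 8; i++)
        set_pgd_batched(i, 0x1000UL * (i + 1));
    close_kernel();

    printf("pgd[7] = %#lx\n", pgd[7]);
    return 0;
}
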
18525diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
18526index 7166e25..baaa6fe 100644
18527--- a/arch/x86/include/asm/pgtable_64_types.h
18528+++ b/arch/x86/include/asm/pgtable_64_types.h
18529@@ -61,9 +61,14 @@ typedef struct { pteval_t pte; } pte_t;
18530 #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
18531 #define MODULES_END _AC(0xffffffffff000000, UL)
18532 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
18533+#define MODULES_EXEC_VADDR MODULES_VADDR
18534+#define MODULES_EXEC_END MODULES_END
18535 #define ESPFIX_PGD_ENTRY _AC(-2, UL)
18536 #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
18537
18538+#define ktla_ktva(addr) (addr)
18539+#define ktva_ktla(addr) (addr)
18540+
18541 #define EARLY_DYNAMIC_PAGE_TABLES 64
18542
18543 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
18544diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
18545index f216963..6bd7c21 100644
18546--- a/arch/x86/include/asm/pgtable_types.h
18547+++ b/arch/x86/include/asm/pgtable_types.h
18548@@ -111,8 +111,10 @@
18549
18550 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
18551 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
18552-#else
18553+#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
18554 #define _PAGE_NX (_AT(pteval_t, 0))
18555+#else
18556+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
18557 #endif
18558
18559 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
18560@@ -151,6 +153,9 @@
18561 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
18562 _PAGE_ACCESSED)
18563
18564+#define PAGE_READONLY_NOEXEC PAGE_READONLY
18565+#define PAGE_SHARED_NOEXEC PAGE_SHARED
18566+
18567 #define __PAGE_KERNEL_EXEC \
18568 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
18569 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
18570@@ -161,7 +166,7 @@
18571 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
18572 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
18573 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
18574-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
18575+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
18576 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
18577 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
18578 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
18579@@ -218,7 +223,7 @@
18580 #ifdef CONFIG_X86_64
18581 #define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
18582 #else
18583-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
18584+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18585 #define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18586 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
18587 #endif
18588@@ -257,7 +262,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
18589 {
18590 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
18591 }
18592+#endif
18593
18594+#if PAGETABLE_LEVELS == 3
18595+#include <asm-generic/pgtable-nopud.h>
18596+#endif
18597+
18598+#if PAGETABLE_LEVELS == 2
18599+#include <asm-generic/pgtable-nopmd.h>
18600+#endif
18601+
18602+#ifndef __ASSEMBLY__
18603 #if PAGETABLE_LEVELS > 3
18604 typedef struct { pudval_t pud; } pud_t;
18605
18606@@ -271,8 +286,6 @@ static inline pudval_t native_pud_val(pud_t pud)
18607 return pud.pud;
18608 }
18609 #else
18610-#include <asm-generic/pgtable-nopud.h>
18611-
18612 static inline pudval_t native_pud_val(pud_t pud)
18613 {
18614 return native_pgd_val(pud.pgd);
18615@@ -292,8 +305,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
18616 return pmd.pmd;
18617 }
18618 #else
18619-#include <asm-generic/pgtable-nopmd.h>
18620-
18621 static inline pmdval_t native_pmd_val(pmd_t pmd)
18622 {
18623 return native_pgd_val(pmd.pud.pgd);
18624@@ -333,7 +344,6 @@ typedef struct page *pgtable_t;
18625
18626 extern pteval_t __supported_pte_mask;
18627 extern void set_nx(void);
18628-extern int nx_enabled;
18629
18630 #define pgprot_writecombine pgprot_writecombine
18631 extern pgprot_t pgprot_writecombine(pgprot_t prot);
18632diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
18633index 7024c12..71c46b9 100644
18634--- a/arch/x86/include/asm/preempt.h
18635+++ b/arch/x86/include/asm/preempt.h
18636@@ -87,7 +87,7 @@ static __always_inline void __preempt_count_sub(int val)
18637 */
18638 static __always_inline bool __preempt_count_dec_and_test(void)
18639 {
18640- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
18641+ GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
18642 }
18643
18644 /*
18645diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
18646index a4ea023..2ff3cb8 100644
18647--- a/arch/x86/include/asm/processor.h
18648+++ b/arch/x86/include/asm/processor.h
18649@@ -128,7 +128,7 @@ struct cpuinfo_x86 {
18650 /* Index into per_cpu list: */
18651 u16 cpu_index;
18652 u32 microcode;
18653-} __attribute__((__aligned__(SMP_CACHE_BYTES)));
18654+} __attribute__((__aligned__(SMP_CACHE_BYTES))) __randomize_layout;
18655
18656 #define X86_VENDOR_INTEL 0
18657 #define X86_VENDOR_CYRIX 1
18658@@ -199,9 +199,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
18659 : "memory");
18660 }
18661
18662+/* invpcid (%rdx),%rax */
18663+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
18664+
18665+#define INVPCID_SINGLE_ADDRESS 0UL
18666+#define INVPCID_SINGLE_CONTEXT 1UL
18667+#define INVPCID_ALL_GLOBAL 2UL
18668+#define INVPCID_ALL_NONGLOBAL 3UL
18669+
18670+#define PCID_KERNEL 0UL
18671+#define PCID_USER 1UL
18672+#define PCID_NOFLUSH (1UL << 63)
18673+
18674 static inline void load_cr3(pgd_t *pgdir)
18675 {
18676- write_cr3(__pa(pgdir));
18677+ write_cr3(__pa(pgdir) | PCID_KERNEL);
18678 }
18679
18680 #ifdef CONFIG_X86_32
18681@@ -283,7 +295,7 @@ struct tss_struct {
18682
18683 } ____cacheline_aligned;
18684
18685-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
18686+extern struct tss_struct init_tss[NR_CPUS];
18687
18688 /*
18689 * Save the original ist values for checking stack pointers during debugging
18690@@ -479,6 +491,7 @@ struct thread_struct {
18691 unsigned short ds;
18692 unsigned short fsindex;
18693 unsigned short gsindex;
18694+ unsigned short ss;
18695 #endif
18696 #ifdef CONFIG_X86_32
18697 unsigned long ip;
18698@@ -588,29 +601,8 @@ static inline void load_sp0(struct tss_struct *tss,
18699 extern unsigned long mmu_cr4_features;
18700 extern u32 *trampoline_cr4_features;
18701
18702-static inline void set_in_cr4(unsigned long mask)
18703-{
18704- unsigned long cr4;
18705-
18706- mmu_cr4_features |= mask;
18707- if (trampoline_cr4_features)
18708- *trampoline_cr4_features = mmu_cr4_features;
18709- cr4 = read_cr4();
18710- cr4 |= mask;
18711- write_cr4(cr4);
18712-}
18713-
18714-static inline void clear_in_cr4(unsigned long mask)
18715-{
18716- unsigned long cr4;
18717-
18718- mmu_cr4_features &= ~mask;
18719- if (trampoline_cr4_features)
18720- *trampoline_cr4_features = mmu_cr4_features;
18721- cr4 = read_cr4();
18722- cr4 &= ~mask;
18723- write_cr4(cr4);
18724-}
18725+extern void set_in_cr4(unsigned long mask);
18726+extern void clear_in_cr4(unsigned long mask);
18727
18728 typedef struct {
18729 unsigned long seg;
18730@@ -836,11 +828,18 @@ static inline void spin_lock_prefetch(const void *x)
18731 */
18732 #define TASK_SIZE PAGE_OFFSET
18733 #define TASK_SIZE_MAX TASK_SIZE
18734+
18735+#ifdef CONFIG_PAX_SEGMEXEC
18736+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
18737+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
18738+#else
18739 #define STACK_TOP TASK_SIZE
18740-#define STACK_TOP_MAX STACK_TOP
18741+#endif
18742+
18743+#define STACK_TOP_MAX TASK_SIZE
18744
18745 #define INIT_THREAD { \
18746- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18747+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18748 .vm86_info = NULL, \
18749 .sysenter_cs = __KERNEL_CS, \
18750 .io_bitmap_ptr = NULL, \
18751@@ -854,7 +853,7 @@ static inline void spin_lock_prefetch(const void *x)
18752 */
18753 #define INIT_TSS { \
18754 .x86_tss = { \
18755- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18756+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18757 .ss0 = __KERNEL_DS, \
18758 .ss1 = __KERNEL_CS, \
18759 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
18760@@ -865,11 +864,7 @@ static inline void spin_lock_prefetch(const void *x)
18761 extern unsigned long thread_saved_pc(struct task_struct *tsk);
18762
18763 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
18764-#define KSTK_TOP(info) \
18765-({ \
18766- unsigned long *__ptr = (unsigned long *)(info); \
18767- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
18768-})
18769+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
18770
18771 /*
18772 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
18773@@ -884,7 +879,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18774 #define task_pt_regs(task) \
18775 ({ \
18776 struct pt_regs *__regs__; \
18777- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
18778+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
18779 __regs__ - 1; \
18780 })
18781
18782@@ -894,13 +889,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18783 /*
18784 * User space process size. 47bits minus one guard page.
18785 */
18786-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
18787+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
18788
18789 /* This decides where the kernel will search for a free chunk of vm
18790 * space during mmap's.
18791 */
18792 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
18793- 0xc0000000 : 0xFFFFe000)
18794+ 0xc0000000 : 0xFFFFf000)
18795
18796 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
18797 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
18798@@ -911,11 +906,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18799 #define STACK_TOP_MAX TASK_SIZE_MAX
18800
18801 #define INIT_THREAD { \
18802- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18803+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18804 }
18805
18806 #define INIT_TSS { \
18807- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18808+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18809 }
18810
18811 /*
18812@@ -943,6 +938,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
18813 */
18814 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
18815
18816+#ifdef CONFIG_PAX_SEGMEXEC
18817+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
18818+#endif
18819+
18820 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
18821
18822 /* Get/set a process' ability to use the timestamp counter instruction */
18823@@ -969,7 +968,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
18824 return 0;
18825 }
18826
18827-extern unsigned long arch_align_stack(unsigned long sp);
18828+#define arch_align_stack(x) ((x) & ~0xfUL)
18829 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
18830
18831 void default_idle(void);
18832@@ -979,6 +978,6 @@ bool xen_set_default_idle(void);
18833 #define xen_set_default_idle 0
18834 #endif
18835
18836-void stop_this_cpu(void *dummy);
18837+void stop_this_cpu(void *dummy) __noreturn;
18838 void df_debug(struct pt_regs *regs, long error_code);
18839 #endif /* _ASM_X86_PROCESSOR_H */
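
The processor.h hunks wire up PCID: load_cr3() now ORs a process-context identifier into the low bits of CR3, and PCID_NOFLUSH (CR3 bit 63) makes a CR3 write keep the old context's TLB entries rather than flushing them. How such a value is composed, as plain arithmetic:

/* Composition of a CR3 value as load_cr3() above does it: the low
 * 12 bits carry the PCID when CR4.PCIDE is set, bit 63 suppresses the
 * flush of the old PCID's TLB entries. Pure arithmetic, runnable
 * anywhere. */
#include <assert.h>
#include <stdio.h>

#define PCID_KERNEL   0UL
#define PCID_USER     1UL
#define PCID_NOFLUSH  (1UL << 63)

static unsigned long make_cr3(unsigned long pgd_pa, unsigned long pcid,
                              int noflush)
{
    assert((pgd_pa & 0xfffUL) == 0);   /* PGD must be page-aligned */
    assert(pcid < 4096);               /* PCID is a 12-bit field */
    return pgd_pa | pcid | (noflush ? PCID_NOFLUSH : 0);
}

int main(void)
{
    printf("kernel cr3: %#lx\n", make_cr3(0x1234000UL, PCID_KERNEL, 0));
    printf("user   cr3: %#lx\n", make_cr3(0x1234000UL, PCID_USER, 1));
    return 0;
}
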
18840diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
18841index 6205f0c..688a3a9 100644
18842--- a/arch/x86/include/asm/ptrace.h
18843+++ b/arch/x86/include/asm/ptrace.h
18844@@ -84,28 +84,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
18845 }
18846
18847 /*
18848- * user_mode_vm(regs) determines whether a register set came from user mode.
18849+ * user_mode(regs) determines whether a register set came from user mode.
18850 * This is true if V8086 mode was enabled OR if the register set was from
18851 * protected mode with RPL-3 CS value. This tricky test checks that with
18852 * one comparison. Many places in the kernel can bypass this full check
18853- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
18854+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
18855+ * be used.
18856 */
18857-static inline int user_mode(struct pt_regs *regs)
18858+static inline int user_mode_novm(struct pt_regs *regs)
18859 {
18860 #ifdef CONFIG_X86_32
18861 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
18862 #else
18863- return !!(regs->cs & 3);
18864+ return !!(regs->cs & SEGMENT_RPL_MASK);
18865 #endif
18866 }
18867
18868-static inline int user_mode_vm(struct pt_regs *regs)
18869+static inline int user_mode(struct pt_regs *regs)
18870 {
18871 #ifdef CONFIG_X86_32
18872 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
18873 USER_RPL;
18874 #else
18875- return user_mode(regs);
18876+ return user_mode_novm(regs);
18877 #endif
18878 }
18879
18880@@ -121,15 +122,16 @@ static inline int v8086_mode(struct pt_regs *regs)
18881 #ifdef CONFIG_X86_64
18882 static inline bool user_64bit_mode(struct pt_regs *regs)
18883 {
18884+ unsigned long cs = regs->cs & 0xffff;
18885 #ifndef CONFIG_PARAVIRT
18886 /*
18887 * On non-paravirt systems, this is the only long mode CPL 3
18888 * selector. We do not allow long mode selectors in the LDT.
18889 */
18890- return regs->cs == __USER_CS;
18891+ return cs == __USER_CS;
18892 #else
18893 /* Headers are too twisted for this to go in paravirt.h. */
18894- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
18895+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
18896 #endif
18897 }
18898
18899@@ -180,9 +182,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
18900 * Traps from the kernel do not save sp and ss.
18901 * Use the helper function to retrieve sp.
18902 */
18903- if (offset == offsetof(struct pt_regs, sp) &&
18904- regs->cs == __KERNEL_CS)
18905- return kernel_stack_pointer(regs);
18906+ if (offset == offsetof(struct pt_regs, sp)) {
18907+ unsigned long cs = regs->cs & 0xffff;
18908+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
18909+ return kernel_stack_pointer(regs);
18910+ }
18911 #endif
18912 return *(unsigned long *)((unsigned long)regs + offset);
18913 }
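
The renamed user_mode()/user_mode_novm() helpers classify a trap frame by the privilege bits of the saved CS selector, masking with 0xffff first so only the selector proper is compared. The underlying RPL test — the low two bits of any x86 selector give the requested privilege level, 0 for kernel, 3 for user (selector values below are illustrative):

#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3
#define USER_RPL         0x3

static int cs_is_user(unsigned short cs)
{
    return (cs & SEGMENT_RPL_MASK) == USER_RPL;
}

int main(void)
{
    printf("cs=0x10 -> %s\n", cs_is_user(0x10) ? "user" : "kernel");
    printf("cs=0x33 -> %s\n", cs_is_user(0x33) ? "user" : "kernel");
    return 0;
}
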
18914diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
18915index 70f46f0..adfbdb4 100644
18916--- a/arch/x86/include/asm/qrwlock.h
18917+++ b/arch/x86/include/asm/qrwlock.h
18918@@ -7,8 +7,8 @@
18919 #define queue_write_unlock queue_write_unlock
18920 static inline void queue_write_unlock(struct qrwlock *lock)
18921 {
18922- barrier();
18923- ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
18924+ barrier();
18925+ ACCESS_ONCE_RW(*(u8 *)&lock->cnts) = 0;
18926 }
18927 #endif
18928
18929diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
18930index 9c6b890..5305f53 100644
18931--- a/arch/x86/include/asm/realmode.h
18932+++ b/arch/x86/include/asm/realmode.h
18933@@ -22,16 +22,14 @@ struct real_mode_header {
18934 #endif
18935 /* APM/BIOS reboot */
18936 u32 machine_real_restart_asm;
18937-#ifdef CONFIG_X86_64
18938 u32 machine_real_restart_seg;
18939-#endif
18940 };
18941
18942 /* This must match data at trampoline_32/64.S */
18943 struct trampoline_header {
18944 #ifdef CONFIG_X86_32
18945 u32 start;
18946- u16 gdt_pad;
18947+ u16 boot_cs;
18948 u16 gdt_limit;
18949 u32 gdt_base;
18950 #else
18951diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
18952index a82c4f1..ac45053 100644
18953--- a/arch/x86/include/asm/reboot.h
18954+++ b/arch/x86/include/asm/reboot.h
18955@@ -6,13 +6,13 @@
18956 struct pt_regs;
18957
18958 struct machine_ops {
18959- void (*restart)(char *cmd);
18960- void (*halt)(void);
18961- void (*power_off)(void);
18962+ void (* __noreturn restart)(char *cmd);
18963+ void (* __noreturn halt)(void);
18964+ void (* __noreturn power_off)(void);
18965 void (*shutdown)(void);
18966 void (*crash_shutdown)(struct pt_regs *);
18967- void (*emergency_restart)(void);
18968-};
18969+ void (* __noreturn emergency_restart)(void);
18970+} __no_const;
18971
18972 extern struct machine_ops machine_ops;
18973
18974diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
18975index 8f7866a..e442f20 100644
18976--- a/arch/x86/include/asm/rmwcc.h
18977+++ b/arch/x86/include/asm/rmwcc.h
18978@@ -3,7 +3,34 @@
18979
18980 #ifdef CC_HAVE_ASM_GOTO
18981
18982-#define __GEN_RMWcc(fullop, var, cc, ...) \
18983+#ifdef CONFIG_PAX_REFCOUNT
18984+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18985+do { \
18986+ asm_volatile_goto (fullop \
18987+ ";jno 0f\n" \
18988+ fullantiop \
18989+ ";int $4\n0:\n" \
18990+ _ASM_EXTABLE(0b, 0b) \
18991+ ";j" cc " %l[cc_label]" \
18992+ : : "m" (var), ## __VA_ARGS__ \
18993+ : "memory" : cc_label); \
18994+ return 0; \
18995+cc_label: \
18996+ return 1; \
18997+} while (0)
18998+#else
18999+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
19000+do { \
19001+ asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \
19002+ : : "m" (var), ## __VA_ARGS__ \
19003+ : "memory" : cc_label); \
19004+ return 0; \
19005+cc_label: \
19006+ return 1; \
19007+} while (0)
19008+#endif
19009+
19010+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
19011 do { \
19012 asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
19013 : : "m" (var), ## __VA_ARGS__ \
19014@@ -13,15 +40,46 @@ cc_label: \
19015 return 1; \
19016 } while (0)
19017
19018-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
19019- __GEN_RMWcc(op " " arg0, var, cc)
19020+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
19021+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
19022
19023-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
19024- __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
19025+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
19026+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
19027+
19028+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
19029+ __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
19030+
19031+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
19032+ __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
19033
19034 #else /* !CC_HAVE_ASM_GOTO */
19035
19036-#define __GEN_RMWcc(fullop, var, cc, ...) \
19037+#ifdef CONFIG_PAX_REFCOUNT
19038+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
19039+do { \
19040+ char c; \
19041+ asm volatile (fullop \
19042+ ";jno 0f\n" \
19043+ fullantiop \
19044+ ";int $4\n0:\n" \
19045+ _ASM_EXTABLE(0b, 0b) \
19046+ "; set" cc " %1" \
19047+ : "+m" (var), "=qm" (c) \
19048+ : __VA_ARGS__ : "memory"); \
19049+ return c != 0; \
19050+} while (0)
19051+#else
19052+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
19053+do { \
19054+ char c; \
19055+ asm volatile (fullop "; set" cc " %1" \
19056+ : "+m" (var), "=qm" (c) \
19057+ : __VA_ARGS__ : "memory"); \
19058+ return c != 0; \
19059+} while (0)
19060+#endif
19061+
19062+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
19063 do { \
19064 char c; \
19065 asm volatile (fullop "; set" cc " %1" \
19066@@ -30,11 +88,17 @@ do { \
19067 return c != 0; \
19068 } while (0)
19069
19070-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
19071- __GEN_RMWcc(op " " arg0, var, cc)
19072+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
19073+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
19074+
19075+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
19076+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
19077+
19078+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
19079+ __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
19080
19081-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
19082- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
19083+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
19084+ __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
19085
19086 #endif /* CC_HAVE_ASM_GOTO */
19087
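
The rewritten __GEN_RMWcc() splits each read-modify-write into a checked and an _unchecked form. Under CONFIG_PAX_REFCOUNT the checked form performs the operation, tests the overflow flag (jno), and on signed overflow undoes it with the supplied anti-op before trapping via int $4, which the exception table entry turns into a recoverable event. The same semantics rendered in portable C — a sketch of the idea, not the kernel mechanism:

/* Checked increment: refuse (and report) a signed wraparound instead
 * of letting the counter overflow. The kernel version raises the
 * overflow exception ("int $4") and logs it; here we just return an
 * error and leave the counter saturated. */
#include <stdio.h>

static int checked_inc(int *counter)
{
    int new;

    if (__builtin_add_overflow(*counter, 1, &new))
        return -1;      /* would wrap: leave *counter as-is */
    *counter = new;
    return 0;
}

int main(void)
{
    int c = 0x7ffffffe;

    printf("inc -> %d, c=%d\n", checked_inc(&c), c);  /* ok */
    printf("inc -> %d, c=%d\n", checked_inc(&c), c);  /* refused */
    return 0;
}
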
19088diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
19089index cad82c9..2e5c5c1 100644
19090--- a/arch/x86/include/asm/rwsem.h
19091+++ b/arch/x86/include/asm/rwsem.h
19092@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
19093 {
19094 asm volatile("# beginning down_read\n\t"
19095 LOCK_PREFIX _ASM_INC "(%1)\n\t"
19096+
19097+#ifdef CONFIG_PAX_REFCOUNT
19098+ "jno 0f\n"
19099+ LOCK_PREFIX _ASM_DEC "(%1)\n"
19100+ "int $4\n0:\n"
19101+ _ASM_EXTABLE(0b, 0b)
19102+#endif
19103+
19104 /* adds 0x00000001 */
19105 " jns 1f\n"
19106 " call call_rwsem_down_read_failed\n"
19107@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
19108 "1:\n\t"
19109 " mov %1,%2\n\t"
19110 " add %3,%2\n\t"
19111+
19112+#ifdef CONFIG_PAX_REFCOUNT
19113+ "jno 0f\n"
19114+ "sub %3,%2\n"
19115+ "int $4\n0:\n"
19116+ _ASM_EXTABLE(0b, 0b)
19117+#endif
19118+
19119 " jle 2f\n\t"
19120 LOCK_PREFIX " cmpxchg %2,%0\n\t"
19121 " jnz 1b\n\t"
19122@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
19123 long tmp;
19124 asm volatile("# beginning down_write\n\t"
19125 LOCK_PREFIX " xadd %1,(%2)\n\t"
19126+
19127+#ifdef CONFIG_PAX_REFCOUNT
19128+ "jno 0f\n"
19129+ "mov %1,(%2)\n"
19130+ "int $4\n0:\n"
19131+ _ASM_EXTABLE(0b, 0b)
19132+#endif
19133+
19134 /* adds 0xffff0001, returns the old value */
19135 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
19136 /* was the active mask 0 before? */
19137@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
19138 long tmp;
19139 asm volatile("# beginning __up_read\n\t"
19140 LOCK_PREFIX " xadd %1,(%2)\n\t"
19141+
19142+#ifdef CONFIG_PAX_REFCOUNT
19143+ "jno 0f\n"
19144+ "mov %1,(%2)\n"
19145+ "int $4\n0:\n"
19146+ _ASM_EXTABLE(0b, 0b)
19147+#endif
19148+
19149 /* subtracts 1, returns the old value */
19150 " jns 1f\n\t"
19151 " call call_rwsem_wake\n" /* expects old value in %edx */
19152@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
19153 long tmp;
19154 asm volatile("# beginning __up_write\n\t"
19155 LOCK_PREFIX " xadd %1,(%2)\n\t"
19156+
19157+#ifdef CONFIG_PAX_REFCOUNT
19158+ "jno 0f\n"
19159+ "mov %1,(%2)\n"
19160+ "int $4\n0:\n"
19161+ _ASM_EXTABLE(0b, 0b)
19162+#endif
19163+
19164 /* subtracts 0xffff0001, returns the old value */
19165 " jns 1f\n\t"
19166 " call call_rwsem_wake\n" /* expects old value in %edx */
19167@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
19168 {
19169 asm volatile("# beginning __downgrade_write\n\t"
19170 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
19171+
19172+#ifdef CONFIG_PAX_REFCOUNT
19173+ "jno 0f\n"
19174+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
19175+ "int $4\n0:\n"
19176+ _ASM_EXTABLE(0b, 0b)
19177+#endif
19178+
19179 /*
19180 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
19181 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
19182@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
19183 */
19184 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
19185 {
19186- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
19187+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
19188+
19189+#ifdef CONFIG_PAX_REFCOUNT
19190+ "jno 0f\n"
19191+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
19192+ "int $4\n0:\n"
19193+ _ASM_EXTABLE(0b, 0b)
19194+#endif
19195+
19196 : "+m" (sem->count)
19197 : "er" (delta));
19198 }
19199@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
19200 */
19201 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
19202 {
19203- return delta + xadd(&sem->count, delta);
19204+ return delta + xadd_check_overflow(&sem->count, delta);
19205 }
19206
19207 #endif /* __KERNEL__ */
19208diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
19209index 6f1c3a8..7744f19 100644
19210--- a/arch/x86/include/asm/segment.h
19211+++ b/arch/x86/include/asm/segment.h
19212@@ -64,10 +64,15 @@
19213 * 26 - ESPFIX small SS
19214 * 27 - per-cpu [ offset to per-cpu data area ]
19215 * 28 - stack_canary-20 [ for stack protector ]
19216- * 29 - unused
19217- * 30 - unused
19218+ * 29 - PCI BIOS CS
19219+ * 30 - PCI BIOS DS
19220 * 31 - TSS for double fault handler
19221 */
19222+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
19223+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
19224+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
19225+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
19226+
19227 #define GDT_ENTRY_TLS_MIN 6
19228 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
19229
19230@@ -79,6 +84,8 @@
19231
19232 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
19233
19234+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
19235+
19236 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
19237
19238 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
19239@@ -104,6 +111,12 @@
19240 #define __KERNEL_STACK_CANARY 0
19241 #endif
19242
19243+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
19244+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
19245+
19246+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
19247+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
19248+
19249 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
19250
19251 /*
19252@@ -141,7 +154,7 @@
19253 */
19254
19255 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
19256-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
19257+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
19258
19259
19260 #else
19261@@ -165,6 +178,8 @@
19262 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
19263 #define __USER32_DS __USER_DS
19264
19265+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
19266+
19267 #define GDT_ENTRY_TSS 8 /* needs two entries */
19268 #define GDT_ENTRY_LDT 10 /* needs two entries */
19269 #define GDT_ENTRY_TLS_MIN 12
19270@@ -173,6 +188,8 @@
19271 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
19272 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
19273
19274+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
19275+
19276 /* TLS indexes for 64bit - hardcoded in arch_prctl */
19277 #define FS_TLS 0
19278 #define GS_TLS 1
19279@@ -180,12 +197,14 @@
19280 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
19281 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
19282
19283-#define GDT_ENTRIES 16
19284+#define GDT_ENTRIES 17
19285
19286 #endif
19287
19288 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
19289+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
19290 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
19291+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
19292 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
19293 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
19294 #ifndef CONFIG_PARAVIRT
19295@@ -268,7 +287,7 @@ static inline unsigned long get_limit(unsigned long segment)
19296 {
19297 unsigned long __limit;
19298 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
19299- return __limit + 1;
19300+ return __limit;
19301 }
19302
19303 #endif /* !__ASSEMBLY__ */
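
The segment.h additions all follow the usual selector arithmetic: a selector is the GDT index shifted left by three, with the low two bits carrying the RPL — hence the *8 and *8+3 definitions above. In code:

/* Selector = (GDT index << 3) | RPL. The example indices match the
 * well-known 64-bit layout (__KERNEL_CS = 0x10, __USER_CS = 0x33). */
#include <stdio.h>

static unsigned short selector(unsigned int gdt_index, unsigned int rpl)
{
    return (unsigned short)((gdt_index << 3) | rpl);
}

int main(void)
{
    printf("__KERNEL_CS-style: %#x\n", selector(2, 0));
    printf("__USER_CS-style:   %#x\n", selector(6, 3));
    return 0;
}
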
19304diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
19305index 8d3120f..352b440 100644
19306--- a/arch/x86/include/asm/smap.h
19307+++ b/arch/x86/include/asm/smap.h
19308@@ -25,11 +25,40 @@
19309
19310 #include <asm/alternative-asm.h>
19311
19312+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19313+#define ASM_PAX_OPEN_USERLAND \
19314+ 661: jmp 663f; \
19315+ .pushsection .altinstr_replacement, "a" ; \
19316+ 662: pushq %rax; nop; \
19317+ .popsection ; \
19318+ .pushsection .altinstructions, "a" ; \
19319+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19320+ .popsection ; \
19321+ call __pax_open_userland; \
19322+ popq %rax; \
19323+ 663:
19324+
19325+#define ASM_PAX_CLOSE_USERLAND \
19326+ 661: jmp 663f; \
19327+ .pushsection .altinstr_replacement, "a" ; \
19328+ 662: pushq %rax; nop; \
19329+ .popsection; \
19330+ .pushsection .altinstructions, "a" ; \
19331+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19332+ .popsection; \
19333+ call __pax_close_userland; \
19334+ popq %rax; \
19335+ 663:
19336+#else
19337+#define ASM_PAX_OPEN_USERLAND
19338+#define ASM_PAX_CLOSE_USERLAND
19339+#endif
19340+
19341 #ifdef CONFIG_X86_SMAP
19342
19343 #define ASM_CLAC \
19344 661: ASM_NOP3 ; \
19345- .pushsection .altinstr_replacement, "ax" ; \
19346+ .pushsection .altinstr_replacement, "a" ; \
19347 662: __ASM_CLAC ; \
19348 .popsection ; \
19349 .pushsection .altinstructions, "a" ; \
19350@@ -38,7 +67,7 @@
19351
19352 #define ASM_STAC \
19353 661: ASM_NOP3 ; \
19354- .pushsection .altinstr_replacement, "ax" ; \
19355+ .pushsection .altinstr_replacement, "a" ; \
19356 662: __ASM_STAC ; \
19357 .popsection ; \
19358 .pushsection .altinstructions, "a" ; \
19359@@ -56,6 +85,37 @@
19360
19361 #include <asm/alternative.h>
19362
19363+#define __HAVE_ARCH_PAX_OPEN_USERLAND
19364+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
19365+
19366+extern void __pax_open_userland(void);
19367+static __always_inline unsigned long pax_open_userland(void)
19368+{
19369+
19370+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19371+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
19372+ :
19373+ : [open] "i" (__pax_open_userland)
19374+ : "memory", "rax");
19375+#endif
19376+
19377+ return 0;
19378+}
19379+
19380+extern void __pax_close_userland(void);
19381+static __always_inline unsigned long pax_close_userland(void)
19382+{
19383+
19384+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19385+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
19386+ :
19387+ : [close] "i" (__pax_close_userland)
19388+ : "memory", "rax");
19389+#endif
19390+
19391+ return 0;
19392+}
19393+
19394 #ifdef CONFIG_X86_SMAP
19395
19396 static __always_inline void clac(void)
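
pax_open_userland()/pax_close_userland() above rely on ALTERNATIVE(): at boot, a call instruction is patched over a 5-byte NOP, but only on CPUs with X86_FEATURE_STRONGUDEREF. A portable stand-in for that decide-once dispatch binds a function pointer at startup; the kernel avoids even this indirection by rewriting the instruction stream itself (all names below are made up for the sketch):

#include <stdio.h>

static void open_userland_real(void) { puts("userland window opened"); }
static void open_userland_nop(void)  { /* patched-out: do nothing */ }

static void (*pax_open_userland)(void) = open_userland_nop;

static int cpu_has_feature(void)
{
    return 1;   /* stand-in for a CPUID/feature-flag check */
}

int main(void)
{
    if (cpu_has_feature())               /* boot-time decision */
        pax_open_userland = open_userland_real;

    pax_open_userland();                 /* hot-path call */
    return 0;
}
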
19397diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
19398index 8cd27e0..7f05ec8 100644
19399--- a/arch/x86/include/asm/smp.h
19400+++ b/arch/x86/include/asm/smp.h
19401@@ -35,7 +35,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
19402 /* cpus sharing the last level cache: */
19403 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
19404 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
19405-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
19406+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
19407
19408 static inline struct cpumask *cpu_sibling_mask(int cpu)
19409 {
19410@@ -78,7 +78,7 @@ struct smp_ops {
19411
19412 void (*send_call_func_ipi)(const struct cpumask *mask);
19413 void (*send_call_func_single_ipi)(int cpu);
19414-};
19415+} __no_const;
19416
19417 /* Globals due to paravirt */
19418 extern void set_cpu_sibling_map(int cpu);
19419@@ -190,14 +190,8 @@ extern unsigned disabled_cpus;
19420 extern int safe_smp_processor_id(void);
19421
19422 #elif defined(CONFIG_X86_64_SMP)
19423-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19424-
19425-#define stack_smp_processor_id() \
19426-({ \
19427- struct thread_info *ti; \
19428- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
19429- ti->cpu; \
19430-})
19431+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19432+#define stack_smp_processor_id() raw_smp_processor_id()
19433 #define safe_smp_processor_id() smp_processor_id()
19434
19435 #endif
19436diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
19437index 54f1c80..39362a5 100644
19438--- a/arch/x86/include/asm/spinlock.h
19439+++ b/arch/x86/include/asm/spinlock.h
19440@@ -223,6 +223,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
19441 static inline void arch_read_lock(arch_rwlock_t *rw)
19442 {
19443 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
19444+
19445+#ifdef CONFIG_PAX_REFCOUNT
19446+ "jno 0f\n"
19447+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
19448+ "int $4\n0:\n"
19449+ _ASM_EXTABLE(0b, 0b)
19450+#endif
19451+
19452 "jns 1f\n"
19453 "call __read_lock_failed\n\t"
19454 "1:\n"
19455@@ -232,6 +240,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
19456 static inline void arch_write_lock(arch_rwlock_t *rw)
19457 {
19458 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
19459+
19460+#ifdef CONFIG_PAX_REFCOUNT
19461+ "jno 0f\n"
19462+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
19463+ "int $4\n0:\n"
19464+ _ASM_EXTABLE(0b, 0b)
19465+#endif
19466+
19467 "jz 1f\n"
19468 "call __write_lock_failed\n\t"
19469 "1:\n"
19470@@ -261,13 +277,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
19471
19472 static inline void arch_read_unlock(arch_rwlock_t *rw)
19473 {
19474- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
19475+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
19476+
19477+#ifdef CONFIG_PAX_REFCOUNT
19478+ "jno 0f\n"
19479+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
19480+ "int $4\n0:\n"
19481+ _ASM_EXTABLE(0b, 0b)
19482+#endif
19483+
19484 :"+m" (rw->lock) : : "memory");
19485 }
19486
19487 static inline void arch_write_unlock(arch_rwlock_t *rw)
19488 {
19489- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
19490+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
19491+
19492+#ifdef CONFIG_PAX_REFCOUNT
19493+ "jno 0f\n"
19494+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
19495+ "int $4\n0:\n"
19496+ _ASM_EXTABLE(0b, 0b)
19497+#endif
19498+
19499 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
19500 }
19501 #else
19502diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
19503index 6a99859..03cb807 100644
19504--- a/arch/x86/include/asm/stackprotector.h
19505+++ b/arch/x86/include/asm/stackprotector.h
19506@@ -47,7 +47,7 @@
19507 * head_32 for boot CPU and setup_per_cpu_areas() for others.
19508 */
19509 #define GDT_STACK_CANARY_INIT \
19510- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
19511+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
19512
19513 /*
19514 * Initialize the stackprotector canary value.
19515@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
19516
19517 static inline void load_stack_canary_segment(void)
19518 {
19519-#ifdef CONFIG_X86_32
19520+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19521 asm volatile ("mov %0, %%gs" : : "r" (0));
19522 #endif
19523 }
19524diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
19525index 70bbe39..4ae2bd4 100644
19526--- a/arch/x86/include/asm/stacktrace.h
19527+++ b/arch/x86/include/asm/stacktrace.h
19528@@ -11,28 +11,20 @@
19529
19530 extern int kstack_depth_to_print;
19531
19532-struct thread_info;
19533+struct task_struct;
19534 struct stacktrace_ops;
19535
19536-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
19537- unsigned long *stack,
19538- unsigned long bp,
19539- const struct stacktrace_ops *ops,
19540- void *data,
19541- unsigned long *end,
19542- int *graph);
19543+typedef unsigned long walk_stack_t(struct task_struct *task,
19544+ void *stack_start,
19545+ unsigned long *stack,
19546+ unsigned long bp,
19547+ const struct stacktrace_ops *ops,
19548+ void *data,
19549+ unsigned long *end,
19550+ int *graph);
19551
19552-extern unsigned long
19553-print_context_stack(struct thread_info *tinfo,
19554- unsigned long *stack, unsigned long bp,
19555- const struct stacktrace_ops *ops, void *data,
19556- unsigned long *end, int *graph);
19557-
19558-extern unsigned long
19559-print_context_stack_bp(struct thread_info *tinfo,
19560- unsigned long *stack, unsigned long bp,
19561- const struct stacktrace_ops *ops, void *data,
19562- unsigned long *end, int *graph);
19563+extern walk_stack_t print_context_stack;
19564+extern walk_stack_t print_context_stack_bp;
19565
19566 /* Generic stack tracer with callbacks */
19567
19568@@ -40,7 +32,7 @@ struct stacktrace_ops {
19569 void (*address)(void *data, unsigned long address, int reliable);
19570 /* On negative return stop dumping */
19571 int (*stack)(void *data, char *name);
19572- walk_stack_t walk_stack;
19573+ walk_stack_t *walk_stack;
19574 };
19575
19576 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
19577diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
19578index d7f3b3b..3cc39f1 100644
19579--- a/arch/x86/include/asm/switch_to.h
19580+++ b/arch/x86/include/asm/switch_to.h
19581@@ -108,7 +108,7 @@ do { \
19582 "call __switch_to\n\t" \
19583 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
19584 __switch_canary \
19585- "movq %P[thread_info](%%rsi),%%r8\n\t" \
19586+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
19587 "movq %%rax,%%rdi\n\t" \
19588 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
19589 "jnz ret_from_fork\n\t" \
19590@@ -119,7 +119,7 @@ do { \
19591 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
19592 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
19593 [_tif_fork] "i" (_TIF_FORK), \
19594- [thread_info] "i" (offsetof(struct task_struct, stack)), \
19595+ [thread_info] "m" (current_tinfo), \
19596 [current_task] "m" (current_task) \
19597 __switch_canary_iparam \
19598 : "memory", "cc" __EXTRA_CLOBBER)
19599diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
19600index 8540538..4b0b5e9 100644
19601--- a/arch/x86/include/asm/thread_info.h
19602+++ b/arch/x86/include/asm/thread_info.h
19603@@ -24,7 +24,6 @@ struct exec_domain;
19604 #include <linux/atomic.h>
19605
19606 struct thread_info {
19607- struct task_struct *task; /* main task structure */
19608 struct exec_domain *exec_domain; /* execution domain */
19609 __u32 flags; /* low level flags */
19610 __u32 status; /* thread synchronous flags */
19611@@ -33,13 +32,13 @@ struct thread_info {
19612 mm_segment_t addr_limit;
19613 struct restart_block restart_block;
19614 void __user *sysenter_return;
19615+ unsigned long lowest_stack;
19616 unsigned int sig_on_uaccess_error:1;
19617 unsigned int uaccess_err:1; /* uaccess failed */
19618 };
19619
19620-#define INIT_THREAD_INFO(tsk) \
19621+#define INIT_THREAD_INFO \
19622 { \
19623- .task = &tsk, \
19624 .exec_domain = &default_exec_domain, \
19625 .flags = 0, \
19626 .cpu = 0, \
19627@@ -50,7 +49,7 @@ struct thread_info {
19628 }, \
19629 }
19630
19631-#define init_thread_info (init_thread_union.thread_info)
19632+#define init_thread_info (init_thread_union.stack)
19633 #define init_stack (init_thread_union.stack)
19634
19635 #else /* !__ASSEMBLY__ */
19636@@ -91,6 +90,7 @@ struct thread_info {
19637 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
19638 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
19639 #define TIF_X32 30 /* 32-bit native x86-64 binary */
19640+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
19641
19642 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
19643 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
19644@@ -115,17 +115,18 @@ struct thread_info {
19645 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
19646 #define _TIF_ADDR32 (1 << TIF_ADDR32)
19647 #define _TIF_X32 (1 << TIF_X32)
19648+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
19649
19650 /* work to do in syscall_trace_enter() */
19651 #define _TIF_WORK_SYSCALL_ENTRY \
19652 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
19653 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
19654- _TIF_NOHZ)
19655+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19656
19657 /* work to do in syscall_trace_leave() */
19658 #define _TIF_WORK_SYSCALL_EXIT \
19659 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
19660- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
19661+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
19662
19663 /* work to do on interrupt/exception return */
19664 #define _TIF_WORK_MASK \
19665@@ -136,7 +137,7 @@ struct thread_info {
19666 /* work to do on any return to user space */
19667 #define _TIF_ALLWORK_MASK \
19668 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
19669- _TIF_NOHZ)
19670+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19671
19672 /* Only used for 64 bit */
19673 #define _TIF_DO_NOTIFY_MASK \
19674@@ -151,7 +152,6 @@ struct thread_info {
19675 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
19676
19677 #define STACK_WARN (THREAD_SIZE/8)
19678-#define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8))
19679
19680 /*
19681 * macros/functions for gaining access to the thread information structure
19682@@ -162,26 +162,18 @@ struct thread_info {
19683
19684 DECLARE_PER_CPU(unsigned long, kernel_stack);
19685
19686+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
19687+
19688 static inline struct thread_info *current_thread_info(void)
19689 {
19690- struct thread_info *ti;
19691- ti = (void *)(this_cpu_read_stable(kernel_stack) +
19692- KERNEL_STACK_OFFSET - THREAD_SIZE);
19693- return ti;
19694+ return this_cpu_read_stable(current_tinfo);
19695 }
19696
19697 #else /* !__ASSEMBLY__ */
19698
19699 /* how to get the thread information struct from ASM */
19700 #define GET_THREAD_INFO(reg) \
19701- _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
19702- _ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ;
19703-
19704-/*
19705- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
19706- * a certain register (to be used in assembler memory operands).
19707- */
19708-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
19709+ _ASM_MOV PER_CPU_VAR(current_tinfo),reg ;
19710
19711 #endif
19712
19713@@ -237,5 +229,12 @@ static inline bool is_ia32_task(void)
19714 extern void arch_task_cache_init(void);
19715 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
19716 extern void arch_release_task_struct(struct task_struct *tsk);
19717+
19718+#define __HAVE_THREAD_FUNCTIONS
19719+#define task_thread_info(task) (&(task)->tinfo)
19720+#define task_stack_page(task) ((task)->stack)
19721+#define setup_thread_stack(p, org) do {} while (0)
19722+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
19723+
19724 #endif
19725 #endif /* _ASM_X86_THREAD_INFO_H */
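
thread_info moves off the kernel stack into task_struct here, and current_thread_info() becomes a per-CPU pointer read. What this replaces is the classic mask-the-stack-pointer derivation (cf. the stack_smp_processor_id() removal in smp.h above), sketched below with an assumed 16 KiB THREAD_SIZE:

/* Old-style derivation: a stack-resident thread_info sits at the
 * bottom of the THREAD_SIZE-aligned kernel stack, so masking the
 * stack pointer recovers it. The hunks above retire this in favor of
 * a per-CPU current_tinfo pointer. */
#include <assert.h>

#define THREAD_SIZE (16 * 1024UL)

static unsigned long thread_info_of(unsigned long sp)
{
    return sp & ~(THREAD_SIZE - 1);   /* bottom of the current stack */
}

int main(void)
{
    unsigned long stack_base = 0x100000UL;            /* aligned base */
    unsigned long sp = stack_base + THREAD_SIZE - 64; /* somewhere on it */

    assert(thread_info_of(sp) == stack_base);
    return 0;
}
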
19726diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
19727index 04905bf..1178cdf 100644
19728--- a/arch/x86/include/asm/tlbflush.h
19729+++ b/arch/x86/include/asm/tlbflush.h
19730@@ -17,18 +17,44 @@
19731
19732 static inline void __native_flush_tlb(void)
19733 {
19734+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19735+ u64 descriptor[2];
19736+
19737+ descriptor[0] = PCID_KERNEL;
19738+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_NONGLOBAL) : "memory");
19739+ return;
19740+ }
19741+
19742+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19743+ if (static_cpu_has(X86_FEATURE_PCID)) {
19744+ unsigned int cpu = raw_get_cpu();
19745+
19746+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
19747+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
19748+ raw_put_cpu_no_resched();
19749+ return;
19750+ }
19751+#endif
19752+
19753 native_write_cr3(native_read_cr3());
19754 }
19755
19756 static inline void __native_flush_tlb_global_irq_disabled(void)
19757 {
19758- unsigned long cr4;
19759+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19760+ u64 descriptor[2];
19761
19762- cr4 = native_read_cr4();
19763- /* clear PGE */
19764- native_write_cr4(cr4 & ~X86_CR4_PGE);
19765- /* write old PGE again and flush TLBs */
19766- native_write_cr4(cr4);
19767+ descriptor[0] = PCID_KERNEL;
19768+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
19769+ } else {
19770+ unsigned long cr4;
19771+
19772+ cr4 = native_read_cr4();
19773+ /* clear PGE */
19774+ native_write_cr4(cr4 & ~X86_CR4_PGE);
19775+ /* write old PGE again and flush TLBs */
19776+ native_write_cr4(cr4);
19777+ }
19778 }
19779
19780 static inline void __native_flush_tlb_global(void)
19781@@ -49,6 +75,41 @@ static inline void __native_flush_tlb_global(void)
19782
19783 static inline void __native_flush_tlb_single(unsigned long addr)
19784 {
19785+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19786+ u64 descriptor[2];
19787+
19788+ descriptor[0] = PCID_KERNEL;
19789+ descriptor[1] = addr;
19790+
19791+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19792+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
19793+ if (addr < TASK_SIZE_MAX)
19794+ descriptor[1] += pax_user_shadow_base;
19795+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19796+ }
19797+
19798+ descriptor[0] = PCID_USER;
19799+ descriptor[1] = addr;
19800+#endif
19801+
19802+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19803+ return;
19804+ }
19805+
19806+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19807+ if (static_cpu_has(X86_FEATURE_PCID)) {
19808+ unsigned int cpu = raw_get_cpu();
19809+
19810+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
19811+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19812+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
19813+ raw_put_cpu_no_resched();
19814+
19815+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
19816+ addr += pax_user_shadow_base;
19817+ }
19818+#endif
19819+
19820 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19821 }
19822
19823diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
19824index 0d592e0..526f797 100644
19825--- a/arch/x86/include/asm/uaccess.h
19826+++ b/arch/x86/include/asm/uaccess.h
19827@@ -7,6 +7,7 @@
19828 #include <linux/compiler.h>
19829 #include <linux/thread_info.h>
19830 #include <linux/string.h>
19831+#include <linux/spinlock.h>
19832 #include <asm/asm.h>
19833 #include <asm/page.h>
19834 #include <asm/smap.h>
19835@@ -29,7 +30,12 @@
19836
19837 #define get_ds() (KERNEL_DS)
19838 #define get_fs() (current_thread_info()->addr_limit)
19839+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19840+void __set_fs(mm_segment_t x);
19841+void set_fs(mm_segment_t x);
19842+#else
19843 #define set_fs(x) (current_thread_info()->addr_limit = (x))
19844+#endif
19845
19846 #define segment_eq(a, b) ((a).seg == (b).seg)
19847
19848@@ -85,8 +91,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
19849 * checks that the pointer is in the user space range - after calling
19850 * this function, memory access functions may still return -EFAULT.
19851 */
19852-#define access_ok(type, addr, size) \
19853- likely(!__range_not_ok(addr, size, user_addr_max()))
19854+extern int _cond_resched(void);
19855+#define access_ok_noprefault(type, addr, size) (likely(!__range_not_ok(addr, size, user_addr_max())))
19856+#define access_ok(type, addr, size) \
19857+({ \
19858+ unsigned long __size = size; \
19859+ unsigned long __addr = (unsigned long)addr; \
19860+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
19861+ if (__ret_ao && __size) { \
19862+ unsigned long __addr_ao = __addr & PAGE_MASK; \
19863+ unsigned long __end_ao = __addr + __size - 1; \
19864+ if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
19865+ while (__addr_ao <= __end_ao) { \
19866+ char __c_ao; \
19867+ __addr_ao += PAGE_SIZE; \
19868+ if (__size > PAGE_SIZE) \
19869+ _cond_resched(); \
19870+ if (__get_user(__c_ao, (char __user *)__addr)) \
19871+ break; \
19872+ if (type != VERIFY_WRITE) { \
19873+ __addr = __addr_ao; \
19874+ continue; \
19875+ } \
19876+ if (__put_user(__c_ao, (char __user *)__addr)) \
19877+ break; \
19878+ __addr = __addr_ao; \
19879+ } \
19880+ } \
19881+ } \
19882+ __ret_ao; \
19883+})
19884
19885 /*
19886 * The exception table consists of pairs of addresses relative to the
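
The expanded access_ok() above is more than a range check: it walks the region a page at a time, prefaulting each page with __get_user() (and rewriting the byte via __put_user() under VERIFY_WRITE), with _cond_resched() between pages on large ranges. The touch-one-byte-per-page core, as a user-space sketch:

#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL

static void prefault(volatile char *buf, size_t len, int write)
{
    for (size_t off = 0; off < len; off += PAGE_SIZE) {
        char c = buf[off];      /* fault the page in (read) */
        if (write)
            buf[off] = c;       /* dirty it for write access */
    }
}

int main(void)
{
    size_t len = 10 * PAGE_SIZE;
    char *buf = malloc(len);

    if (!buf)
        return 1;
    prefault(buf, len, 1);
    memset(buf, 0, len);
    free(buf);
    return 0;
}
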
19887@@ -176,10 +210,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19888 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
19889 __chk_user_ptr(ptr); \
19890 might_fault(); \
19891+ pax_open_userland(); \
19892 asm volatile("call __get_user_%P3" \
19893 : "=a" (__ret_gu), "=r" (__val_gu) \
19894 : "0" (ptr), "i" (sizeof(*(ptr)))); \
19895 (x) = (__typeof__(*(ptr))) __val_gu; \
19896+ pax_close_userland(); \
19897 __ret_gu; \
19898 })
19899
19900@@ -187,13 +223,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19901 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
19902 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
19903
19904-
19905+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19906+#define __copyuser_seg "gs;"
19907+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
19908+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
19909+#else
19910+#define __copyuser_seg
19911+#define __COPYUSER_SET_ES
19912+#define __COPYUSER_RESTORE_ES
19913+#endif
19914
19915 #ifdef CONFIG_X86_32
19916 #define __put_user_asm_u64(x, addr, err, errret) \
19917 asm volatile(ASM_STAC "\n" \
19918- "1: movl %%eax,0(%2)\n" \
19919- "2: movl %%edx,4(%2)\n" \
19920+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
19921+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
19922 "3: " ASM_CLAC "\n" \
19923 ".section .fixup,\"ax\"\n" \
19924 "4: movl %3,%0\n" \
19925@@ -206,8 +250,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19926
19927 #define __put_user_asm_ex_u64(x, addr) \
19928 asm volatile(ASM_STAC "\n" \
19929- "1: movl %%eax,0(%1)\n" \
19930- "2: movl %%edx,4(%1)\n" \
19931+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
19932+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
19933 "3: " ASM_CLAC "\n" \
19934 _ASM_EXTABLE_EX(1b, 2b) \
19935 _ASM_EXTABLE_EX(2b, 3b) \
19936@@ -257,7 +301,8 @@ extern void __put_user_8(void);
19937 __typeof__(*(ptr)) __pu_val; \
19938 __chk_user_ptr(ptr); \
19939 might_fault(); \
19940- __pu_val = x; \
19941+ __pu_val = (x); \
19942+ pax_open_userland(); \
19943 switch (sizeof(*(ptr))) { \
19944 case 1: \
19945 __put_user_x(1, __pu_val, ptr, __ret_pu); \
19946@@ -275,6 +320,7 @@ extern void __put_user_8(void);
19947 __put_user_x(X, __pu_val, ptr, __ret_pu); \
19948 break; \
19949 } \
19950+ pax_close_userland(); \
19951 __ret_pu; \
19952 })
19953
19954@@ -355,8 +401,10 @@ do { \
19955 } while (0)
19956
19957 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19958+do { \
19959+ pax_open_userland(); \
19960 asm volatile(ASM_STAC "\n" \
19961- "1: mov"itype" %2,%"rtype"1\n" \
19962+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
19963 "2: " ASM_CLAC "\n" \
19964 ".section .fixup,\"ax\"\n" \
19965 "3: mov %3,%0\n" \
19966@@ -364,8 +412,10 @@ do { \
19967 " jmp 2b\n" \
19968 ".previous\n" \
19969 _ASM_EXTABLE(1b, 3b) \
19970- : "=r" (err), ltype(x) \
19971- : "m" (__m(addr)), "i" (errret), "0" (err))
19972+ : "=r" (err), ltype (x) \
19973+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
19974+ pax_close_userland(); \
19975+} while (0)
19976
19977 #define __get_user_size_ex(x, ptr, size) \
19978 do { \
19979@@ -389,7 +439,7 @@ do { \
19980 } while (0)
19981
19982 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
19983- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
19984+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
19985 "2:\n" \
19986 _ASM_EXTABLE_EX(1b, 2b) \
19987 : ltype(x) : "m" (__m(addr)))
19988@@ -406,13 +456,24 @@ do { \
19989 int __gu_err; \
19990 unsigned long __gu_val; \
19991 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
19992- (x) = (__force __typeof__(*(ptr)))__gu_val; \
19993+ (x) = (__typeof__(*(ptr)))__gu_val; \
19994 __gu_err; \
19995 })
19996
19997 /* FIXME: this hack is definitely wrong -AK */
19998 struct __large_struct { unsigned long buf[100]; };
19999-#define __m(x) (*(struct __large_struct __user *)(x))
20000+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20001+#define ____m(x) \
20002+({ \
20003+ unsigned long ____x = (unsigned long)(x); \
20004+ if (____x < pax_user_shadow_base) \
20005+ ____x += pax_user_shadow_base; \
20006+ (typeof(x))____x; \
20007+})
20008+#else
20009+#define ____m(x) (x)
20010+#endif
20011+#define __m(x) (*(struct __large_struct __user *)____m(x))
20012
20013 /*
20014 * Tell gcc we read from memory instead of writing: this is because
20015@@ -420,8 +481,10 @@ struct __large_struct { unsigned long buf[100]; };
20016 * aliasing issues.
20017 */
20018 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
20019+do { \
20020+ pax_open_userland(); \
20021 asm volatile(ASM_STAC "\n" \
20022- "1: mov"itype" %"rtype"1,%2\n" \
20023+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
20024 "2: " ASM_CLAC "\n" \
20025 ".section .fixup,\"ax\"\n" \
20026 "3: mov %3,%0\n" \
20027@@ -429,10 +492,12 @@ struct __large_struct { unsigned long buf[100]; };
20028 ".previous\n" \
20029 _ASM_EXTABLE(1b, 3b) \
20030 : "=r"(err) \
20031- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
20032+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
20033+ pax_close_userland(); \
20034+} while (0)
20035
20036 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
20037- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
20038+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
20039 "2:\n" \
20040 _ASM_EXTABLE_EX(1b, 2b) \
20041 : : ltype(x), "m" (__m(addr)))
20042@@ -442,11 +507,13 @@ struct __large_struct { unsigned long buf[100]; };
20043 */
20044 #define uaccess_try do { \
20045 current_thread_info()->uaccess_err = 0; \
20046+ pax_open_userland(); \
20047 stac(); \
20048 barrier();
20049
20050 #define uaccess_catch(err) \
20051 clac(); \
20052+ pax_close_userland(); \
20053 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
20054 } while (0)
20055
20056@@ -471,8 +538,12 @@ struct __large_struct { unsigned long buf[100]; };
20057 * On error, the variable @x is set to zero.
20058 */
20059
20060+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20061+#define __get_user(x, ptr) get_user((x), (ptr))
20062+#else
20063 #define __get_user(x, ptr) \
20064 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
20065+#endif
20066
20067 /**
20068 * __put_user: - Write a simple value into user space, with less checking.
20069@@ -494,8 +565,12 @@ struct __large_struct { unsigned long buf[100]; };
20070 * Returns zero on success, or -EFAULT on error.
20071 */
20072
20073+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20074+#define __put_user(x, ptr) put_user((x), (ptr))
20075+#else
20076 #define __put_user(x, ptr) \
20077 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
20078+#endif
20079
20080 #define __get_user_unaligned __get_user
20081 #define __put_user_unaligned __put_user
20082@@ -513,7 +588,7 @@ struct __large_struct { unsigned long buf[100]; };
20083 #define get_user_ex(x, ptr) do { \
20084 unsigned long __gue_val; \
20085 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
20086- (x) = (__force __typeof__(*(ptr)))__gue_val; \
20087+ (x) = (__typeof__(*(ptr)))__gue_val; \
20088 } while (0)
20089
20090 #define put_user_try uaccess_try
20091@@ -542,18 +617,19 @@ extern void __cmpxchg_wrong_size(void)
20092 __typeof__(ptr) __uval = (uval); \
20093 __typeof__(*(ptr)) __old = (old); \
20094 __typeof__(*(ptr)) __new = (new); \
20095+ pax_open_userland(); \
20096 switch (size) { \
20097 case 1: \
20098 { \
20099 asm volatile("\t" ASM_STAC "\n" \
20100- "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
20101+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgb %4, %2\n"\
20102 "2:\t" ASM_CLAC "\n" \
20103 "\t.section .fixup, \"ax\"\n" \
20104 "3:\tmov %3, %0\n" \
20105 "\tjmp 2b\n" \
20106 "\t.previous\n" \
20107 _ASM_EXTABLE(1b, 3b) \
20108- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20109+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20110 : "i" (-EFAULT), "q" (__new), "1" (__old) \
20111 : "memory" \
20112 ); \
20113@@ -562,14 +638,14 @@ extern void __cmpxchg_wrong_size(void)
20114 case 2: \
20115 { \
20116 asm volatile("\t" ASM_STAC "\n" \
20117- "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
20118+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgw %4, %2\n"\
20119 "2:\t" ASM_CLAC "\n" \
20120 "\t.section .fixup, \"ax\"\n" \
20121 "3:\tmov %3, %0\n" \
20122 "\tjmp 2b\n" \
20123 "\t.previous\n" \
20124 _ASM_EXTABLE(1b, 3b) \
20125- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20126+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20127 : "i" (-EFAULT), "r" (__new), "1" (__old) \
20128 : "memory" \
20129 ); \
20130@@ -578,14 +654,14 @@ extern void __cmpxchg_wrong_size(void)
20131 case 4: \
20132 { \
20133 asm volatile("\t" ASM_STAC "\n" \
20134- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
20135+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"\
20136 "2:\t" ASM_CLAC "\n" \
20137 "\t.section .fixup, \"ax\"\n" \
20138 "3:\tmov %3, %0\n" \
20139 "\tjmp 2b\n" \
20140 "\t.previous\n" \
20141 _ASM_EXTABLE(1b, 3b) \
20142- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20143+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20144 : "i" (-EFAULT), "r" (__new), "1" (__old) \
20145 : "memory" \
20146 ); \
20147@@ -597,14 +673,14 @@ extern void __cmpxchg_wrong_size(void)
20148 __cmpxchg_wrong_size(); \
20149 \
20150 asm volatile("\t" ASM_STAC "\n" \
20151- "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
20152+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgq %4, %2\n"\
20153 "2:\t" ASM_CLAC "\n" \
20154 "\t.section .fixup, \"ax\"\n" \
20155 "3:\tmov %3, %0\n" \
20156 "\tjmp 2b\n" \
20157 "\t.previous\n" \
20158 _ASM_EXTABLE(1b, 3b) \
20159- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20160+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20161 : "i" (-EFAULT), "r" (__new), "1" (__old) \
20162 : "memory" \
20163 ); \
20164@@ -613,6 +689,7 @@ extern void __cmpxchg_wrong_size(void)
20165 default: \
20166 __cmpxchg_wrong_size(); \
20167 } \
20168+ pax_close_userland(); \
20169 *__uval = __old; \
20170 __ret; \
20171 })
20172@@ -636,17 +713,6 @@ extern struct movsl_mask {
20173
20174 #define ARCH_HAS_NOCACHE_UACCESS 1
20175
20176-#ifdef CONFIG_X86_32
20177-# include <asm/uaccess_32.h>
20178-#else
20179-# include <asm/uaccess_64.h>
20180-#endif
20181-
20182-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
20183- unsigned n);
20184-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
20185- unsigned n);
20186-
20187 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
20188 # define copy_user_diag __compiletime_error
20189 #else
20190@@ -656,7 +722,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
20191 extern void copy_user_diag("copy_from_user() buffer size is too small")
20192 copy_from_user_overflow(void);
20193 extern void copy_user_diag("copy_to_user() buffer size is too small")
20194-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
20195+copy_to_user_overflow(void);
20196
20197 #undef copy_user_diag
20198
20199@@ -669,7 +735,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
20200
20201 extern void
20202 __compiletime_warning("copy_to_user() buffer size is not provably correct")
20203-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
20204+__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
20205 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
20206
20207 #else
20208@@ -684,10 +750,16 @@ __copy_from_user_overflow(int size, unsigned long count)
20209
20210 #endif
20211
20212+#ifdef CONFIG_X86_32
20213+# include <asm/uaccess_32.h>
20214+#else
20215+# include <asm/uaccess_64.h>
20216+#endif
20217+
20218 static inline unsigned long __must_check
20219 copy_from_user(void *to, const void __user *from, unsigned long n)
20220 {
20221- int sz = __compiletime_object_size(to);
20222+ size_t sz = __compiletime_object_size(to);
20223
20224 might_fault();
20225
20226@@ -709,12 +781,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
20227 * case, and do only runtime checking for non-constant sizes.
20228 */
20229
20230- if (likely(sz < 0 || sz >= n))
20231- n = _copy_from_user(to, from, n);
20232- else if(__builtin_constant_p(n))
20233- copy_from_user_overflow();
20234- else
20235- __copy_from_user_overflow(sz, n);
20236+ if (likely(sz != (size_t)-1 && sz < n)) {
20237+ if(__builtin_constant_p(n))
20238+ copy_from_user_overflow();
20239+ else
20240+ __copy_from_user_overflow(sz, n);
20241+ } else if (access_ok(VERIFY_READ, from, n))
20242+ n = __copy_from_user(to, from, n);
20243+ else if ((long)n > 0)
20244+ memset(to, 0, n);
20245
20246 return n;
20247 }
20248@@ -722,17 +797,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
20249 static inline unsigned long __must_check
20250 copy_to_user(void __user *to, const void *from, unsigned long n)
20251 {
20252- int sz = __compiletime_object_size(from);
20253+ size_t sz = __compiletime_object_size(from);
20254
20255 might_fault();
20256
20257 /* See the comment in copy_from_user() above. */
20258- if (likely(sz < 0 || sz >= n))
20259- n = _copy_to_user(to, from, n);
20260- else if(__builtin_constant_p(n))
20261- copy_to_user_overflow();
20262- else
20263- __copy_to_user_overflow(sz, n);
20264+ if (likely(sz != (size_t)-1 && sz < n)) {
20265+ if(__builtin_constant_p(n))
20266+ copy_to_user_overflow();
20267+ else
20268+ __copy_to_user_overflow(sz, n);
20269+ } else if (access_ok(VERIFY_WRITE, to, n))
20270+ n = __copy_to_user(to, from, n);
20271
20272 return n;
20273 }
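The copy_from_user()/copy_to_user() hunks above invert the old size test: __compiletime_object_size() yields -1 when the object size is unknown, and with sz widened from int to size_t that sentinel must be compared as (size_t)-1. The rewritten logic reports a compile-time overflow when a known object is smaller than the request, otherwise performs access_ok() itself, and on a failed check scrubs the destination so stale kernel bytes never reach the caller. A minimal userspace model of the decision tree (all names are illustrative stand-ins, not kernel API):

#include <stdio.h>
#include <string.h>

#define OBJ_SIZE_UNKNOWN ((size_t)-1)   /* __compiletime_object_size() sentinel */

/* Returns the number of bytes NOT copied, like the kernel helper. */
static size_t model_copy_from_user(void *to, size_t sz, size_t n, int ok)
{
	if (sz != OBJ_SIZE_UNKNOWN && sz < n)
		return n;               /* flagged as a buffer overflow */
	if (ok)
		return 0;               /* would call __copy_from_user() */
	if ((long)n > 0)
		memset(to, 0, n);       /* failed access_ok: scrub, don't leak */
	return n;
}

int main(void)
{
	char buf[16];
	printf("overflow case: %zu\n",
	       model_copy_from_user(buf, sizeof buf, 32, 1));
	printf("unknown size:  %zu\n",
	       model_copy_from_user(buf, OBJ_SIZE_UNKNOWN, 16, 1));
	return 0;
}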
20274diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
20275index 3c03a5d..1071638 100644
20276--- a/arch/x86/include/asm/uaccess_32.h
20277+++ b/arch/x86/include/asm/uaccess_32.h
20278@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
20279 static __always_inline unsigned long __must_check
20280 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
20281 {
20282+ if ((long)n < 0)
20283+ return n;
20284+
20285+ check_object_size(from, n, true);
20286+
20287 if (__builtin_constant_p(n)) {
20288 unsigned long ret;
20289
20290@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
20291 __copy_to_user(void __user *to, const void *from, unsigned long n)
20292 {
20293 might_fault();
20294+
20295 return __copy_to_user_inatomic(to, from, n);
20296 }
20297
20298 static __always_inline unsigned long
20299 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
20300 {
20301+ if ((long)n < 0)
20302+ return n;
20303+
20304 /* Avoid zeroing the tail if the copy fails..
20305 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
20306 * but as the zeroing behaviour is only significant when n is not
20307@@ -137,6 +146,12 @@ static __always_inline unsigned long
20308 __copy_from_user(void *to, const void __user *from, unsigned long n)
20309 {
20310 might_fault();
20311+
20312+ if ((long)n < 0)
20313+ return n;
20314+
20315+ check_object_size(to, n, false);
20316+
20317 if (__builtin_constant_p(n)) {
20318 unsigned long ret;
20319
20320@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
20321 const void __user *from, unsigned long n)
20322 {
20323 might_fault();
20324+
20325+ if ((long)n < 0)
20326+ return n;
20327+
20328 if (__builtin_constant_p(n)) {
20329 unsigned long ret;
20330
20331@@ -181,7 +200,10 @@ static __always_inline unsigned long
20332 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
20333 unsigned long n)
20334 {
20335- return __copy_from_user_ll_nocache_nozero(to, from, n);
20336+ if ((long)n < 0)
20337+ return n;
20338+
20339+ return __copy_from_user_ll_nocache_nozero(to, from, n);
20340 }
20341
20342 #endif /* _ASM_X86_UACCESS_32_H */
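Each 32-bit copy helper above gains a "(long)n < 0" guard: a negative length that reaches an unsigned parameter has already been sign-extended into an enormous value, so viewing the count as signed rejects it before any copying starts, while check_object_size() lets the PAX_USERCOPY instrumentation validate heap and stack bounds. A two-line demonstration of what the cast catches:

#include <stdio.h>

int main(void)
{
	int bad = -1;                   /* e.g. an error code misused as a length */
	unsigned long n = bad;          /* sign-extends to 0xffff...ffff */
	printf("n = %#lx, (long)n < 0 -> %d\n", n, (long)n < 0);
	return 0;
}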
20343diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
20344index 12a26b9..206c200 100644
20345--- a/arch/x86/include/asm/uaccess_64.h
20346+++ b/arch/x86/include/asm/uaccess_64.h
20347@@ -10,6 +10,9 @@
20348 #include <asm/alternative.h>
20349 #include <asm/cpufeature.h>
20350 #include <asm/page.h>
20351+#include <asm/pgtable.h>
20352+
20353+#define set_fs(x) (current_thread_info()->addr_limit = (x))
20354
20355 /*
20356 * Copy To/From Userspace
20357@@ -17,14 +20,14 @@
20358
20359 /* Handles exceptions in both to and from, but doesn't do access_ok */
20360 __must_check unsigned long
20361-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
20362+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
20363 __must_check unsigned long
20364-copy_user_generic_string(void *to, const void *from, unsigned len);
20365+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
20366 __must_check unsigned long
20367-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
20368+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
20369
20370 static __always_inline __must_check unsigned long
20371-copy_user_generic(void *to, const void *from, unsigned len)
20372+copy_user_generic(void *to, const void *from, unsigned long len)
20373 {
20374 unsigned ret;
20375
20376@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
20377 }
20378
20379 __must_check unsigned long
20380-copy_in_user(void __user *to, const void __user *from, unsigned len);
20381+copy_in_user(void __user *to, const void __user *from, unsigned long len);
20382
20383 static __always_inline __must_check
20384-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
20385+unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
20386 {
20387- int ret = 0;
20388+ size_t sz = __compiletime_object_size(dst);
20389+ unsigned ret = 0;
20390+
20391+ if (size > INT_MAX)
20392+ return size;
20393+
20394+ check_object_size(dst, size, false);
20395+
20396+#ifdef CONFIG_PAX_MEMORY_UDEREF
20397+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20398+ return size;
20399+#endif
20400+
20401+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20402+ if(__builtin_constant_p(size))
20403+ copy_from_user_overflow();
20404+ else
20405+ __copy_from_user_overflow(sz, size);
20406+ return size;
20407+ }
20408
20409 if (!__builtin_constant_p(size))
20410- return copy_user_generic(dst, (__force void *)src, size);
20411+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20412 switch (size) {
20413- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
20414+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
20415 ret, "b", "b", "=q", 1);
20416 return ret;
20417- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
20418+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
20419 ret, "w", "w", "=r", 2);
20420 return ret;
20421- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
20422+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
20423 ret, "l", "k", "=r", 4);
20424 return ret;
20425- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
20426+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20427 ret, "q", "", "=r", 8);
20428 return ret;
20429 case 10:
20430- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20431+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20432 ret, "q", "", "=r", 10);
20433 if (unlikely(ret))
20434 return ret;
20435 __get_user_asm(*(u16 *)(8 + (char *)dst),
20436- (u16 __user *)(8 + (char __user *)src),
20437+ (const u16 __user *)(8 + (const char __user *)src),
20438 ret, "w", "w", "=r", 2);
20439 return ret;
20440 case 16:
20441- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20442+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20443 ret, "q", "", "=r", 16);
20444 if (unlikely(ret))
20445 return ret;
20446 __get_user_asm(*(u64 *)(8 + (char *)dst),
20447- (u64 __user *)(8 + (char __user *)src),
20448+ (const u64 __user *)(8 + (const char __user *)src),
20449 ret, "q", "", "=r", 8);
20450 return ret;
20451 default:
20452- return copy_user_generic(dst, (__force void *)src, size);
20453+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20454 }
20455 }
20456
20457 static __always_inline __must_check
20458-int __copy_from_user(void *dst, const void __user *src, unsigned size)
20459+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
20460 {
20461 might_fault();
20462 return __copy_from_user_nocheck(dst, src, size);
20463 }
20464
20465 static __always_inline __must_check
20466-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
20467+unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
20468 {
20469- int ret = 0;
20470+ size_t sz = __compiletime_object_size(src);
20471+ unsigned ret = 0;
20472+
20473+ if (size > INT_MAX)
20474+ return size;
20475+
20476+ check_object_size(src, size, true);
20477+
20478+#ifdef CONFIG_PAX_MEMORY_UDEREF
20479+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20480+ return size;
20481+#endif
20482+
20483+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20484+ if(__builtin_constant_p(size))
20485+ copy_to_user_overflow();
20486+ else
20487+ __copy_to_user_overflow(sz, size);
20488+ return size;
20489+ }
20490
20491 if (!__builtin_constant_p(size))
20492- return copy_user_generic((__force void *)dst, src, size);
20493+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20494 switch (size) {
20495- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
20496+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
20497 ret, "b", "b", "iq", 1);
20498 return ret;
20499- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
20500+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
20501 ret, "w", "w", "ir", 2);
20502 return ret;
20503- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
20504+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
20505 ret, "l", "k", "ir", 4);
20506 return ret;
20507- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
20508+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20509 ret, "q", "", "er", 8);
20510 return ret;
20511 case 10:
20512- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20513+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20514 ret, "q", "", "er", 10);
20515 if (unlikely(ret))
20516 return ret;
20517 asm("":::"memory");
20518- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
20519+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
20520 ret, "w", "w", "ir", 2);
20521 return ret;
20522 case 16:
20523- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20524+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20525 ret, "q", "", "er", 16);
20526 if (unlikely(ret))
20527 return ret;
20528 asm("":::"memory");
20529- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
20530+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
20531 ret, "q", "", "er", 8);
20532 return ret;
20533 default:
20534- return copy_user_generic((__force void *)dst, src, size);
20535+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20536 }
20537 }
20538
20539 static __always_inline __must_check
20540-int __copy_to_user(void __user *dst, const void *src, unsigned size)
20541+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
20542 {
20543 might_fault();
20544 return __copy_to_user_nocheck(dst, src, size);
20545 }
20546
20547 static __always_inline __must_check
20548-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20549+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20550 {
20551- int ret = 0;
20552+ unsigned ret = 0;
20553
20554 might_fault();
20555+
20556+ if (size > INT_MAX)
20557+ return size;
20558+
20559+#ifdef CONFIG_PAX_MEMORY_UDEREF
20560+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20561+ return size;
20562+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20563+ return size;
20564+#endif
20565+
20566 if (!__builtin_constant_p(size))
20567- return copy_user_generic((__force void *)dst,
20568- (__force void *)src, size);
20569+ return copy_user_generic((__force_kernel void *)____m(dst),
20570+ (__force_kernel const void *)____m(src), size);
20571 switch (size) {
20572 case 1: {
20573 u8 tmp;
20574- __get_user_asm(tmp, (u8 __user *)src,
20575+ __get_user_asm(tmp, (const u8 __user *)src,
20576 ret, "b", "b", "=q", 1);
20577 if (likely(!ret))
20578 __put_user_asm(tmp, (u8 __user *)dst,
20579@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20580 }
20581 case 2: {
20582 u16 tmp;
20583- __get_user_asm(tmp, (u16 __user *)src,
20584+ __get_user_asm(tmp, (const u16 __user *)src,
20585 ret, "w", "w", "=r", 2);
20586 if (likely(!ret))
20587 __put_user_asm(tmp, (u16 __user *)dst,
20588@@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20589
20590 case 4: {
20591 u32 tmp;
20592- __get_user_asm(tmp, (u32 __user *)src,
20593+ __get_user_asm(tmp, (const u32 __user *)src,
20594 ret, "l", "k", "=r", 4);
20595 if (likely(!ret))
20596 __put_user_asm(tmp, (u32 __user *)dst,
20597@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20598 }
20599 case 8: {
20600 u64 tmp;
20601- __get_user_asm(tmp, (u64 __user *)src,
20602+ __get_user_asm(tmp, (const u64 __user *)src,
20603 ret, "q", "", "=r", 8);
20604 if (likely(!ret))
20605 __put_user_asm(tmp, (u64 __user *)dst,
20606@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20607 return ret;
20608 }
20609 default:
20610- return copy_user_generic((__force void *)dst,
20611- (__force void *)src, size);
20612+ return copy_user_generic((__force_kernel void *)____m(dst),
20613+ (__force_kernel const void *)____m(src), size);
20614 }
20615 }
20616
20617-static __must_check __always_inline int
20618-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
20619+static __must_check __always_inline unsigned long
20620+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
20621 {
20622 return __copy_from_user_nocheck(dst, src, size);
20623 }
20624
20625-static __must_check __always_inline int
20626-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
20627+static __must_check __always_inline unsigned long
20628+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
20629 {
20630 return __copy_to_user_nocheck(dst, src, size);
20631 }
20632
20633-extern long __copy_user_nocache(void *dst, const void __user *src,
20634- unsigned size, int zerorest);
20635+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
20636+ unsigned long size, int zerorest);
20637
20638-static inline int
20639-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
20640+static inline unsigned long
20641+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
20642 {
20643 might_fault();
20644+
20645+ if (size > INT_MAX)
20646+ return size;
20647+
20648+#ifdef CONFIG_PAX_MEMORY_UDEREF
20649+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20650+ return size;
20651+#endif
20652+
20653 return __copy_user_nocache(dst, src, size, 1);
20654 }
20655
20656-static inline int
20657+static inline unsigned long
20658 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
20659- unsigned size)
20660+ unsigned long size)
20661 {
20662+ if (size > INT_MAX)
20663+ return size;
20664+
20665+#ifdef CONFIG_PAX_MEMORY_UDEREF
20666+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20667+ return size;
20668+#endif
20669+
20670 return __copy_user_nocache(dst, src, size, 0);
20671 }
20672
20673 unsigned long
20674-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
20675+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
20676
20677 #endif /* _ASM_X86_UACCESS_64_H */
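The 64-bit helpers above are widened from int to unsigned long returns and reject size > INT_MAX up front: with the old int return, a not-copied count of exactly 2^32 would truncate to 0 and masquerade as success. A userspace model of the truncation hazard, assuming the copy faults immediately so the whole size is the "bytes not copied" result:

#include <limits.h>
#include <stdio.h>

static int old_copy(unsigned long size)
{
	return (int)size;               /* 2^32 truncates to 0: looks copied */
}

static unsigned long new_copy(unsigned long size)
{
	if (size > INT_MAX)
		return size;            /* refuse huge requests up front */
	return size;                    /* full count survives the return */
}

int main(void)
{
	unsigned long sz = 1UL << 32;
	printf("old: %d  new: %lu\n", old_copy(sz), new_copy(sz));
	return 0;
}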
20678diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
20679index 5b238981..77fdd78 100644
20680--- a/arch/x86/include/asm/word-at-a-time.h
20681+++ b/arch/x86/include/asm/word-at-a-time.h
20682@@ -11,7 +11,7 @@
20683 * and shift, for example.
20684 */
20685 struct word_at_a_time {
20686- const unsigned long one_bits, high_bits;
20687+ unsigned long one_bits, high_bits;
20688 };
20689
20690 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
20691diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
20692index e45e4da..44e8572 100644
20693--- a/arch/x86/include/asm/x86_init.h
20694+++ b/arch/x86/include/asm/x86_init.h
20695@@ -129,7 +129,7 @@ struct x86_init_ops {
20696 struct x86_init_timers timers;
20697 struct x86_init_iommu iommu;
20698 struct x86_init_pci pci;
20699-};
20700+} __no_const;
20701
20702 /**
20703 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
20704@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
20705 void (*setup_percpu_clockev)(void);
20706 void (*early_percpu_clock_init)(void);
20707 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
20708-};
20709+} __no_const;
20710
20711 struct timespec;
20712
20713@@ -168,7 +168,7 @@ struct x86_platform_ops {
20714 void (*save_sched_clock_state)(void);
20715 void (*restore_sched_clock_state)(void);
20716 void (*apic_post_init)(void);
20717-};
20718+} __no_const;
20719
20720 struct pci_dev;
20721 struct msi_msg;
20722@@ -185,7 +185,7 @@ struct x86_msi_ops {
20723 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
20724 u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag);
20725 u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag);
20726-};
20727+} __no_const;
20728
20729 struct IO_APIC_route_entry;
20730 struct io_apic_irq_attr;
20731@@ -206,7 +206,7 @@ struct x86_io_apic_ops {
20732 unsigned int destination, int vector,
20733 struct io_apic_irq_attr *attr);
20734 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
20735-};
20736+} __no_const;
20737
20738 extern struct x86_init_ops x86_init;
20739 extern struct x86_cpuinit_ops x86_cpuinit;
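The __no_const annotations above belong to PaX's constify plugin, which automatically const-qualifies structures made of function pointers so their instances land in read-only memory; structs the kernel legitimately rewrites at runtime opt out with __no_const. The payoff in miniature, as a userspace analogue with plain const standing in for the plugin:

#include <stdio.h>

struct ops_model { void (*handler)(void); };

static void safe_handler(void) { puts("safe"); }

/* const puts the instance in .rodata; a stray write faults instead of
 * silently redirecting the function pointer. */
static const struct ops_model x86_init_model = { .handler = safe_handler };

int main(void)
{
	x86_init_model.handler();
	return 0;
}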
20740diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
20741index c949923..c22bfa4 100644
20742--- a/arch/x86/include/asm/xen/page.h
20743+++ b/arch/x86/include/asm/xen/page.h
20744@@ -63,7 +63,7 @@ extern int m2p_remove_override(struct page *page,
20745 extern struct page *m2p_find_override(unsigned long mfn);
20746 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
20747
20748-static inline unsigned long pfn_to_mfn(unsigned long pfn)
20749+static inline unsigned long __intentional_overflow(-1) pfn_to_mfn(unsigned long pfn)
20750 {
20751 unsigned long mfn;
20752
20753diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
20754index d949ef2..479b9d1 100644
20755--- a/arch/x86/include/asm/xsave.h
20756+++ b/arch/x86/include/asm/xsave.h
20757@@ -82,8 +82,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20758 if (unlikely(err))
20759 return -EFAULT;
20760
20761+ pax_open_userland();
20762 __asm__ __volatile__(ASM_STAC "\n"
20763- "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
20764+ "1:"
20765+ __copyuser_seg
20766+ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
20767 "2: " ASM_CLAC "\n"
20768 ".section .fixup,\"ax\"\n"
20769 "3: movl $-1,%[err]\n"
20770@@ -93,18 +96,22 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20771 : [err] "=r" (err)
20772 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
20773 : "memory");
20774+ pax_close_userland();
20775 return err;
20776 }
20777
20778 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20779 {
20780 int err;
20781- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
20782+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
20783 u32 lmask = mask;
20784 u32 hmask = mask >> 32;
20785
20786+ pax_open_userland();
20787 __asm__ __volatile__(ASM_STAC "\n"
20788- "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
20789+ "1:"
20790+ __copyuser_seg
20791+ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
20792 "2: " ASM_CLAC "\n"
20793 ".section .fixup,\"ax\"\n"
20794 "3: movl $-1,%[err]\n"
20795@@ -114,6 +121,7 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20796 : [err] "=r" (err)
20797 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
20798 : "memory"); /* memory required? */
20799+ pax_close_userland();
20800 return err;
20801 }
20802
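The hand-assembled byte sequences patched above encode XSAVE and XRSTOR: 0f ae with ModRM reg field 4 selects XSAVE, reg field 5 selects XRSTOR, and both ModRM bytes address memory through %rdi (the "D" constraint). The patch only prepends the UDEREF segment override and brackets the access with pax_open_userland()/pax_close_userland(). A small decoder for the two bytes involved:

#include <stdio.h>

int main(void)
{
	const unsigned char modrm[] = { 0x27, 0x2f };   /* from the two hunks */

	for (int i = 0; i < 2; i++) {
		unsigned reg = (modrm[i] >> 3) & 7;     /* opcode extension */
		unsigned rm  = modrm[i] & 7;            /* 7 = %rdi */
		printf("0f ae %02x -> /%u rm=%u (%s via %%rdi)\n",
		       modrm[i], reg, rm, reg == 4 ? "XSAVE" : "XRSTOR");
	}
	return 0;
}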
20803diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
20804index bbae024..e1528f9 100644
20805--- a/arch/x86/include/uapi/asm/e820.h
20806+++ b/arch/x86/include/uapi/asm/e820.h
20807@@ -63,7 +63,7 @@ struct e820map {
20808 #define ISA_START_ADDRESS 0xa0000
20809 #define ISA_END_ADDRESS 0x100000
20810
20811-#define BIOS_BEGIN 0x000a0000
20812+#define BIOS_BEGIN 0x000c0000
20813 #define BIOS_END 0x00100000
20814
20815 #define BIOS_ROM_BASE 0xffe00000
20816diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
20817index 7b0a55a..ad115bf 100644
20818--- a/arch/x86/include/uapi/asm/ptrace-abi.h
20819+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
20820@@ -49,7 +49,6 @@
20821 #define EFLAGS 144
20822 #define RSP 152
20823 #define SS 160
20824-#define ARGOFFSET R11
20825 #endif /* __ASSEMBLY__ */
20826
20827 /* top of stack page */
20828diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
20829index 047f9ff..4ba5ea6 100644
20830--- a/arch/x86/kernel/Makefile
20831+++ b/arch/x86/kernel/Makefile
20832@@ -24,7 +24,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
20833 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
20834 obj-$(CONFIG_IRQ_WORK) += irq_work.o
20835 obj-y += probe_roms.o
20836-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
20837+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
20838 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
20839 obj-$(CONFIG_X86_64) += mcount_64.o
20840 obj-y += syscall_$(BITS).o vsyscall_gtod.o
20841diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
20842index 86281ff..e046fc2 100644
20843--- a/arch/x86/kernel/acpi/boot.c
20844+++ b/arch/x86/kernel/acpi/boot.c
20845@@ -1296,7 +1296,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
20846 * If your system is blacklisted here, but you find that acpi=force
20847 * works for you, please contact linux-acpi@vger.kernel.org
20848 */
20849-static struct dmi_system_id __initdata acpi_dmi_table[] = {
20850+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
20851 /*
20852 * Boxes that need ACPI disabled
20853 */
20854@@ -1371,7 +1371,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
20855 };
20856
20857 /* second table for DMI checks that should run after early-quirks */
20858-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
20859+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
20860 /*
20861 * HP laptops which use a DSDT reporting as HP/SB400/10000,
20862 * which includes some code which overrides all temperature
20863diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
20864index 3136820..e2c6577 100644
20865--- a/arch/x86/kernel/acpi/sleep.c
20866+++ b/arch/x86/kernel/acpi/sleep.c
20867@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
20868 #else /* CONFIG_64BIT */
20869 #ifdef CONFIG_SMP
20870 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
20871+
20872+ pax_open_kernel();
20873 early_gdt_descr.address =
20874 (unsigned long)get_cpu_gdt_table(smp_processor_id());
20875+ pax_close_kernel();
20876+
20877 initial_gs = per_cpu_offset(smp_processor_id());
20878 #endif
20879 initial_code = (unsigned long)wakeup_long64;
20880diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
20881index 665c6b7..eae4d56 100644
20882--- a/arch/x86/kernel/acpi/wakeup_32.S
20883+++ b/arch/x86/kernel/acpi/wakeup_32.S
20884@@ -29,13 +29,11 @@ wakeup_pmode_return:
20885 # and restore the stack ... but you need gdt for this to work
20886 movl saved_context_esp, %esp
20887
20888- movl %cs:saved_magic, %eax
20889- cmpl $0x12345678, %eax
20890+ cmpl $0x12345678, saved_magic
20891 jne bogus_magic
20892
20893 # jump to place where we left off
20894- movl saved_eip, %eax
20895- jmp *%eax
20896+ jmp *(saved_eip)
20897
20898 bogus_magic:
20899 jmp bogus_magic
20900diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
20901index 703130f..27a155d 100644
20902--- a/arch/x86/kernel/alternative.c
20903+++ b/arch/x86/kernel/alternative.c
20904@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20905 */
20906 for (a = start; a < end; a++) {
20907 instr = (u8 *)&a->instr_offset + a->instr_offset;
20908+
20909+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20910+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20911+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20912+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20913+#endif
20914+
20915 replacement = (u8 *)&a->repl_offset + a->repl_offset;
20916 BUG_ON(a->replacementlen > a->instrlen);
20917 BUG_ON(a->instrlen > sizeof(insnbuf));
20918@@ -284,6 +291,11 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20919 add_nops(insnbuf + a->replacementlen,
20920 a->instrlen - a->replacementlen);
20921
20922+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20923+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20924+ instr = ktva_ktla(instr);
20925+#endif
20926+
20927 text_poke_early(instr, insnbuf, a->instrlen);
20928 }
20929 }
20930@@ -299,10 +311,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
20931 for (poff = start; poff < end; poff++) {
20932 u8 *ptr = (u8 *)poff + *poff;
20933
20934+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20935+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20936+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20937+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20938+#endif
20939+
20940 if (!*poff || ptr < text || ptr >= text_end)
20941 continue;
20942 /* turn DS segment override prefix into lock prefix */
20943- if (*ptr == 0x3e)
20944+ if (*ktla_ktva(ptr) == 0x3e)
20945 text_poke(ptr, ((unsigned char []){0xf0}), 1);
20946 }
20947 mutex_unlock(&text_mutex);
20948@@ -317,10 +335,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
20949 for (poff = start; poff < end; poff++) {
20950 u8 *ptr = (u8 *)poff + *poff;
20951
20952+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20953+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20954+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20955+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20956+#endif
20957+
20958 if (!*poff || ptr < text || ptr >= text_end)
20959 continue;
20960 /* turn lock prefix into DS segment override prefix */
20961- if (*ptr == 0xf0)
20962+ if (*ktla_ktva(ptr) == 0xf0)
20963 text_poke(ptr, ((unsigned char []){0x3E}), 1);
20964 }
20965 mutex_unlock(&text_mutex);
20966@@ -457,7 +481,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
20967
20968 BUG_ON(p->len > MAX_PATCH_LEN);
20969 /* prep the buffer with the original instructions */
20970- memcpy(insnbuf, p->instr, p->len);
20971+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
20972 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
20973 (unsigned long)p->instr, p->len);
20974
20975@@ -504,7 +528,7 @@ void __init alternative_instructions(void)
20976 if (!uniproc_patched || num_possible_cpus() == 1)
20977 free_init_pages("SMP alternatives",
20978 (unsigned long)__smp_locks,
20979- (unsigned long)__smp_locks_end);
20980+ PAGE_ALIGN((unsigned long)__smp_locks_end));
20981 #endif
20982
20983 apply_paravirt(__parainstructions, __parainstructions_end);
20984@@ -524,13 +548,17 @@ void __init alternative_instructions(void)
20985 * instructions. And on the local CPU you need to be protected again NMI or MCE
20986 * handlers seeing an inconsistent instruction while you patch.
20987 */
20988-void *__init_or_module text_poke_early(void *addr, const void *opcode,
20989+void *__kprobes text_poke_early(void *addr, const void *opcode,
20990 size_t len)
20991 {
20992 unsigned long flags;
20993 local_irq_save(flags);
20994- memcpy(addr, opcode, len);
20995+
20996+ pax_open_kernel();
20997+ memcpy(ktla_ktva(addr), opcode, len);
20998 sync_core();
20999+ pax_close_kernel();
21000+
21001 local_irq_restore(flags);
21002 /* Could also do a CLFLUSH here to speed up CPU recovery; but
21003 that causes hangs on some VIA CPUs. */
21004@@ -552,36 +580,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
21005 */
21006 void *text_poke(void *addr, const void *opcode, size_t len)
21007 {
21008- unsigned long flags;
21009- char *vaddr;
21010+ unsigned char *vaddr = ktla_ktva(addr);
21011 struct page *pages[2];
21012- int i;
21013+ size_t i;
21014
21015 if (!core_kernel_text((unsigned long)addr)) {
21016- pages[0] = vmalloc_to_page(addr);
21017- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
21018+ pages[0] = vmalloc_to_page(vaddr);
21019+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
21020 } else {
21021- pages[0] = virt_to_page(addr);
21022+ pages[0] = virt_to_page(vaddr);
21023 WARN_ON(!PageReserved(pages[0]));
21024- pages[1] = virt_to_page(addr + PAGE_SIZE);
21025+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
21026 }
21027 BUG_ON(!pages[0]);
21028- local_irq_save(flags);
21029- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
21030- if (pages[1])
21031- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
21032- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
21033- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
21034- clear_fixmap(FIX_TEXT_POKE0);
21035- if (pages[1])
21036- clear_fixmap(FIX_TEXT_POKE1);
21037- local_flush_tlb();
21038- sync_core();
21039- /* Could also do a CLFLUSH here to speed up CPU recovery; but
21040- that causes hangs on some VIA CPUs. */
21041+ text_poke_early(addr, opcode, len);
21042 for (i = 0; i < len; i++)
21043- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
21044- local_irq_restore(flags);
21045+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
21046 return addr;
21047 }
21048
21049@@ -601,7 +615,7 @@ int poke_int3_handler(struct pt_regs *regs)
21050 if (likely(!bp_patching_in_progress))
21051 return 0;
21052
21053- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
21054+ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
21055 return 0;
21056
21057 /* set up the specified breakpoint handler */
21058@@ -635,7 +649,7 @@ int poke_int3_handler(struct pt_regs *regs)
21059 */
21060 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
21061 {
21062- unsigned char int3 = 0xcc;
21063+ const unsigned char int3 = 0xcc;
21064
21065 bp_int3_handler = handler;
21066 bp_int3_addr = (u8 *)addr + sizeof(int3);
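The ktla_ktva()/ktva_ktla() conversions threaded through alternative.c above cover the i386 KERNEXEC case where kernel text is mapped twice, once executable and once writable at a fixed offset: patching must write through the alias while comparisons read through it, and text_poke() collapses into text_poke_early() inside a pax_open_kernel() window. The address translation modeled in userspace (the delta is a made-up value for illustration):

#include <stdint.h>
#include <stdio.h>

#define TEXT_ALIAS_DELTA 0x10000000UL   /* hypothetical offset, not the real one */

static uintptr_t ktla_ktva_model(uintptr_t a) { return a + TEXT_ALIAS_DELTA; }
static uintptr_t ktva_ktla_model(uintptr_t a) { return a - TEXT_ALIAS_DELTA; }

int main(void)
{
	uintptr_t text = 0xc1000000UL;            /* executable, read-only */
	uintptr_t rw   = ktla_ktva_model(text);   /* writable alias for patching */

	printf("exec %#lx  write via %#lx  back %#lx\n",
	       (unsigned long)text, (unsigned long)rw,
	       (unsigned long)ktva_ktla_model(rw));
	return 0;
}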
21067diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
21068index ad28db7..c538b2c 100644
21069--- a/arch/x86/kernel/apic/apic.c
21070+++ b/arch/x86/kernel/apic/apic.c
21071@@ -201,7 +201,7 @@ int first_system_vector = 0xfe;
21072 /*
21073 * Debug level, exported for io_apic.c
21074 */
21075-unsigned int apic_verbosity;
21076+int apic_verbosity;
21077
21078 int pic_mode;
21079
21080@@ -2000,7 +2000,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
21081 apic_write(APIC_ESR, 0);
21082 v = apic_read(APIC_ESR);
21083 ack_APIC_irq();
21084- atomic_inc(&irq_err_count);
21085+ atomic_inc_unchecked(&irq_err_count);
21086
21087 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
21088 smp_processor_id(), v);
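atomic_inc() becomes atomic_inc_unchecked() above because PaX's REFCOUNT hardening traps when an atomic_t overflows; irq_err_count is a pure statistic that may legitimately wrap, so it moves to the unchecked type. The distinction sketched with plain, non-atomic integers:

#include <limits.h>
#include <stdio.h>

/* refcount-style increment: refuse to wrap (the kernel would trap here). */
static int checked_inc(int *v)
{
	if (*v == INT_MAX)
		return -1;
	return ++*v;
}

/* statistics-style increment: wrapping is acceptable by design. */
static unsigned int unchecked_inc(unsigned int *v)
{
	return ++*v;                    /* unsigned wrap is well defined */
}

int main(void)
{
	int ref = INT_MAX;
	unsigned int stat = UINT_MAX;

	printf("checked at max: %d\n", checked_inc(&ref));
	printf("unchecked wraps to: %u\n", unchecked_inc(&stat));
	return 0;
}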
21089diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
21090index 7c1b294..e71d27f 100644
21091--- a/arch/x86/kernel/apic/apic_flat_64.c
21092+++ b/arch/x86/kernel/apic/apic_flat_64.c
21093@@ -154,7 +154,7 @@ static int flat_probe(void)
21094 return 1;
21095 }
21096
21097-static struct apic apic_flat = {
21098+static struct apic apic_flat __read_only = {
21099 .name = "flat",
21100 .probe = flat_probe,
21101 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
21102@@ -268,7 +268,7 @@ static int physflat_probe(void)
21103 return 0;
21104 }
21105
21106-static struct apic apic_physflat = {
21107+static struct apic apic_physflat __read_only = {
21108
21109 .name = "physical flat",
21110 .probe = physflat_probe,
21111diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
21112index 8c7c982..a225910 100644
21113--- a/arch/x86/kernel/apic/apic_noop.c
21114+++ b/arch/x86/kernel/apic/apic_noop.c
21115@@ -118,7 +118,7 @@ static void noop_apic_write(u32 reg, u32 v)
21116 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
21117 }
21118
21119-struct apic apic_noop = {
21120+struct apic apic_noop __read_only = {
21121 .name = "noop",
21122 .probe = noop_probe,
21123 .acpi_madt_oem_check = NULL,
21124diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
21125index e4840aa..e7d9dac 100644
21126--- a/arch/x86/kernel/apic/bigsmp_32.c
21127+++ b/arch/x86/kernel/apic/bigsmp_32.c
21128@@ -152,7 +152,7 @@ static int probe_bigsmp(void)
21129 return dmi_bigsmp;
21130 }
21131
21132-static struct apic apic_bigsmp = {
21133+static struct apic apic_bigsmp __read_only = {
21134
21135 .name = "bigsmp",
21136 .probe = probe_bigsmp,
21137diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
21138index 81e08ef..abc77e5 100644
21139--- a/arch/x86/kernel/apic/io_apic.c
21140+++ b/arch/x86/kernel/apic/io_apic.c
21141@@ -1042,7 +1042,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
21142 }
21143 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
21144
21145-void lock_vector_lock(void)
21146+void lock_vector_lock(void) __acquires(vector_lock)
21147 {
21148 /* Used to the online set of cpus does not change
21149 * during assign_irq_vector.
21150@@ -1050,7 +1050,7 @@ void lock_vector_lock(void)
21151 raw_spin_lock(&vector_lock);
21152 }
21153
21154-void unlock_vector_lock(void)
21155+void unlock_vector_lock(void) __releases(vector_lock)
21156 {
21157 raw_spin_unlock(&vector_lock);
21158 }
21159@@ -2349,7 +2349,7 @@ static void ack_apic_edge(struct irq_data *data)
21160 ack_APIC_irq();
21161 }
21162
21163-atomic_t irq_mis_count;
21164+atomic_unchecked_t irq_mis_count;
21165
21166 #ifdef CONFIG_GENERIC_PENDING_IRQ
21167 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
21168@@ -2490,7 +2490,7 @@ static void ack_apic_level(struct irq_data *data)
21169 * at the cpu.
21170 */
21171 if (!(v & (1 << (i & 0x1f)))) {
21172- atomic_inc(&irq_mis_count);
21173+ atomic_inc_unchecked(&irq_mis_count);
21174
21175 eoi_ioapic_irq(irq, cfg);
21176 }
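lock_vector_lock()/unlock_vector_lock() above gain __acquires()/__releases() annotations, which expand to nothing under gcc but let sparse verify lock balance; irq_mis_count gets the same unchecked-counter treatment as irq_err_count. A self-contained version of the annotation idiom, with a pthread mutex standing in for the raw spinlock (build with -pthread):

#include <pthread.h>
#include <stdio.h>

#ifdef __CHECKER__                       /* defined when sparse runs */
# define __acquires(x) __attribute__((context(x, 0, 1)))
# define __releases(x) __attribute__((context(x, 1, 0)))
#else
# define __acquires(x)
# define __releases(x)
#endif

static pthread_mutex_t vector_lock = PTHREAD_MUTEX_INITIALIZER;

void lock_vector_lock(void) __acquires(vector_lock);
void lock_vector_lock(void)
{
	pthread_mutex_lock(&vector_lock);
}

void unlock_vector_lock(void) __releases(vector_lock);
void unlock_vector_lock(void)
{
	pthread_mutex_unlock(&vector_lock);
}

int main(void)
{
	lock_vector_lock();
	puts("vector table updated");
	unlock_vector_lock();
	return 0;
}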
21177diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
21178index cceb352..a635fd8 100644
21179--- a/arch/x86/kernel/apic/probe_32.c
21180+++ b/arch/x86/kernel/apic/probe_32.c
21181@@ -72,7 +72,7 @@ static int probe_default(void)
21182 return 1;
21183 }
21184
21185-static struct apic apic_default = {
21186+static struct apic apic_default __read_only = {
21187
21188 .name = "default",
21189 .probe = probe_default,
21190diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
21191index e66766b..1c008ba 100644
21192--- a/arch/x86/kernel/apic/x2apic_cluster.c
21193+++ b/arch/x86/kernel/apic/x2apic_cluster.c
21194@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
21195 return notifier_from_errno(err);
21196 }
21197
21198-static struct notifier_block __refdata x2apic_cpu_notifier = {
21199+static struct notifier_block x2apic_cpu_notifier = {
21200 .notifier_call = update_clusterinfo,
21201 };
21202
21203@@ -234,7 +234,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
21204 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
21205 }
21206
21207-static struct apic apic_x2apic_cluster = {
21208+static struct apic apic_x2apic_cluster __read_only = {
21209
21210 .name = "cluster x2apic",
21211 .probe = x2apic_cluster_probe,
21212diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
21213index 6d600eb..0300c00 100644
21214--- a/arch/x86/kernel/apic/x2apic_phys.c
21215+++ b/arch/x86/kernel/apic/x2apic_phys.c
21216@@ -88,7 +88,7 @@ static int x2apic_phys_probe(void)
21217 return apic == &apic_x2apic_phys;
21218 }
21219
21220-static struct apic apic_x2apic_phys = {
21221+static struct apic apic_x2apic_phys __read_only = {
21222
21223 .name = "physical x2apic",
21224 .probe = x2apic_phys_probe,
21225diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
21226index 293b41d..4df25fd 100644
21227--- a/arch/x86/kernel/apic/x2apic_uv_x.c
21228+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
21229@@ -350,7 +350,7 @@ static int uv_probe(void)
21230 return apic == &apic_x2apic_uv_x;
21231 }
21232
21233-static struct apic __refdata apic_x2apic_uv_x = {
21234+static struct apic apic_x2apic_uv_x __read_only = {
21235
21236 .name = "UV large system",
21237 .probe = uv_probe,
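The run of hunks above marks every struct apic instance __read_only (dropping __refdata where present): the APIC driver is selected once during boot and the table is never written afterwards, so keeping it on read-only pages removes a classic function-pointer overwrite target. The idea as a userspace analogue:

#include <stdio.h>

struct apic_model {
	const char *name;
	int (*probe)(void);
};

static int flat_probe_model(void) { return 1; }

/* Chosen once at boot, never written again -> read-only placement. */
static const struct apic_model apic_flat_model = {
	.name  = "flat",
	.probe = flat_probe_model,
};

int main(void)
{
	printf("%s probe: %d\n", apic_flat_model.name, apic_flat_model.probe());
	return 0;
}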
21238diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
21239index 5848744..56cb598 100644
21240--- a/arch/x86/kernel/apm_32.c
21241+++ b/arch/x86/kernel/apm_32.c
21242@@ -433,7 +433,7 @@ static DEFINE_MUTEX(apm_mutex);
21243 * This is for buggy BIOS's that refer to (real mode) segment 0x40
21244 * even though they are called in protected mode.
21245 */
21246-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
21247+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
21248 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
21249
21250 static const char driver_version[] = "1.16ac"; /* no spaces */
21251@@ -611,7 +611,10 @@ static long __apm_bios_call(void *_call)
21252 BUG_ON(cpu != 0);
21253 gdt = get_cpu_gdt_table(cpu);
21254 save_desc_40 = gdt[0x40 / 8];
21255+
21256+ pax_open_kernel();
21257 gdt[0x40 / 8] = bad_bios_desc;
21258+ pax_close_kernel();
21259
21260 apm_irq_save(flags);
21261 APM_DO_SAVE_SEGS;
21262@@ -620,7 +623,11 @@ static long __apm_bios_call(void *_call)
21263 &call->esi);
21264 APM_DO_RESTORE_SEGS;
21265 apm_irq_restore(flags);
21266+
21267+ pax_open_kernel();
21268 gdt[0x40 / 8] = save_desc_40;
21269+ pax_close_kernel();
21270+
21271 put_cpu();
21272
21273 return call->eax & 0xff;
21274@@ -687,7 +694,10 @@ static long __apm_bios_call_simple(void *_call)
21275 BUG_ON(cpu != 0);
21276 gdt = get_cpu_gdt_table(cpu);
21277 save_desc_40 = gdt[0x40 / 8];
21278+
21279+ pax_open_kernel();
21280 gdt[0x40 / 8] = bad_bios_desc;
21281+ pax_close_kernel();
21282
21283 apm_irq_save(flags);
21284 APM_DO_SAVE_SEGS;
21285@@ -695,7 +705,11 @@ static long __apm_bios_call_simple(void *_call)
21286 &call->eax);
21287 APM_DO_RESTORE_SEGS;
21288 apm_irq_restore(flags);
21289+
21290+ pax_open_kernel();
21291 gdt[0x40 / 8] = save_desc_40;
21292+ pax_close_kernel();
21293+
21294 put_cpu();
21295 return error;
21296 }
21297@@ -2350,12 +2364,15 @@ static int __init apm_init(void)
21298 * code to that CPU.
21299 */
21300 gdt = get_cpu_gdt_table(0);
21301+
21302+ pax_open_kernel();
21303 set_desc_base(&gdt[APM_CS >> 3],
21304 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
21305 set_desc_base(&gdt[APM_CS_16 >> 3],
21306 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
21307 set_desc_base(&gdt[APM_DS >> 3],
21308 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
21309+ pax_close_kernel();
21310
21311 proc_create("apm", 0, NULL, &apm_file_ops);
21312
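With KERNEXEC the GDT lives on read-only pages, so the temporary descriptor-0x40 swap for buggy APM BIOSes must be bracketed by pax_open_kernel()/pax_close_kernel(); the descriptor type also changes from 0x4092 to 0x4093, presetting the accessed bit so the CPU never needs to write it into the now read-only table. The open/close write window modeled with mprotect() in userspace:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	char *gdt = mmap(NULL, pg, PROT_READ | PROT_WRITE,
	                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (gdt == MAP_FAILED)
		return 1;
	strcpy(gdt, "descriptor 0x40");
	mprotect(gdt, pg, PROT_READ);                /* steady state: RO */

	mprotect(gdt, pg, PROT_READ | PROT_WRITE);   /* pax_open_kernel() */
	gdt[0] = 'D';                                /* the bracketed write */
	mprotect(gdt, pg, PROT_READ);                /* pax_close_kernel() */

	puts(gdt);
	return 0;
}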
21313diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
21314index 9f6b934..cf5ffb3 100644
21315--- a/arch/x86/kernel/asm-offsets.c
21316+++ b/arch/x86/kernel/asm-offsets.c
21317@@ -32,6 +32,8 @@ void common(void) {
21318 OFFSET(TI_flags, thread_info, flags);
21319 OFFSET(TI_status, thread_info, status);
21320 OFFSET(TI_addr_limit, thread_info, addr_limit);
21321+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
21322+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
21323
21324 BLANK();
21325 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
21326@@ -52,8 +54,26 @@ void common(void) {
21327 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
21328 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
21329 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
21330+
21331+#ifdef CONFIG_PAX_KERNEXEC
21332+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
21333 #endif
21334
21335+#ifdef CONFIG_PAX_MEMORY_UDEREF
21336+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
21337+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
21338+#ifdef CONFIG_X86_64
21339+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
21340+#endif
21341+#endif
21342+
21343+#endif
21344+
21345+ BLANK();
21346+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
21347+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
21348+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
21349+
21350 #ifdef CONFIG_XEN
21351 BLANK();
21352 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
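The new OFFSET()/DEFINE() entries above exist so PaX's assembly stubs can reference fields such as thread_info.lowest_stack symbolically: asm-offsets.c is compiled to assembly only, and a build script scrapes the emitted "->" markers into asm-offsets.h. The whole mechanism fits in a few lines; compile the following with gcc -S and grep the output for "->" (the struct name is a stand-in):

#include <stddef.h>

struct thread_info_model {
	unsigned long flags;
	unsigned long lowest_stack;
};

#define DEFINE(sym, val) \
	__asm__ __volatile__("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
#define OFFSET(sym, str, mem) DEFINE(sym, offsetof(struct str, mem))

/* Never called; the .ascii payload only matters in the generated .s file. */
void common_model(void)
{
	OFFSET(TI_lowest_stack, thread_info_model, lowest_stack);
	DEFINE(PAGE_SIZE_asm, 4096);
}

int main(void)
{
	return 0;
}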
21353diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
21354index e7c798b..2b2019b 100644
21355--- a/arch/x86/kernel/asm-offsets_64.c
21356+++ b/arch/x86/kernel/asm-offsets_64.c
21357@@ -77,6 +77,7 @@ int main(void)
21358 BLANK();
21359 #undef ENTRY
21360
21361+ DEFINE(TSS_size, sizeof(struct tss_struct));
21362 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
21363 BLANK();
21364
21365diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
21366index 7fd54f0..0691410 100644
21367--- a/arch/x86/kernel/cpu/Makefile
21368+++ b/arch/x86/kernel/cpu/Makefile
21369@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
21370 CFLAGS_REMOVE_perf_event.o = -pg
21371 endif
21372
21373-# Make sure load_percpu_segment has no stackprotector
21374-nostackp := $(call cc-option, -fno-stack-protector)
21375-CFLAGS_common.o := $(nostackp)
21376-
21377 obj-y := intel_cacheinfo.o scattered.o topology.o
21378 obj-y += proc.o capflags.o powerflags.o common.o
21379 obj-y += rdrand.o
21380diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
21381index ce8b8ff..d7d8851 100644
21382--- a/arch/x86/kernel/cpu/amd.c
21383+++ b/arch/x86/kernel/cpu/amd.c
21384@@ -728,7 +728,7 @@ static void init_amd(struct cpuinfo_x86 *c)
21385 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
21386 {
21387 /* AMD errata T13 (order #21922) */
21388- if ((c->x86 == 6)) {
21389+ if (c->x86 == 6) {
21390 /* Duron Rev A0 */
21391 if (c->x86_model == 3 && c->x86_mask == 0)
21392 size = 64;
21393diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
21394index ef1b93f..150db65 100644
21395--- a/arch/x86/kernel/cpu/common.c
21396+++ b/arch/x86/kernel/cpu/common.c
21397@@ -90,60 +90,6 @@ static const struct cpu_dev default_cpu = {
21398
21399 static const struct cpu_dev *this_cpu = &default_cpu;
21400
21401-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
21402-#ifdef CONFIG_X86_64
21403- /*
21404- * We need valid kernel segments for data and code in long mode too
21405- * IRET will check the segment types kkeil 2000/10/28
21406- * Also sysret mandates a special GDT layout
21407- *
21408- * TLS descriptors are currently at a different place compared to i386.
21409- * Hopefully nobody expects them at a fixed place (Wine?)
21410- */
21411- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
21412- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
21413- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
21414- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
21415- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
21416- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
21417-#else
21418- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
21419- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21420- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
21421- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
21422- /*
21423- * Segments used for calling PnP BIOS have byte granularity.
21424- * They code segments and data segments have fixed 64k limits,
21425- * the transfer segment sizes are set at run time.
21426- */
21427- /* 32-bit code */
21428- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21429- /* 16-bit code */
21430- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21431- /* 16-bit data */
21432- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
21433- /* 16-bit data */
21434- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
21435- /* 16-bit data */
21436- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
21437- /*
21438- * The APM segments have byte granularity and their bases
21439- * are set at run time. All have 64k limits.
21440- */
21441- /* 32-bit code */
21442- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21443- /* 16-bit code */
21444- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21445- /* data */
21446- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
21447-
21448- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21449- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21450- GDT_STACK_CANARY_INIT
21451-#endif
21452-} };
21453-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
21454-
21455 static int __init x86_xsave_setup(char *s)
21456 {
21457 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
21458@@ -295,6 +241,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
21459 }
21460 }
21461
21462+#ifdef CONFIG_X86_64
21463+static __init int setup_disable_pcid(char *arg)
21464+{
21465+ setup_clear_cpu_cap(X86_FEATURE_PCID);
21466+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
21467+
21468+#ifdef CONFIG_PAX_MEMORY_UDEREF
21469+ if (clone_pgd_mask != ~(pgdval_t)0UL)
21470+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21471+#endif
21472+
21473+ return 1;
21474+}
21475+__setup("nopcid", setup_disable_pcid);
21476+
21477+static void setup_pcid(struct cpuinfo_x86 *c)
21478+{
21479+ if (!cpu_has(c, X86_FEATURE_PCID)) {
21480+ clear_cpu_cap(c, X86_FEATURE_INVPCID);
21481+
21482+#ifdef CONFIG_PAX_MEMORY_UDEREF
21483+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
21484+ pax_open_kernel();
21485+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21486+ pax_close_kernel();
21487+ printk("PAX: slow and weak UDEREF enabled\n");
21488+ } else
21489+ printk("PAX: UDEREF disabled\n");
21490+#endif
21491+
21492+ return;
21493+ }
21494+
21495+ printk("PAX: PCID detected\n");
21496+ set_in_cr4(X86_CR4_PCIDE);
21497+
21498+#ifdef CONFIG_PAX_MEMORY_UDEREF
21499+ pax_open_kernel();
21500+ clone_pgd_mask = ~(pgdval_t)0UL;
21501+ pax_close_kernel();
21502+ if (pax_user_shadow_base)
21503+ printk("PAX: weak UDEREF enabled\n");
21504+ else {
21505+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
21506+ printk("PAX: strong UDEREF enabled\n");
21507+ }
21508+#endif
21509+
21510+ if (cpu_has(c, X86_FEATURE_INVPCID))
21511+ printk("PAX: INVPCID detected\n");
21512+}
21513+#endif
21514+
21515 /*
21516 * Some CPU features depend on higher CPUID levels, which may not always
21517 * be available due to CPUID level capping or broken virtualization
21518@@ -395,7 +394,7 @@ void switch_to_new_gdt(int cpu)
21519 {
21520 struct desc_ptr gdt_descr;
21521
21522- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
21523+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
21524 gdt_descr.size = GDT_SIZE - 1;
21525 load_gdt(&gdt_descr);
21526 /* Reload the per-cpu base */
21527@@ -885,6 +884,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21528 setup_smep(c);
21529 setup_smap(c);
21530
21531+#ifdef CONFIG_X86_64
21532+ setup_pcid(c);
21533+#endif
21534+
21535 /*
21536 * The vendor-specific functions might have changed features.
21537 * Now we do "generic changes."
21538@@ -893,6 +896,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21539 /* Filter out anything that depends on CPUID levels we don't have */
21540 filter_cpuid_features(c, true);
21541
21542+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
21543+ setup_clear_cpu_cap(X86_FEATURE_SEP);
21544+#endif
21545+
21546 /* If the model name is still unset, do table lookup. */
21547 if (!c->x86_model_id[0]) {
21548 const char *p;
21549@@ -973,7 +980,7 @@ static void syscall32_cpu_init(void)
21550 void enable_sep_cpu(void)
21551 {
21552 int cpu = get_cpu();
21553- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21554+ struct tss_struct *tss = init_tss + cpu;
21555
21556 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21557 put_cpu();
21558@@ -1113,14 +1120,16 @@ static __init int setup_disablecpuid(char *arg)
21559 }
21560 __setup("clearcpuid=", setup_disablecpuid);
21561
21562+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
21563+EXPORT_PER_CPU_SYMBOL(current_tinfo);
21564+
21565 DEFINE_PER_CPU(unsigned long, kernel_stack) =
21566- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
21567+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
21568 EXPORT_PER_CPU_SYMBOL(kernel_stack);
21569
21570 #ifdef CONFIG_X86_64
21571-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21572-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
21573- (unsigned long) debug_idt_table };
21574+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21575+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
21576
21577 DEFINE_PER_CPU_FIRST(union irq_stack_union,
21578 irq_stack_union) __aligned(PAGE_SIZE) __visible;
21579@@ -1283,7 +1292,7 @@ void cpu_init(void)
21580 load_ucode_ap();
21581
21582 cpu = stack_smp_processor_id();
21583- t = &per_cpu(init_tss, cpu);
21584+ t = init_tss + cpu;
21585 oist = &per_cpu(orig_ist, cpu);
21586
21587 #ifdef CONFIG_NUMA
21588@@ -1318,7 +1327,6 @@ void cpu_init(void)
21589 wrmsrl(MSR_KERNEL_GS_BASE, 0);
21590 barrier();
21591
21592- x86_configure_nx();
21593 enable_x2apic();
21594
21595 /*
21596@@ -1370,7 +1378,7 @@ void cpu_init(void)
21597 {
21598 int cpu = smp_processor_id();
21599 struct task_struct *curr = current;
21600- struct tss_struct *t = &per_cpu(init_tss, cpu);
21601+ struct tss_struct *t = init_tss + cpu;
21602 struct thread_struct *thread = &curr->thread;
21603
21604 show_ucode_info_early();
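
The setup_disable_pcid() handler added above relies on the kernel's __setup() mechanism: the quoted string is matched against the boot command line during early init, so the handler runs before most subsystems come up. A minimal sketch of the same pattern, with a hypothetical "nofeature" flag standing in for "nopcid":

#include <linux/init.h>
#include <linux/kernel.h>

static bool feature_disabled __initdata;	/* hypothetical flag */

/* Runs during early parameter parsing when "nofeature" appears on the
 * kernel command line; returning 1 marks the option as consumed. */
static int __init setup_disable_feature(char *arg)
{
	feature_disabled = true;
	return 1;
}
__setup("nofeature", setup_disable_feature);
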
21605diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
21606index 9c8f739..902a9c5 100644
21607--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
21608+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
21609@@ -1026,6 +1026,22 @@ static struct attribute *default_attrs[] = {
21610 };
21611
21612 #ifdef CONFIG_AMD_NB
21613+static struct attribute *default_attrs_amd_nb[] = {
21614+ &type.attr,
21615+ &level.attr,
21616+ &coherency_line_size.attr,
21617+ &physical_line_partition.attr,
21618+ &ways_of_associativity.attr,
21619+ &number_of_sets.attr,
21620+ &size.attr,
21621+ &shared_cpu_map.attr,
21622+ &shared_cpu_list.attr,
21623+ NULL,
21624+ NULL,
21625+ NULL,
21626+ NULL
21627+};
21628+
21629 static struct attribute **amd_l3_attrs(void)
21630 {
21631 static struct attribute **attrs;
21632@@ -1036,18 +1052,7 @@ static struct attribute **amd_l3_attrs(void)
21633
21634 n = ARRAY_SIZE(default_attrs);
21635
21636- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
21637- n += 2;
21638-
21639- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
21640- n += 1;
21641-
21642- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
21643- if (attrs == NULL)
21644- return attrs = default_attrs;
21645-
21646- for (n = 0; default_attrs[n]; n++)
21647- attrs[n] = default_attrs[n];
21648+ attrs = default_attrs_amd_nb;
21649
21650 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
21651 attrs[n++] = &cache_disable_0.attr;
21652@@ -1098,6 +1103,13 @@ static struct kobj_type ktype_cache = {
21653 .default_attrs = default_attrs,
21654 };
21655
21656+#ifdef CONFIG_AMD_NB
21657+static struct kobj_type ktype_cache_amd_nb = {
21658+ .sysfs_ops = &sysfs_ops,
21659+ .default_attrs = default_attrs_amd_nb,
21660+};
21661+#endif
21662+
21663 static struct kobj_type ktype_percpu_entry = {
21664 .sysfs_ops = &sysfs_ops,
21665 };
21666@@ -1163,20 +1175,26 @@ static int cache_add_dev(struct device *dev)
21667 return retval;
21668 }
21669
21670+#ifdef CONFIG_AMD_NB
21671+ amd_l3_attrs();
21672+#endif
21673+
21674 for (i = 0; i < num_cache_leaves; i++) {
21675+ struct kobj_type *ktype;
21676+
21677 this_object = INDEX_KOBJECT_PTR(cpu, i);
21678 this_object->cpu = cpu;
21679 this_object->index = i;
21680
21681 this_leaf = CPUID4_INFO_IDX(cpu, i);
21682
21683- ktype_cache.default_attrs = default_attrs;
21684+ ktype = &ktype_cache;
21685 #ifdef CONFIG_AMD_NB
21686 if (this_leaf->base.nb)
21687- ktype_cache.default_attrs = amd_l3_attrs();
21688+ ktype = &ktype_cache_amd_nb;
21689 #endif
21690 retval = kobject_init_and_add(&(this_object->kobj),
21691- &ktype_cache,
21692+ ktype,
21693 per_cpu(ici_cache_kobject, cpu),
21694 "index%1lu", i);
21695 if (unlikely(retval)) {
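
The intel_cacheinfo.c rework above trades a kzalloc()ed attribute array for the statically sized default_attrs_amd_nb, whose trailing NULL slots leave room for the optional L3 index-disable and partitioning attributes while keeping the array a compile-time object. A hedged sketch of that fixed-array-with-spare-slots idiom, using illustrative names:

#include <stddef.h>

/* Always-present entries up front, NULL spares for optional entries
 * filled in at init, and a NULL terminator preserved at the end. */
static const char *cache_attr_names[8] = {
	"type",
	"level",
	"size",
	/* remaining slots are NULL: spares plus terminator */
};

static void add_optional_attr(const char **tab, size_t cap, const char *name)
{
	size_t i;

	for (i = 0; i < cap && tab[i]; i++)
		;
	if (i + 1 < cap)	/* never consume the terminator slot */
		tab[i] = name;
}
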
21696diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
21697index 9a79c8d..158c2f1 100644
21698--- a/arch/x86/kernel/cpu/mcheck/mce.c
21699+++ b/arch/x86/kernel/cpu/mcheck/mce.c
21700@@ -45,6 +45,7 @@
21701 #include <asm/processor.h>
21702 #include <asm/mce.h>
21703 #include <asm/msr.h>
21704+#include <asm/local.h>
21705
21706 #include "mce-internal.h"
21707
21708@@ -259,7 +260,7 @@ static void print_mce(struct mce *m)
21709 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
21710 m->cs, m->ip);
21711
21712- if (m->cs == __KERNEL_CS)
21713+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
21714 print_symbol("{%s}", m->ip);
21715 pr_cont("\n");
21716 }
21717@@ -292,10 +293,10 @@ static void print_mce(struct mce *m)
21718
21719 #define PANIC_TIMEOUT 5 /* 5 seconds */
21720
21721-static atomic_t mce_paniced;
21722+static atomic_unchecked_t mce_paniced;
21723
21724 static int fake_panic;
21725-static atomic_t mce_fake_paniced;
21726+static atomic_unchecked_t mce_fake_paniced;
21727
21728 /* Panic in progress. Enable interrupts and wait for final IPI */
21729 static void wait_for_panic(void)
21730@@ -319,7 +320,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21731 /*
21732 * Make sure only one CPU runs in machine check panic
21733 */
21734- if (atomic_inc_return(&mce_paniced) > 1)
21735+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
21736 wait_for_panic();
21737 barrier();
21738
21739@@ -327,7 +328,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21740 console_verbose();
21741 } else {
21742 /* Don't log too much for fake panic */
21743- if (atomic_inc_return(&mce_fake_paniced) > 1)
21744+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
21745 return;
21746 }
21747 /* First print corrected ones that are still unlogged */
21748@@ -366,7 +367,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21749 if (!fake_panic) {
21750 if (panic_timeout == 0)
21751 panic_timeout = mca_cfg.panic_timeout;
21752- panic(msg);
21753+ panic("%s", msg);
21754 } else
21755 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
21756 }
21757@@ -697,7 +698,7 @@ static int mce_timed_out(u64 *t)
21758 * might have been modified by someone else.
21759 */
21760 rmb();
21761- if (atomic_read(&mce_paniced))
21762+ if (atomic_read_unchecked(&mce_paniced))
21763 wait_for_panic();
21764 if (!mca_cfg.monarch_timeout)
21765 goto out;
21766@@ -1674,7 +1675,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
21767 }
21768
21769 /* Call the installed machine check handler for this CPU setup. */
21770-void (*machine_check_vector)(struct pt_regs *, long error_code) =
21771+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
21772 unexpected_machine_check;
21773
21774 /*
21775@@ -1697,7 +1698,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21776 return;
21777 }
21778
21779+ pax_open_kernel();
21780 machine_check_vector = do_machine_check;
21781+ pax_close_kernel();
21782
21783 __mcheck_cpu_init_generic();
21784 __mcheck_cpu_init_vendor(c);
21785@@ -1711,7 +1714,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21786 */
21787
21788 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
21789-static int mce_chrdev_open_count; /* #times opened */
21790+static local_t mce_chrdev_open_count; /* #times opened */
21791 static int mce_chrdev_open_exclu; /* already open exclusive? */
21792
21793 static int mce_chrdev_open(struct inode *inode, struct file *file)
21794@@ -1719,7 +1722,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21795 spin_lock(&mce_chrdev_state_lock);
21796
21797 if (mce_chrdev_open_exclu ||
21798- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
21799+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
21800 spin_unlock(&mce_chrdev_state_lock);
21801
21802 return -EBUSY;
21803@@ -1727,7 +1730,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21804
21805 if (file->f_flags & O_EXCL)
21806 mce_chrdev_open_exclu = 1;
21807- mce_chrdev_open_count++;
21808+ local_inc(&mce_chrdev_open_count);
21809
21810 spin_unlock(&mce_chrdev_state_lock);
21811
21812@@ -1738,7 +1741,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
21813 {
21814 spin_lock(&mce_chrdev_state_lock);
21815
21816- mce_chrdev_open_count--;
21817+ local_dec(&mce_chrdev_open_count);
21818 mce_chrdev_open_exclu = 0;
21819
21820 spin_unlock(&mce_chrdev_state_lock);
21821@@ -2414,7 +2417,7 @@ static __init void mce_init_banks(void)
21822
21823 for (i = 0; i < mca_cfg.banks; i++) {
21824 struct mce_bank *b = &mce_banks[i];
21825- struct device_attribute *a = &b->attr;
21826+ device_attribute_no_const *a = &b->attr;
21827
21828 sysfs_attr_init(&a->attr);
21829 a->attr.name = b->attrname;
21830@@ -2521,7 +2524,7 @@ struct dentry *mce_get_debugfs_dir(void)
21831 static void mce_reset(void)
21832 {
21833 cpu_missing = 0;
21834- atomic_set(&mce_fake_paniced, 0);
21835+ atomic_set_unchecked(&mce_fake_paniced, 0);
21836 atomic_set(&mce_executing, 0);
21837 atomic_set(&mce_callin, 0);
21838 atomic_set(&global_nwo, 0);
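
The mce.c changes above convert the panic counters to atomic_unchecked_t (a type this patch introduces to exempt intentional counters from reference-count overflow checking) and the chardev open count to local_t, which is cheap because the count is only ever touched under mce_chrdev_state_lock. A minimal sketch of the local_t pattern, assuming the same lock-protected usage:

#include <linux/spinlock.h>
#include <asm/local.h>

static DEFINE_SPINLOCK(state_lock);
static local_t open_count = LOCAL_INIT(0);

static int example_open(void)
{
	spin_lock(&state_lock);
	if (local_read(&open_count) > 0) {
		spin_unlock(&state_lock);
		return -EBUSY;		/* illustrative exclusivity rule */
	}
	local_inc(&open_count);
	spin_unlock(&state_lock);
	return 0;
}

static void example_release(void)
{
	spin_lock(&state_lock);
	local_dec(&open_count);
	spin_unlock(&state_lock);
}
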
21839diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
21840index a304298..49b6d06 100644
21841--- a/arch/x86/kernel/cpu/mcheck/p5.c
21842+++ b/arch/x86/kernel/cpu/mcheck/p5.c
21843@@ -10,6 +10,7 @@
21844 #include <asm/processor.h>
21845 #include <asm/mce.h>
21846 #include <asm/msr.h>
21847+#include <asm/pgtable.h>
21848
21849 /* By default disabled */
21850 int mce_p5_enabled __read_mostly;
21851@@ -48,7 +49,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
21852 if (!cpu_has(c, X86_FEATURE_MCE))
21853 return;
21854
21855+ pax_open_kernel();
21856 machine_check_vector = pentium_machine_check;
21857+ pax_close_kernel();
21858 /* Make sure the vector pointer is visible before we enable MCEs: */
21859 wmb();
21860
21861diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
21862index 7dc5564..1273569 100644
21863--- a/arch/x86/kernel/cpu/mcheck/winchip.c
21864+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
21865@@ -9,6 +9,7 @@
21866 #include <asm/processor.h>
21867 #include <asm/mce.h>
21868 #include <asm/msr.h>
21869+#include <asm/pgtable.h>
21870
21871 /* Machine check handler for WinChip C6: */
21872 static void winchip_machine_check(struct pt_regs *regs, long error_code)
21873@@ -22,7 +23,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
21874 {
21875 u32 lo, hi;
21876
21877+ pax_open_kernel();
21878 machine_check_vector = winchip_machine_check;
21879+ pax_close_kernel();
21880 /* Make sure the vector pointer is visible before we enable MCEs: */
21881 wmb();
21882
21883diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
21884index dd9d619..86e1d81 100644
21885--- a/arch/x86/kernel/cpu/microcode/core.c
21886+++ b/arch/x86/kernel/cpu/microcode/core.c
21887@@ -516,7 +516,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21888 return NOTIFY_OK;
21889 }
21890
21891-static struct notifier_block __refdata mc_cpu_notifier = {
21892+static struct notifier_block mc_cpu_notifier = {
21893 .notifier_call = mc_cpu_callback,
21894 };
21895
21896diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
21897index a276fa7..e66810f 100644
21898--- a/arch/x86/kernel/cpu/microcode/intel.c
21899+++ b/arch/x86/kernel/cpu/microcode/intel.c
21900@@ -293,13 +293,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21901
21902 static int get_ucode_user(void *to, const void *from, size_t n)
21903 {
21904- return copy_from_user(to, from, n);
21905+ return copy_from_user(to, (const void __force_user *)from, n);
21906 }
21907
21908 static enum ucode_state
21909 request_microcode_user(int cpu, const void __user *buf, size_t size)
21910 {
21911- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21912+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21913 }
21914
21915 static void microcode_fini_cpu(int cpu)
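
The __force_user and __force_kernel casts above adjust sparse address-space annotations so the checker accepts get_ucode_user()'s deliberate reuse across the user/kernel boundary. The underlying discipline, sketched minimally (fetch_u32 is an illustrative name):

#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Userland pointers carry the __user annotation; the only legitimate
 * crossing is via copy_from_user()/copy_to_user(), which validate the
 * range and fault gracefully. */
static int fetch_u32(u32 *dst, const u32 __user *src)
{
	if (copy_from_user(dst, src, sizeof(*dst)))
		return -EFAULT;
	return 0;
}
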
21916diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
21917index f961de9..8a9d332 100644
21918--- a/arch/x86/kernel/cpu/mtrr/main.c
21919+++ b/arch/x86/kernel/cpu/mtrr/main.c
21920@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
21921 u64 size_or_mask, size_and_mask;
21922 static bool mtrr_aps_delayed_init;
21923
21924-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
21925+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
21926
21927 const struct mtrr_ops *mtrr_if;
21928
21929diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
21930index df5e41f..816c719 100644
21931--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
21932+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
21933@@ -25,7 +25,7 @@ struct mtrr_ops {
21934 int (*validate_add_page)(unsigned long base, unsigned long size,
21935 unsigned int type);
21936 int (*have_wrcomb)(void);
21937-};
21938+} __do_const;
21939
21940 extern int generic_get_free_region(unsigned long base, unsigned long size,
21941 int replace_reg);
21942diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
21943index 2879ecd..bb8c80b 100644
21944--- a/arch/x86/kernel/cpu/perf_event.c
21945+++ b/arch/x86/kernel/cpu/perf_event.c
21946@@ -1372,7 +1372,7 @@ static void __init pmu_check_apic(void)
21947
21948 }
21949
21950-static struct attribute_group x86_pmu_format_group = {
21951+static attribute_group_no_const x86_pmu_format_group = {
21952 .name = "format",
21953 .attrs = NULL,
21954 };
21955@@ -1471,7 +1471,7 @@ static struct attribute *events_attr[] = {
21956 NULL,
21957 };
21958
21959-static struct attribute_group x86_pmu_events_group = {
21960+static attribute_group_no_const x86_pmu_events_group = {
21961 .name = "events",
21962 .attrs = events_attr,
21963 };
21964@@ -1995,7 +1995,7 @@ static unsigned long get_segment_base(unsigned int segment)
21965 if (idx > GDT_ENTRIES)
21966 return 0;
21967
21968- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
21969+ desc = get_cpu_gdt_table(smp_processor_id());
21970 }
21971
21972 return get_desc_base(desc + idx);
21973@@ -2085,7 +2085,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
21974 break;
21975
21976 perf_callchain_store(entry, frame.return_address);
21977- fp = frame.next_frame;
21978+ fp = (const void __force_user *)frame.next_frame;
21979 }
21980 }
21981
21982diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21983index 639d128..e92d7e5 100644
21984--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21985+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21986@@ -405,7 +405,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
21987 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
21988 {
21989 struct attribute **attrs;
21990- struct attribute_group *attr_group;
21991+ attribute_group_no_const *attr_group;
21992 int i = 0, j;
21993
21994 while (amd_iommu_v2_event_descs[i].attr.attr.name)
21995diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
21996index 2502d0d..e5cc05c 100644
21997--- a/arch/x86/kernel/cpu/perf_event_intel.c
21998+++ b/arch/x86/kernel/cpu/perf_event_intel.c
21999@@ -2353,10 +2353,10 @@ __init int intel_pmu_init(void)
22000 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
22001
22002 if (boot_cpu_has(X86_FEATURE_PDCM)) {
22003- u64 capabilities;
22004+ u64 capabilities = x86_pmu.intel_cap.capabilities;
22005
22006- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
22007- x86_pmu.intel_cap.capabilities = capabilities;
22008+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
22009+ x86_pmu.intel_cap.capabilities = capabilities;
22010 }
22011
22012 intel_ds_init();
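
The intel_pmu_init() hunk above swaps an unconditional rdmsrl() for rdmsrl_safe(), which catches the fault a missing MSR raises (common under hypervisors that do not emulate MSR_IA32_PERF_CAPABILITIES) and reports it through its return value, so the preloaded default survives. The shape of that pattern, as a hedged sketch:

#include <asm/msr.h>
#include <linux/types.h>

/* Read an MSR that may not exist; keep `deflt` if the access faults.
 * rdmsrl_safe() returns nonzero when the RDMSR traps. */
static u64 read_msr_or_default(unsigned int msr, u64 deflt)
{
	u64 val;

	if (rdmsrl_safe(msr, &val))
		return deflt;
	return val;
}
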
22013diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
22014index 619f769..d510008 100644
22015--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
22016+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
22017@@ -449,7 +449,7 @@ static struct attribute *rapl_events_hsw_attr[] = {
22018 NULL,
22019 };
22020
22021-static struct attribute_group rapl_pmu_events_group = {
22022+static attribute_group_no_const rapl_pmu_events_group __read_only = {
22023 .name = "events",
22024 .attrs = NULL, /* patched at runtime */
22025 };
22026diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
22027index ae6552a..b5be2d3 100644
22028--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
22029+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
22030@@ -3694,7 +3694,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
22031 static int __init uncore_type_init(struct intel_uncore_type *type)
22032 {
22033 struct intel_uncore_pmu *pmus;
22034- struct attribute_group *attr_group;
22035+ attribute_group_no_const *attr_group;
22036 struct attribute **attrs;
22037 int i, j;
22038
22039diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
22040index 90236f0..54cb20d 100644
22041--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
22042+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
22043@@ -503,7 +503,7 @@ struct intel_uncore_box {
22044 struct uncore_event_desc {
22045 struct kobj_attribute attr;
22046 const char *config;
22047-};
22048+} __do_const;
22049
22050 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
22051 { \
22052diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
22053index 3225ae6c..ee3c6db 100644
22054--- a/arch/x86/kernel/cpuid.c
22055+++ b/arch/x86/kernel/cpuid.c
22056@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
22057 return notifier_from_errno(err);
22058 }
22059
22060-static struct notifier_block __refdata cpuid_class_cpu_notifier =
22061+static struct notifier_block cpuid_class_cpu_notifier =
22062 {
22063 .notifier_call = cpuid_class_cpu_callback,
22064 };
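
Both the microcode and cpuid hunks above drop __refdata from CPU-hotplug notifier blocks, turning them into ordinary data that grsecurity's constification machinery can then lock down. A minimal sketch of such a notifier as registered in this kernel generation, with illustrative names:

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpu.h>

static int example_cpu_callback(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	/* react to CPU_ONLINE, CPU_DEAD, ... as needed */
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
	.notifier_call = example_cpu_callback,
};

static int __init example_init(void)
{
	register_hotcpu_notifier(&example_cpu_notifier);
	return 0;
}
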
22065diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
22066index 507de80..ebaae2a 100644
22067--- a/arch/x86/kernel/crash.c
22068+++ b/arch/x86/kernel/crash.c
22069@@ -58,7 +58,7 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
22070 #ifdef CONFIG_X86_32
22071 struct pt_regs fixed_regs;
22072
22073- if (!user_mode_vm(regs)) {
22074+ if (!user_mode(regs)) {
22075 crash_fixup_ss_esp(&fixed_regs, regs);
22076 regs = &fixed_regs;
22077 }
22078diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
22079index afa64ad..dce67dd 100644
22080--- a/arch/x86/kernel/crash_dump_64.c
22081+++ b/arch/x86/kernel/crash_dump_64.c
22082@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
22083 return -ENOMEM;
22084
22085 if (userbuf) {
22086- if (copy_to_user(buf, vaddr + offset, csize)) {
22087+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
22088 iounmap(vaddr);
22089 return -EFAULT;
22090 }
22091diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
22092index f6dfd93..892ade4 100644
22093--- a/arch/x86/kernel/doublefault.c
22094+++ b/arch/x86/kernel/doublefault.c
22095@@ -12,7 +12,7 @@
22096
22097 #define DOUBLEFAULT_STACKSIZE (1024)
22098 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
22099-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
22100+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
22101
22102 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
22103
22104@@ -22,7 +22,7 @@ static void doublefault_fn(void)
22105 unsigned long gdt, tss;
22106
22107 native_store_gdt(&gdt_desc);
22108- gdt = gdt_desc.address;
22109+ gdt = (unsigned long)gdt_desc.address;
22110
22111 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
22112
22113@@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
22114 /* 0x2 bit is always set */
22115 .flags = X86_EFLAGS_SF | 0x2,
22116 .sp = STACK_START,
22117- .es = __USER_DS,
22118+ .es = __KERNEL_DS,
22119 .cs = __KERNEL_CS,
22120 .ss = __KERNEL_DS,
22121- .ds = __USER_DS,
22122+ .ds = __KERNEL_DS,
22123 .fs = __KERNEL_PERCPU,
22124
22125 .__cr3 = __pa_nodebug(swapper_pg_dir),
22126diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
22127index b74ebc7..6dbb0c5 100644
22128--- a/arch/x86/kernel/dumpstack.c
22129+++ b/arch/x86/kernel/dumpstack.c
22130@@ -2,6 +2,9 @@
22131 * Copyright (C) 1991, 1992 Linus Torvalds
22132 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
22133 */
22134+#ifdef CONFIG_GRKERNSEC_HIDESYM
22135+#define __INCLUDED_BY_HIDESYM 1
22136+#endif
22137 #include <linux/kallsyms.h>
22138 #include <linux/kprobes.h>
22139 #include <linux/uaccess.h>
22140@@ -40,16 +43,14 @@ void printk_address(unsigned long address)
22141 static void
22142 print_ftrace_graph_addr(unsigned long addr, void *data,
22143 const struct stacktrace_ops *ops,
22144- struct thread_info *tinfo, int *graph)
22145+ struct task_struct *task, int *graph)
22146 {
22147- struct task_struct *task;
22148 unsigned long ret_addr;
22149 int index;
22150
22151 if (addr != (unsigned long)return_to_handler)
22152 return;
22153
22154- task = tinfo->task;
22155 index = task->curr_ret_stack;
22156
22157 if (!task->ret_stack || index < *graph)
22158@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
22159 static inline void
22160 print_ftrace_graph_addr(unsigned long addr, void *data,
22161 const struct stacktrace_ops *ops,
22162- struct thread_info *tinfo, int *graph)
22163+ struct task_struct *task, int *graph)
22164 { }
22165 #endif
22166
22167@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
22168 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
22169 */
22170
22171-static inline int valid_stack_ptr(struct thread_info *tinfo,
22172- void *p, unsigned int size, void *end)
22173+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
22174 {
22175- void *t = tinfo;
22176 if (end) {
22177 if (p < end && p >= (end-THREAD_SIZE))
22178 return 1;
22179@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
22180 }
22181
22182 unsigned long
22183-print_context_stack(struct thread_info *tinfo,
22184+print_context_stack(struct task_struct *task, void *stack_start,
22185 unsigned long *stack, unsigned long bp,
22186 const struct stacktrace_ops *ops, void *data,
22187 unsigned long *end, int *graph)
22188 {
22189 struct stack_frame *frame = (struct stack_frame *)bp;
22190
22191- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
22192+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
22193 unsigned long addr;
22194
22195 addr = *stack;
22196@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo,
22197 } else {
22198 ops->address(data, addr, 0);
22199 }
22200- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22201+ print_ftrace_graph_addr(addr, data, ops, task, graph);
22202 }
22203 stack++;
22204 }
22205@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
22206 EXPORT_SYMBOL_GPL(print_context_stack);
22207
22208 unsigned long
22209-print_context_stack_bp(struct thread_info *tinfo,
22210+print_context_stack_bp(struct task_struct *task, void *stack_start,
22211 unsigned long *stack, unsigned long bp,
22212 const struct stacktrace_ops *ops, void *data,
22213 unsigned long *end, int *graph)
22214@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22215 struct stack_frame *frame = (struct stack_frame *)bp;
22216 unsigned long *ret_addr = &frame->return_address;
22217
22218- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
22219+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
22220 unsigned long addr = *ret_addr;
22221
22222 if (!__kernel_text_address(addr))
22223@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22224 ops->address(data, addr, 1);
22225 frame = frame->next_frame;
22226 ret_addr = &frame->return_address;
22227- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22228+ print_ftrace_graph_addr(addr, data, ops, task, graph);
22229 }
22230
22231 return (unsigned long)frame;
22232@@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name)
22233 static void print_trace_address(void *data, unsigned long addr, int reliable)
22234 {
22235 touch_nmi_watchdog();
22236- printk(data);
22237+ printk("%s", (char *)data);
22238 printk_stack_address(addr, reliable);
22239 }
22240
22241@@ -225,6 +224,8 @@ unsigned long oops_begin(void)
22242 EXPORT_SYMBOL_GPL(oops_begin);
22243 NOKPROBE_SYMBOL(oops_begin);
22244
22245+extern void gr_handle_kernel_exploit(void);
22246+
22247 void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22248 {
22249 if (regs && kexec_should_crash(current))
22250@@ -246,7 +247,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22251 panic("Fatal exception in interrupt");
22252 if (panic_on_oops)
22253 panic("Fatal exception");
22254- do_exit(signr);
22255+
22256+ gr_handle_kernel_exploit();
22257+
22258+ do_group_exit(signr);
22259 }
22260 NOKPROBE_SYMBOL(oops_end);
22261
22262@@ -275,7 +279,7 @@ int __die(const char *str, struct pt_regs *regs, long err)
22263 print_modules();
22264 show_regs(regs);
22265 #ifdef CONFIG_X86_32
22266- if (user_mode_vm(regs)) {
22267+ if (user_mode(regs)) {
22268 sp = regs->sp;
22269 ss = regs->ss & 0xffff;
22270 } else {
22271@@ -304,7 +308,7 @@ void die(const char *str, struct pt_regs *regs, long err)
22272 unsigned long flags = oops_begin();
22273 int sig = SIGSEGV;
22274
22275- if (!user_mode_vm(regs))
22276+ if (!user_mode(regs))
22277 report_bug(regs->ip, regs);
22278
22279 if (__die(str, regs, err))
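
The dumpstack.c rework above threads an explicit stack_start through the walkers instead of deriving bounds from a thread_info pointer, and valid_stack_ptr() now checks containment against that base. The test reduces to a range check, sketched here with hypothetical names and a fixed stand-in for THREAD_SIZE:

#include <stddef.h>

#define EXAMPLE_THREAD_SIZE 8192UL

/* An object of `size` bytes at `p` must lie wholly inside the stack
 * region beginning at `stack_start`. */
static inline int on_stack(const void *stack_start, const void *p,
			   size_t size)
{
	const char *base = stack_start;
	const char *obj = p;

	return obj >= base && obj + size <= base + EXAMPLE_THREAD_SIZE;
}
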
22280diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
22281index 5abd4cd..c65733b 100644
22282--- a/arch/x86/kernel/dumpstack_32.c
22283+++ b/arch/x86/kernel/dumpstack_32.c
22284@@ -61,15 +61,14 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22285 bp = stack_frame(task, regs);
22286
22287 for (;;) {
22288- struct thread_info *context;
22289+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22290 void *end_stack;
22291
22292 end_stack = is_hardirq_stack(stack, cpu);
22293 if (!end_stack)
22294 end_stack = is_softirq_stack(stack, cpu);
22295
22296- context = task_thread_info(task);
22297- bp = ops->walk_stack(context, stack, bp, ops, data,
22298+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data,
22299 end_stack, &graph);
22300
22301 /* Stop if not on irq stack */
22302@@ -123,27 +122,28 @@ void show_regs(struct pt_regs *regs)
22303 int i;
22304
22305 show_regs_print_info(KERN_EMERG);
22306- __show_regs(regs, !user_mode_vm(regs));
22307+ __show_regs(regs, !user_mode(regs));
22308
22309 /*
22310 * When in-kernel, we also print out the stack and code at the
22311 * time of the fault..
22312 */
22313- if (!user_mode_vm(regs)) {
22314+ if (!user_mode(regs)) {
22315 unsigned int code_prologue = code_bytes * 43 / 64;
22316 unsigned int code_len = code_bytes;
22317 unsigned char c;
22318 u8 *ip;
22319+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
22320
22321 pr_emerg("Stack:\n");
22322 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
22323
22324 pr_emerg("Code:");
22325
22326- ip = (u8 *)regs->ip - code_prologue;
22327+ ip = (u8 *)regs->ip - code_prologue + cs_base;
22328 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
22329 /* try starting at IP */
22330- ip = (u8 *)regs->ip;
22331+ ip = (u8 *)regs->ip + cs_base;
22332 code_len = code_len - code_prologue + 1;
22333 }
22334 for (i = 0; i < code_len; i++, ip++) {
22335@@ -152,7 +152,7 @@ void show_regs(struct pt_regs *regs)
22336 pr_cont(" Bad EIP value.");
22337 break;
22338 }
22339- if (ip == (u8 *)regs->ip)
22340+ if (ip == (u8 *)regs->ip + cs_base)
22341 pr_cont(" <%02x>", c);
22342 else
22343 pr_cont(" %02x", c);
22344@@ -165,6 +165,7 @@ int is_valid_bugaddr(unsigned long ip)
22345 {
22346 unsigned short ud2;
22347
22348+ ip = ktla_ktva(ip);
22349 if (ip < PAGE_OFFSET)
22350 return 0;
22351 if (probe_kernel_address((unsigned short *)ip, ud2))
22352@@ -172,3 +173,15 @@ int is_valid_bugaddr(unsigned long ip)
22353
22354 return ud2 == 0x0b0f;
22355 }
22356+
22357+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22358+void pax_check_alloca(unsigned long size)
22359+{
22360+ unsigned long sp = (unsigned long)&sp, stack_left;
22361+
22362+ /* all kernel stacks are of the same size */
22363+ stack_left = sp & (THREAD_SIZE - 1);
22364+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22365+}
22366+EXPORT_SYMBOL(pax_check_alloca);
22367+#endif
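
The pax_check_alloca() added above derives the remaining headroom with a single mask, which is valid because x86-32 kernel stacks are THREAD_SIZE-aligned and THREAD_SIZE is a power of two, so the low bits of the stack pointer are its offset from the bottom of the region. A small userspace demonstration of the arithmetic:

#include <stdio.h>

#define THREAD_SIZE 8192UL	/* power of two; region aligned to it */

int main(void)
{
	unsigned long sp = 0xc12345f0UL;	/* example stack pointer */

	/* offset from the region's bottom; on a downward-growing stack
	 * this is exactly the space still available */
	unsigned long stack_left = sp & (THREAD_SIZE - 1);

	printf("space left below sp: %lu bytes\n", stack_left);
	return 0;
}
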
22368diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
22369index 1abcb50..6c8d702 100644
22370--- a/arch/x86/kernel/dumpstack_64.c
22371+++ b/arch/x86/kernel/dumpstack_64.c
22372@@ -154,12 +154,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22373 const struct stacktrace_ops *ops, void *data)
22374 {
22375 const unsigned cpu = get_cpu();
22376- struct thread_info *tinfo;
22377 unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
22378 unsigned long dummy;
22379 unsigned used = 0;
22380 int graph = 0;
22381 int done = 0;
22382+ void *stack_start;
22383
22384 if (!task)
22385 task = current;
22386@@ -180,7 +180,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22387 * current stack address. If the stacks consist of nested
22388 * exceptions
22389 */
22390- tinfo = task_thread_info(task);
22391 while (!done) {
22392 unsigned long *stack_end;
22393 enum stack_type stype;
22394@@ -203,7 +202,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22395 if (ops->stack(data, id) < 0)
22396 break;
22397
22398- bp = ops->walk_stack(tinfo, stack, bp, ops,
22399+ bp = ops->walk_stack(task, stack_end - EXCEPTION_STKSZ, stack, bp, ops,
22400 data, stack_end, &graph);
22401 ops->stack(data, "<EOE>");
22402 /*
22403@@ -211,6 +210,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22404 * second-to-last pointer (index -2 to end) in the
22405 * exception stack:
22406 */
22407+ if ((u16)stack_end[-1] != __KERNEL_DS)
22408+ goto out;
22409 stack = (unsigned long *) stack_end[-2];
22410 done = 0;
22411 break;
22412@@ -219,7 +220,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22413
22414 if (ops->stack(data, "IRQ") < 0)
22415 break;
22416- bp = ops->walk_stack(tinfo, stack, bp,
22417+ bp = ops->walk_stack(task, irq_stack, stack, bp,
22418 ops, data, stack_end, &graph);
22419 /*
22420 * We link to the next stack (which would be
22421@@ -241,7 +242,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22422 /*
22423 * This handles the process stack:
22424 */
22425- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
22426+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22427+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
22428+out:
22429 put_cpu();
22430 }
22431 EXPORT_SYMBOL(dump_trace);
22432@@ -350,3 +353,50 @@ int is_valid_bugaddr(unsigned long ip)
22433
22434 return ud2 == 0x0b0f;
22435 }
22436+
22437+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22438+void pax_check_alloca(unsigned long size)
22439+{
22440+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
22441+ unsigned cpu, used;
22442+ char *id;
22443+
22444+ /* check the process stack first */
22445+ stack_start = (unsigned long)task_stack_page(current);
22446+ stack_end = stack_start + THREAD_SIZE;
22447+ if (likely(stack_start <= sp && sp < stack_end)) {
22448+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
22449+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22450+ return;
22451+ }
22452+
22453+ cpu = get_cpu();
22454+
22455+ /* check the irq stacks */
22456+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
22457+ stack_start = stack_end - IRQ_STACK_SIZE;
22458+ if (stack_start <= sp && sp < stack_end) {
22459+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
22460+ put_cpu();
22461+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22462+ return;
22463+ }
22464+
22465+ /* check the exception stacks */
22466+ used = 0;
22467+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
22468+ stack_start = stack_end - EXCEPTION_STKSZ;
22469+ if (stack_end && stack_start <= sp && sp < stack_end) {
22470+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
22471+ put_cpu();
22472+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22473+ return;
22474+ }
22475+
22476+ put_cpu();
22477+
22478+ /* unknown stack */
22479+ BUG();
22480+}
22481+EXPORT_SYMBOL(pax_check_alloca);
22482+#endif
22483diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
22484index 988c00a..4f673b6 100644
22485--- a/arch/x86/kernel/e820.c
22486+++ b/arch/x86/kernel/e820.c
22487@@ -803,8 +803,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
22488
22489 static void early_panic(char *msg)
22490 {
22491- early_printk(msg);
22492- panic(msg);
22493+ early_printk("%s", msg);
22494+ panic("%s", msg);
22495 }
22496
22497 static int userdef __initdata;
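
The early_panic() change above forwards msg through an explicit "%s" rather than using it as the format string; if msg ever carried a '%' directive (for example from a user-supplied command-line fragment), the old call would misinterpret it. The same hygiene in a standalone demo:

#include <stdio.h>

static void log_msg(const char *msg)
{
	printf("%s\n", msg);	/* safe: msg is treated as data */
	/* printf(msg); would parse any '%' in msg as a conversion */
}

int main(void)
{
	log_msg("resize to 100% of limit failed");
	return 0;
}
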
22498diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
22499index 01d1c18..8073693 100644
22500--- a/arch/x86/kernel/early_printk.c
22501+++ b/arch/x86/kernel/early_printk.c
22502@@ -7,6 +7,7 @@
22503 #include <linux/pci_regs.h>
22504 #include <linux/pci_ids.h>
22505 #include <linux/errno.h>
22506+#include <linux/sched.h>
22507 #include <asm/io.h>
22508 #include <asm/processor.h>
22509 #include <asm/fcntl.h>
22510diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
22511index 0d0c9d4..f65b4f6 100644
22512--- a/arch/x86/kernel/entry_32.S
22513+++ b/arch/x86/kernel/entry_32.S
22514@@ -177,13 +177,153 @@
22515 /*CFI_REL_OFFSET gs, PT_GS*/
22516 .endm
22517 .macro SET_KERNEL_GS reg
22518+
22519+#ifdef CONFIG_CC_STACKPROTECTOR
22520 movl $(__KERNEL_STACK_CANARY), \reg
22521+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
22522+ movl $(__USER_DS), \reg
22523+#else
22524+ xorl \reg, \reg
22525+#endif
22526+
22527 movl \reg, %gs
22528 .endm
22529
22530 #endif /* CONFIG_X86_32_LAZY_GS */
22531
22532-.macro SAVE_ALL
22533+.macro pax_enter_kernel
22534+#ifdef CONFIG_PAX_KERNEXEC
22535+ call pax_enter_kernel
22536+#endif
22537+.endm
22538+
22539+.macro pax_exit_kernel
22540+#ifdef CONFIG_PAX_KERNEXEC
22541+ call pax_exit_kernel
22542+#endif
22543+.endm
22544+
22545+#ifdef CONFIG_PAX_KERNEXEC
22546+ENTRY(pax_enter_kernel)
22547+#ifdef CONFIG_PARAVIRT
22548+ pushl %eax
22549+ pushl %ecx
22550+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
22551+ mov %eax, %esi
22552+#else
22553+ mov %cr0, %esi
22554+#endif
22555+ bts $16, %esi
22556+ jnc 1f
22557+ mov %cs, %esi
22558+ cmp $__KERNEL_CS, %esi
22559+ jz 3f
22560+ ljmp $__KERNEL_CS, $3f
22561+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
22562+2:
22563+#ifdef CONFIG_PARAVIRT
22564+ mov %esi, %eax
22565+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
22566+#else
22567+ mov %esi, %cr0
22568+#endif
22569+3:
22570+#ifdef CONFIG_PARAVIRT
22571+ popl %ecx
22572+ popl %eax
22573+#endif
22574+ ret
22575+ENDPROC(pax_enter_kernel)
22576+
22577+ENTRY(pax_exit_kernel)
22578+#ifdef CONFIG_PARAVIRT
22579+ pushl %eax
22580+ pushl %ecx
22581+#endif
22582+ mov %cs, %esi
22583+ cmp $__KERNEXEC_KERNEL_CS, %esi
22584+ jnz 2f
22585+#ifdef CONFIG_PARAVIRT
22586+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
22587+ mov %eax, %esi
22588+#else
22589+ mov %cr0, %esi
22590+#endif
22591+ btr $16, %esi
22592+ ljmp $__KERNEL_CS, $1f
22593+1:
22594+#ifdef CONFIG_PARAVIRT
22595+ mov %esi, %eax
22596+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
22597+#else
22598+ mov %esi, %cr0
22599+#endif
22600+2:
22601+#ifdef CONFIG_PARAVIRT
22602+ popl %ecx
22603+ popl %eax
22604+#endif
22605+ ret
22606+ENDPROC(pax_exit_kernel)
22607+#endif
22608+
22609+ .macro pax_erase_kstack
22610+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22611+ call pax_erase_kstack
22612+#endif
22613+ .endm
22614+
22615+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22616+/*
22617+ * ebp: thread_info
22618+ */
22619+ENTRY(pax_erase_kstack)
22620+ pushl %edi
22621+ pushl %ecx
22622+ pushl %eax
22623+
22624+ mov TI_lowest_stack(%ebp), %edi
22625+ mov $-0xBEEF, %eax
22626+ std
22627+
22628+1: mov %edi, %ecx
22629+ and $THREAD_SIZE_asm - 1, %ecx
22630+ shr $2, %ecx
22631+ repne scasl
22632+ jecxz 2f
22633+
22634+ cmp $2*16, %ecx
22635+ jc 2f
22636+
22637+ mov $2*16, %ecx
22638+ repe scasl
22639+ jecxz 2f
22640+ jne 1b
22641+
22642+2: cld
22643+ mov %esp, %ecx
22644+ sub %edi, %ecx
22645+
22646+ cmp $THREAD_SIZE_asm, %ecx
22647+ jb 3f
22648+ ud2
22649+3:
22650+
22651+ shr $2, %ecx
22652+ rep stosl
22653+
22654+ mov TI_task_thread_sp0(%ebp), %edi
22655+ sub $128, %edi
22656+ mov %edi, TI_lowest_stack(%ebp)
22657+
22658+ popl %eax
22659+ popl %ecx
22660+ popl %edi
22661+ ret
22662+ENDPROC(pax_erase_kstack)
22663+#endif
22664+
22665+.macro __SAVE_ALL _DS
22666 cld
22667 PUSH_GS
22668 pushl_cfi %fs
22669@@ -206,7 +346,7 @@
22670 CFI_REL_OFFSET ecx, 0
22671 pushl_cfi %ebx
22672 CFI_REL_OFFSET ebx, 0
22673- movl $(__USER_DS), %edx
22674+ movl $\_DS, %edx
22675 movl %edx, %ds
22676 movl %edx, %es
22677 movl $(__KERNEL_PERCPU), %edx
22678@@ -214,6 +354,15 @@
22679 SET_KERNEL_GS %edx
22680 .endm
22681
22682+.macro SAVE_ALL
22683+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22684+ __SAVE_ALL __KERNEL_DS
22685+ pax_enter_kernel
22686+#else
22687+ __SAVE_ALL __USER_DS
22688+#endif
22689+.endm
22690+
22691 .macro RESTORE_INT_REGS
22692 popl_cfi %ebx
22693 CFI_RESTORE ebx
22694@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
22695 popfl_cfi
22696 jmp syscall_exit
22697 CFI_ENDPROC
22698-END(ret_from_fork)
22699+ENDPROC(ret_from_fork)
22700
22701 ENTRY(ret_from_kernel_thread)
22702 CFI_STARTPROC
22703@@ -340,7 +489,15 @@ ret_from_intr:
22704 andl $SEGMENT_RPL_MASK, %eax
22705 #endif
22706 cmpl $USER_RPL, %eax
22707+
22708+#ifdef CONFIG_PAX_KERNEXEC
22709+ jae resume_userspace
22710+
22711+ pax_exit_kernel
22712+ jmp resume_kernel
22713+#else
22714 jb resume_kernel # not returning to v8086 or userspace
22715+#endif
22716
22717 ENTRY(resume_userspace)
22718 LOCKDEP_SYS_EXIT
22719@@ -352,8 +509,8 @@ ENTRY(resume_userspace)
22720 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
22721 # int/exception return?
22722 jne work_pending
22723- jmp restore_all
22724-END(ret_from_exception)
22725+ jmp restore_all_pax
22726+ENDPROC(ret_from_exception)
22727
22728 #ifdef CONFIG_PREEMPT
22729 ENTRY(resume_kernel)
22730@@ -365,7 +522,7 @@ need_resched:
22731 jz restore_all
22732 call preempt_schedule_irq
22733 jmp need_resched
22734-END(resume_kernel)
22735+ENDPROC(resume_kernel)
22736 #endif
22737 CFI_ENDPROC
22738
22739@@ -395,30 +552,45 @@ sysenter_past_esp:
22740 /*CFI_REL_OFFSET cs, 0*/
22741 /*
22742 * Push current_thread_info()->sysenter_return to the stack.
22743- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
22744- * pushed above; +8 corresponds to copy_thread's esp0 setting.
22745 */
22746- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
22747+ pushl_cfi $0
22748 CFI_REL_OFFSET eip, 0
22749
22750 pushl_cfi %eax
22751 SAVE_ALL
22752+ GET_THREAD_INFO(%ebp)
22753+ movl TI_sysenter_return(%ebp),%ebp
22754+ movl %ebp,PT_EIP(%esp)
22755 ENABLE_INTERRUPTS(CLBR_NONE)
22756
22757 /*
22758 * Load the potential sixth argument from user stack.
22759 * Careful about security.
22760 */
22761+ movl PT_OLDESP(%esp),%ebp
22762+
22763+#ifdef CONFIG_PAX_MEMORY_UDEREF
22764+ mov PT_OLDSS(%esp),%ds
22765+1: movl %ds:(%ebp),%ebp
22766+ push %ss
22767+ pop %ds
22768+#else
22769 cmpl $__PAGE_OFFSET-3,%ebp
22770 jae syscall_fault
22771 ASM_STAC
22772 1: movl (%ebp),%ebp
22773 ASM_CLAC
22774+#endif
22775+
22776 movl %ebp,PT_EBP(%esp)
22777 _ASM_EXTABLE(1b,syscall_fault)
22778
22779 GET_THREAD_INFO(%ebp)
22780
22781+#ifdef CONFIG_PAX_RANDKSTACK
22782+ pax_erase_kstack
22783+#endif
22784+
22785 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22786 jnz sysenter_audit
22787 sysenter_do_call:
22788@@ -434,12 +606,24 @@ sysenter_after_call:
22789 testl $_TIF_ALLWORK_MASK, %ecx
22790 jne sysexit_audit
22791 sysenter_exit:
22792+
22793+#ifdef CONFIG_PAX_RANDKSTACK
22794+ pushl_cfi %eax
22795+ movl %esp, %eax
22796+ call pax_randomize_kstack
22797+ popl_cfi %eax
22798+#endif
22799+
22800+ pax_erase_kstack
22801+
22802 /* if something modifies registers it must also disable sysexit */
22803 movl PT_EIP(%esp), %edx
22804 movl PT_OLDESP(%esp), %ecx
22805 xorl %ebp,%ebp
22806 TRACE_IRQS_ON
22807 1: mov PT_FS(%esp), %fs
22808+2: mov PT_DS(%esp), %ds
22809+3: mov PT_ES(%esp), %es
22810 PTGS_TO_GS
22811 ENABLE_INTERRUPTS_SYSEXIT
22812
22813@@ -456,6 +640,9 @@ sysenter_audit:
22814 movl %eax,%edx /* 2nd arg: syscall number */
22815 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
22816 call __audit_syscall_entry
22817+
22818+ pax_erase_kstack
22819+
22820 pushl_cfi %ebx
22821 movl PT_EAX(%esp),%eax /* reload syscall number */
22822 jmp sysenter_do_call
22823@@ -481,10 +668,16 @@ sysexit_audit:
22824
22825 CFI_ENDPROC
22826 .pushsection .fixup,"ax"
22827-2: movl $0,PT_FS(%esp)
22828+4: movl $0,PT_FS(%esp)
22829+ jmp 1b
22830+5: movl $0,PT_DS(%esp)
22831+ jmp 1b
22832+6: movl $0,PT_ES(%esp)
22833 jmp 1b
22834 .popsection
22835- _ASM_EXTABLE(1b,2b)
22836+ _ASM_EXTABLE(1b,4b)
22837+ _ASM_EXTABLE(2b,5b)
22838+ _ASM_EXTABLE(3b,6b)
22839 PTGS_TO_GS_EX
22840 ENDPROC(ia32_sysenter_target)
22841
22842@@ -495,6 +688,11 @@ ENTRY(system_call)
22843 pushl_cfi %eax # save orig_eax
22844 SAVE_ALL
22845 GET_THREAD_INFO(%ebp)
22846+
22847+#ifdef CONFIG_PAX_RANDKSTACK
22848+ pax_erase_kstack
22849+#endif
22850+
22851 # system call tracing in operation / emulation
22852 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22853 jnz syscall_trace_entry
22854@@ -514,6 +712,15 @@ syscall_exit:
22855 testl $_TIF_ALLWORK_MASK, %ecx # current->work
22856 jne syscall_exit_work
22857
22858+restore_all_pax:
22859+
22860+#ifdef CONFIG_PAX_RANDKSTACK
22861+ movl %esp, %eax
22862+ call pax_randomize_kstack
22863+#endif
22864+
22865+ pax_erase_kstack
22866+
22867 restore_all:
22868 TRACE_IRQS_IRET
22869 restore_all_notrace:
22870@@ -568,14 +775,34 @@ ldt_ss:
22871 * compensating for the offset by changing to the ESPFIX segment with
22872 * a base address that matches for the difference.
22873 */
22874-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
22875+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
22876 mov %esp, %edx /* load kernel esp */
22877 mov PT_OLDESP(%esp), %eax /* load userspace esp */
22878 mov %dx, %ax /* eax: new kernel esp */
22879 sub %eax, %edx /* offset (low word is 0) */
22880+#ifdef CONFIG_SMP
22881+ movl PER_CPU_VAR(cpu_number), %ebx
22882+ shll $PAGE_SHIFT_asm, %ebx
22883+ addl $cpu_gdt_table, %ebx
22884+#else
22885+ movl $cpu_gdt_table, %ebx
22886+#endif
22887 shr $16, %edx
22888- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
22889- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
22890+
22891+#ifdef CONFIG_PAX_KERNEXEC
22892+ mov %cr0, %esi
22893+ btr $16, %esi
22894+ mov %esi, %cr0
22895+#endif
22896+
22897+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
22898+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
22899+
22900+#ifdef CONFIG_PAX_KERNEXEC
22901+ bts $16, %esi
22902+ mov %esi, %cr0
22903+#endif
22904+
22905 pushl_cfi $__ESPFIX_SS
22906 pushl_cfi %eax /* new kernel esp */
22907 /* Disable interrupts, but do not irqtrace this section: we
22908@@ -605,20 +832,18 @@ work_resched:
22909 movl TI_flags(%ebp), %ecx
22910 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
22911 # than syscall tracing?
22912- jz restore_all
22913+ jz restore_all_pax
22914 testb $_TIF_NEED_RESCHED, %cl
22915 jnz work_resched
22916
22917 work_notifysig: # deal with pending signals and
22918 # notify-resume requests
22919+ movl %esp, %eax
22920 #ifdef CONFIG_VM86
22921 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
22922- movl %esp, %eax
22923 jne work_notifysig_v86 # returning to kernel-space or
22924 # vm86-space
22925 1:
22926-#else
22927- movl %esp, %eax
22928 #endif
22929 TRACE_IRQS_ON
22930 ENABLE_INTERRUPTS(CLBR_NONE)
22931@@ -639,7 +864,7 @@ work_notifysig_v86:
22932 movl %eax, %esp
22933 jmp 1b
22934 #endif
22935-END(work_pending)
22936+ENDPROC(work_pending)
22937
22938 # perform syscall exit tracing
22939 ALIGN
22940@@ -647,11 +872,14 @@ syscall_trace_entry:
22941 movl $-ENOSYS,PT_EAX(%esp)
22942 movl %esp, %eax
22943 call syscall_trace_enter
22944+
22945+ pax_erase_kstack
22946+
22947 /* What it returned is what we'll actually use. */
22948 cmpl $(NR_syscalls), %eax
22949 jnae syscall_call
22950 jmp syscall_exit
22951-END(syscall_trace_entry)
22952+ENDPROC(syscall_trace_entry)
22953
22954 # perform syscall exit tracing
22955 ALIGN
22956@@ -664,26 +892,30 @@ syscall_exit_work:
22957 movl %esp, %eax
22958 call syscall_trace_leave
22959 jmp resume_userspace
22960-END(syscall_exit_work)
22961+ENDPROC(syscall_exit_work)
22962 CFI_ENDPROC
22963
22964 RING0_INT_FRAME # can't unwind into user space anyway
22965 syscall_fault:
22966+#ifdef CONFIG_PAX_MEMORY_UDEREF
22967+ push %ss
22968+ pop %ds
22969+#endif
22970 ASM_CLAC
22971 GET_THREAD_INFO(%ebp)
22972 movl $-EFAULT,PT_EAX(%esp)
22973 jmp resume_userspace
22974-END(syscall_fault)
22975+ENDPROC(syscall_fault)
22976
22977 syscall_badsys:
22978 movl $-ENOSYS,%eax
22979 jmp syscall_after_call
22980-END(syscall_badsys)
22981+ENDPROC(syscall_badsys)
22982
22983 sysenter_badsys:
22984 movl $-ENOSYS,%eax
22985 jmp sysenter_after_call
22986-END(syscall_badsys)
22987+ENDPROC(sysenter_badsys)
22988 CFI_ENDPROC
22989
22990 .macro FIXUP_ESPFIX_STACK
22991@@ -696,8 +928,15 @@ END(syscall_badsys)
22992 */
22993 #ifdef CONFIG_X86_ESPFIX32
22994 /* fixup the stack */
22995- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
22996- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
22997+#ifdef CONFIG_SMP
22998+ movl PER_CPU_VAR(cpu_number), %ebx
22999+ shll $PAGE_SHIFT_asm, %ebx
23000+ addl $cpu_gdt_table, %ebx
23001+#else
23002+ movl $cpu_gdt_table, %ebx
23003+#endif
23004+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
23005+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
23006 shl $16, %eax
23007 addl %esp, %eax /* the adjusted stack pointer */
23008 pushl_cfi $__KERNEL_DS
23009@@ -753,7 +992,7 @@ vector=vector+1
23010 .endr
23011 2: jmp common_interrupt
23012 .endr
23013-END(irq_entries_start)
23014+ENDPROC(irq_entries_start)
23015
23016 .previous
23017 END(interrupt)
23018@@ -810,7 +1049,7 @@ ENTRY(coprocessor_error)
23019 pushl_cfi $do_coprocessor_error
23020 jmp error_code
23021 CFI_ENDPROC
23022-END(coprocessor_error)
23023+ENDPROC(coprocessor_error)
23024
23025 ENTRY(simd_coprocessor_error)
23026 RING0_INT_FRAME
23027@@ -823,7 +1062,7 @@ ENTRY(simd_coprocessor_error)
23028 .section .altinstructions,"a"
23029 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
23030 .previous
23031-.section .altinstr_replacement,"ax"
23032+.section .altinstr_replacement,"a"
23033 663: pushl $do_simd_coprocessor_error
23034 664:
23035 .previous
23036@@ -832,7 +1071,7 @@ ENTRY(simd_coprocessor_error)
23037 #endif
23038 jmp error_code
23039 CFI_ENDPROC
23040-END(simd_coprocessor_error)
23041+ENDPROC(simd_coprocessor_error)
23042
23043 ENTRY(device_not_available)
23044 RING0_INT_FRAME
23045@@ -841,18 +1080,18 @@ ENTRY(device_not_available)
23046 pushl_cfi $do_device_not_available
23047 jmp error_code
23048 CFI_ENDPROC
23049-END(device_not_available)
23050+ENDPROC(device_not_available)
23051
23052 #ifdef CONFIG_PARAVIRT
23053 ENTRY(native_iret)
23054 iret
23055 _ASM_EXTABLE(native_iret, iret_exc)
23056-END(native_iret)
23057+ENDPROC(native_iret)
23058
23059 ENTRY(native_irq_enable_sysexit)
23060 sti
23061 sysexit
23062-END(native_irq_enable_sysexit)
23063+ENDPROC(native_irq_enable_sysexit)
23064 #endif
23065
23066 ENTRY(overflow)
23067@@ -862,7 +1101,7 @@ ENTRY(overflow)
23068 pushl_cfi $do_overflow
23069 jmp error_code
23070 CFI_ENDPROC
23071-END(overflow)
23072+ENDPROC(overflow)
23073
23074 ENTRY(bounds)
23075 RING0_INT_FRAME
23076@@ -871,7 +1110,7 @@ ENTRY(bounds)
23077 pushl_cfi $do_bounds
23078 jmp error_code
23079 CFI_ENDPROC
23080-END(bounds)
23081+ENDPROC(bounds)
23082
23083 ENTRY(invalid_op)
23084 RING0_INT_FRAME
23085@@ -880,7 +1119,7 @@ ENTRY(invalid_op)
23086 pushl_cfi $do_invalid_op
23087 jmp error_code
23088 CFI_ENDPROC
23089-END(invalid_op)
23090+ENDPROC(invalid_op)
23091
23092 ENTRY(coprocessor_segment_overrun)
23093 RING0_INT_FRAME
23094@@ -889,7 +1128,7 @@ ENTRY(coprocessor_segment_overrun)
23095 pushl_cfi $do_coprocessor_segment_overrun
23096 jmp error_code
23097 CFI_ENDPROC
23098-END(coprocessor_segment_overrun)
23099+ENDPROC(coprocessor_segment_overrun)
23100
23101 ENTRY(invalid_TSS)
23102 RING0_EC_FRAME
23103@@ -897,7 +1136,7 @@ ENTRY(invalid_TSS)
23104 pushl_cfi $do_invalid_TSS
23105 jmp error_code
23106 CFI_ENDPROC
23107-END(invalid_TSS)
23108+ENDPROC(invalid_TSS)
23109
23110 ENTRY(segment_not_present)
23111 RING0_EC_FRAME
23112@@ -905,7 +1144,7 @@ ENTRY(segment_not_present)
23113 pushl_cfi $do_segment_not_present
23114 jmp error_code
23115 CFI_ENDPROC
23116-END(segment_not_present)
23117+ENDPROC(segment_not_present)
23118
23119 ENTRY(stack_segment)
23120 RING0_EC_FRAME
23121@@ -913,7 +1152,7 @@ ENTRY(stack_segment)
23122 pushl_cfi $do_stack_segment
23123 jmp error_code
23124 CFI_ENDPROC
23125-END(stack_segment)
23126+ENDPROC(stack_segment)
23127
23128 ENTRY(alignment_check)
23129 RING0_EC_FRAME
23130@@ -921,7 +1160,7 @@ ENTRY(alignment_check)
23131 pushl_cfi $do_alignment_check
23132 jmp error_code
23133 CFI_ENDPROC
23134-END(alignment_check)
23135+ENDPROC(alignment_check)
23136
23137 ENTRY(divide_error)
23138 RING0_INT_FRAME
23139@@ -930,7 +1169,7 @@ ENTRY(divide_error)
23140 pushl_cfi $do_divide_error
23141 jmp error_code
23142 CFI_ENDPROC
23143-END(divide_error)
23144+ENDPROC(divide_error)
23145
23146 #ifdef CONFIG_X86_MCE
23147 ENTRY(machine_check)
23148@@ -940,7 +1179,7 @@ ENTRY(machine_check)
23149 pushl_cfi machine_check_vector
23150 jmp error_code
23151 CFI_ENDPROC
23152-END(machine_check)
23153+ENDPROC(machine_check)
23154 #endif
23155
23156 ENTRY(spurious_interrupt_bug)
23157@@ -950,7 +1189,7 @@ ENTRY(spurious_interrupt_bug)
23158 pushl_cfi $do_spurious_interrupt_bug
23159 jmp error_code
23160 CFI_ENDPROC
23161-END(spurious_interrupt_bug)
23162+ENDPROC(spurious_interrupt_bug)
23163
23164 #ifdef CONFIG_XEN
23165 /* Xen doesn't set %esp to be precisely what the normal sysenter
23166@@ -1056,7 +1295,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
23167
23168 ENTRY(mcount)
23169 ret
23170-END(mcount)
23171+ENDPROC(mcount)
23172
23173 ENTRY(ftrace_caller)
23174 cmpl $0, function_trace_stop
23175@@ -1089,7 +1328,7 @@ ftrace_graph_call:
23176 .globl ftrace_stub
23177 ftrace_stub:
23178 ret
23179-END(ftrace_caller)
23180+ENDPROC(ftrace_caller)
23181
23182 ENTRY(ftrace_regs_caller)
23183 pushf /* push flags before compare (in cs location) */
23184@@ -1193,7 +1432,7 @@ trace:
23185 popl %ecx
23186 popl %eax
23187 jmp ftrace_stub
23188-END(mcount)
23189+ENDPROC(mcount)
23190 #endif /* CONFIG_DYNAMIC_FTRACE */
23191 #endif /* CONFIG_FUNCTION_TRACER */
23192
23193@@ -1211,7 +1450,7 @@ ENTRY(ftrace_graph_caller)
23194 popl %ecx
23195 popl %eax
23196 ret
23197-END(ftrace_graph_caller)
23198+ENDPROC(ftrace_graph_caller)
23199
23200 .globl return_to_handler
23201 return_to_handler:
23202@@ -1272,15 +1511,18 @@ error_code:
23203 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
23204 REG_TO_PTGS %ecx
23205 SET_KERNEL_GS %ecx
23206- movl $(__USER_DS), %ecx
23207+ movl $(__KERNEL_DS), %ecx
23208 movl %ecx, %ds
23209 movl %ecx, %es
23210+
23211+ pax_enter_kernel
23212+
23213 TRACE_IRQS_OFF
23214 movl %esp,%eax # pt_regs pointer
23215 call *%edi
23216 jmp ret_from_exception
23217 CFI_ENDPROC
23218-END(page_fault)
23219+ENDPROC(page_fault)
23220
23221 /*
23222 * Debug traps and NMI can happen at the one SYSENTER instruction
23223@@ -1323,7 +1565,7 @@ debug_stack_correct:
23224 call do_debug
23225 jmp ret_from_exception
23226 CFI_ENDPROC
23227-END(debug)
23228+ENDPROC(debug)
23229
23230 /*
23231 * NMI is doubly nasty. It can happen _while_ we're handling
23232@@ -1363,6 +1605,9 @@ nmi_stack_correct:
23233 xorl %edx,%edx # zero error code
23234 movl %esp,%eax # pt_regs pointer
23235 call do_nmi
23236+
23237+ pax_exit_kernel
23238+
23239 jmp restore_all_notrace
23240 CFI_ENDPROC
23241
23242@@ -1400,13 +1645,16 @@ nmi_espfix_stack:
23243 FIXUP_ESPFIX_STACK # %eax == %esp
23244 xorl %edx,%edx # zero error code
23245 call do_nmi
23246+
23247+ pax_exit_kernel
23248+
23249 RESTORE_REGS
23250 lss 12+4(%esp), %esp # back to espfix stack
23251 CFI_ADJUST_CFA_OFFSET -24
23252 jmp irq_return
23253 #endif
23254 CFI_ENDPROC
23255-END(nmi)
23256+ENDPROC(nmi)
23257
23258 ENTRY(int3)
23259 RING0_INT_FRAME
23260@@ -1419,14 +1667,14 @@ ENTRY(int3)
23261 call do_int3
23262 jmp ret_from_exception
23263 CFI_ENDPROC
23264-END(int3)
23265+ENDPROC(int3)
23266
23267 ENTRY(general_protection)
23268 RING0_EC_FRAME
23269 pushl_cfi $do_general_protection
23270 jmp error_code
23271 CFI_ENDPROC
23272-END(general_protection)
23273+ENDPROC(general_protection)
23274
23275 #ifdef CONFIG_KVM_GUEST
23276 ENTRY(async_page_fault)
23277@@ -1435,6 +1683,6 @@ ENTRY(async_page_fault)
23278 pushl_cfi $do_async_page_fault
23279 jmp error_code
23280 CFI_ENDPROC
23281-END(async_page_fault)
23282+ENDPROC(async_page_fault)
23283 #endif
23284
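
The pax_erase_kstack routine added above scans downward from the recorded low-water mark for runs of the poison word, then refills everything the kernel stack touched since the last erase so stale data cannot leak into a later syscall. A conceptual C rendering of the fill step only, assuming an illustrative poison value (the real routine is the assembly above, which also locates the low-water mark by scanning for existing poison):

#include <stdint.h>

#define STACK_POISON 0xffffbeefUL	/* illustrative stand-in value */

/* Overwrite the range the stack actually used, from the deepest point
 * reached (`lowest`) up to the current frame (`sp`). */
static void erase_kstack_range(unsigned long *lowest, unsigned long *sp)
{
	unsigned long *p;

	for (p = lowest; p < sp; p++)
		*p = STACK_POISON;
}
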
23285diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
23286index c844f08..966a50e 100644
23287--- a/arch/x86/kernel/entry_64.S
23288+++ b/arch/x86/kernel/entry_64.S
23289@@ -59,6 +59,8 @@
23290 #include <asm/smap.h>
23291 #include <asm/pgtable_types.h>
23292 #include <linux/err.h>
23293+#include <asm/pgtable.h>
23294+#include <asm/alternative-asm.h>
23295
23296 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
23297 #include <linux/elf-em.h>
23298@@ -81,6 +83,430 @@ ENTRY(native_usergs_sysret64)
23299 ENDPROC(native_usergs_sysret64)
23300 #endif /* CONFIG_PARAVIRT */
23301
23302+ .macro ljmpq sel, off
23303+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
23304+ .byte 0x48; ljmp *1234f(%rip)
23305+ .pushsection .rodata
23306+ .align 16
23307+ 1234: .quad \off; .word \sel
23308+ .popsection
23309+#else
23310+ pushq $\sel
23311+ pushq $\off
23312+ lretq
23313+#endif
23314+ .endm
23315+
23316+ .macro pax_enter_kernel
23317+ pax_set_fptr_mask
23318+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23319+ call pax_enter_kernel
23320+#endif
23321+ .endm
23322+
23323+ .macro pax_exit_kernel
23324+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23325+ call pax_exit_kernel
23326+#endif
23327+
23328+ .endm
23329+
23330+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23331+ENTRY(pax_enter_kernel)
23332+ pushq %rdi
23333+
23334+#ifdef CONFIG_PARAVIRT
23335+ PV_SAVE_REGS(CLBR_RDI)
23336+#endif
23337+
23338+#ifdef CONFIG_PAX_KERNEXEC
23339+ GET_CR0_INTO_RDI
23340+ bts $16,%rdi
23341+ jnc 3f
23342+ mov %cs,%edi
23343+ cmp $__KERNEL_CS,%edi
23344+ jnz 2f
23345+1:
23346+#endif
23347+
23348+#ifdef CONFIG_PAX_MEMORY_UDEREF
23349+ 661: jmp 111f
23350+ .pushsection .altinstr_replacement, "a"
23351+ 662: ASM_NOP2
23352+ .popsection
23353+ .pushsection .altinstructions, "a"
23354+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23355+ .popsection
23356+ GET_CR3_INTO_RDI
23357+ cmp $0,%dil
23358+ jnz 112f
23359+ mov $__KERNEL_DS,%edi
23360+ mov %edi,%ss
23361+ jmp 111f
23362+112: cmp $1,%dil
23363+ jz 113f
23364+ ud2
23365+113: sub $4097,%rdi
23366+ bts $63,%rdi
23367+ SET_RDI_INTO_CR3
23368+ mov $__UDEREF_KERNEL_DS,%edi
23369+ mov %edi,%ss
23370+111:
23371+#endif
23372+
23373+#ifdef CONFIG_PARAVIRT
23374+ PV_RESTORE_REGS(CLBR_RDI)
23375+#endif
23376+
23377+ popq %rdi
23378+ pax_force_retaddr
23379+ retq
23380+
23381+#ifdef CONFIG_PAX_KERNEXEC
23382+2: ljmpq __KERNEL_CS,1b
23383+3: ljmpq __KERNEXEC_KERNEL_CS,4f
23384+4: SET_RDI_INTO_CR0
23385+ jmp 1b
23386+#endif
23387+ENDPROC(pax_enter_kernel)
23388+
23389+ENTRY(pax_exit_kernel)
23390+ pushq %rdi
23391+
23392+#ifdef CONFIG_PARAVIRT
23393+ PV_SAVE_REGS(CLBR_RDI)
23394+#endif
23395+
23396+#ifdef CONFIG_PAX_KERNEXEC
23397+ mov %cs,%rdi
23398+ cmp $__KERNEXEC_KERNEL_CS,%edi
23399+ jz 2f
23400+ GET_CR0_INTO_RDI
23401+ bts $16,%rdi
23402+ jnc 4f
23403+1:
23404+#endif
23405+
23406+#ifdef CONFIG_PAX_MEMORY_UDEREF
23407+ 661: jmp 111f
23408+ .pushsection .altinstr_replacement, "a"
23409+ 662: ASM_NOP2
23410+ .popsection
23411+ .pushsection .altinstructions, "a"
23412+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23413+ .popsection
23414+ mov %ss,%edi
23415+ cmp $__UDEREF_KERNEL_DS,%edi
23416+ jnz 111f
23417+ GET_CR3_INTO_RDI
23418+ cmp $0,%dil
23419+ jz 112f
23420+ ud2
23421+112: add $4097,%rdi
23422+ bts $63,%rdi
23423+ SET_RDI_INTO_CR3
23424+ mov $__KERNEL_DS,%edi
23425+ mov %edi,%ss
23426+111:
23427+#endif
23428+
23429+#ifdef CONFIG_PARAVIRT
23430+ PV_RESTORE_REGS(CLBR_RDI);
23431+#endif
23432+
23433+ popq %rdi
23434+ pax_force_retaddr
23435+ retq
23436+
23437+#ifdef CONFIG_PAX_KERNEXEC
23438+2: GET_CR0_INTO_RDI
23439+ btr $16,%rdi
23440+ jnc 4f
23441+ ljmpq __KERNEL_CS,3f
23442+3: SET_RDI_INTO_CR0
23443+ jmp 1b
23444+4: ud2
23445+ jmp 4b
23446+#endif
23447+ENDPROC(pax_exit_kernel)
23448+#endif
23449+
23450+ .macro pax_enter_kernel_user
23451+ pax_set_fptr_mask
23452+#ifdef CONFIG_PAX_MEMORY_UDEREF
23453+ call pax_enter_kernel_user
23454+#endif
23455+ .endm
23456+
23457+ .macro pax_exit_kernel_user
23458+#ifdef CONFIG_PAX_MEMORY_UDEREF
23459+ call pax_exit_kernel_user
23460+#endif
23461+#ifdef CONFIG_PAX_RANDKSTACK
23462+ pushq %rax
23463+ pushq %r11
23464+ call pax_randomize_kstack
23465+ popq %r11
23466+ popq %rax
23467+#endif
23468+ .endm
23469+
23470+#ifdef CONFIG_PAX_MEMORY_UDEREF
23471+ENTRY(pax_enter_kernel_user)
23472+ pushq %rdi
23473+ pushq %rbx
23474+
23475+#ifdef CONFIG_PARAVIRT
23476+ PV_SAVE_REGS(CLBR_RDI)
23477+#endif
23478+
23479+ 661: jmp 111f
23480+ .pushsection .altinstr_replacement, "a"
23481+ 662: ASM_NOP2
23482+ .popsection
23483+ .pushsection .altinstructions, "a"
23484+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23485+ .popsection
23486+ GET_CR3_INTO_RDI
23487+ cmp $1,%dil
23488+ jnz 4f
23489+ sub $4097,%rdi
23490+ bts $63,%rdi
23491+ SET_RDI_INTO_CR3
23492+ jmp 3f
23493+111:
23494+
23495+ GET_CR3_INTO_RDI
23496+ mov %rdi,%rbx
23497+ add $__START_KERNEL_map,%rbx
23498+ sub phys_base(%rip),%rbx
23499+
23500+#ifdef CONFIG_PARAVIRT
23501+ cmpl $0, pv_info+PARAVIRT_enabled
23502+ jz 1f
23503+ pushq %rdi
23504+ i = 0
23505+ .rept USER_PGD_PTRS
23506+ mov i*8(%rbx),%rsi
23507+ mov $0,%sil
23508+ lea i*8(%rbx),%rdi
23509+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23510+ i = i + 1
23511+ .endr
23512+ popq %rdi
23513+ jmp 2f
23514+1:
23515+#endif
23516+
23517+ i = 0
23518+ .rept USER_PGD_PTRS
23519+ movb $0,i*8(%rbx)
23520+ i = i + 1
23521+ .endr
23522+
23523+2: SET_RDI_INTO_CR3
23524+
23525+#ifdef CONFIG_PAX_KERNEXEC
23526+ GET_CR0_INTO_RDI
23527+ bts $16,%rdi
23528+ SET_RDI_INTO_CR0
23529+#endif
23530+
23531+3:
23532+
23533+#ifdef CONFIG_PARAVIRT
23534+ PV_RESTORE_REGS(CLBR_RDI)
23535+#endif
23536+
23537+ popq %rbx
23538+ popq %rdi
23539+ pax_force_retaddr
23540+ retq
23541+4: ud2
23542+ENDPROC(pax_enter_kernel_user)
23543+
23544+ENTRY(pax_exit_kernel_user)
23545+ pushq %rdi
23546+ pushq %rbx
23547+
23548+#ifdef CONFIG_PARAVIRT
23549+ PV_SAVE_REGS(CLBR_RDI)
23550+#endif
23551+
23552+ GET_CR3_INTO_RDI
23553+ 661: jmp 1f
23554+ .pushsection .altinstr_replacement, "a"
23555+ 662: ASM_NOP2
23556+ .popsection
23557+ .pushsection .altinstructions, "a"
23558+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23559+ .popsection
23560+ cmp $0,%dil
23561+ jnz 3f
23562+ add $4097,%rdi
23563+ bts $63,%rdi
23564+ SET_RDI_INTO_CR3
23565+ jmp 2f
23566+1:
23567+
23568+ mov %rdi,%rbx
23569+
23570+#ifdef CONFIG_PAX_KERNEXEC
23571+ GET_CR0_INTO_RDI
23572+ btr $16,%rdi
23573+ jnc 3f
23574+ SET_RDI_INTO_CR0
23575+#endif
23576+
23577+ add $__START_KERNEL_map,%rbx
23578+ sub phys_base(%rip),%rbx
23579+
23580+#ifdef CONFIG_PARAVIRT
23581+ cmpl $0, pv_info+PARAVIRT_enabled
23582+ jz 1f
23583+ i = 0
23584+ .rept USER_PGD_PTRS
23585+ mov i*8(%rbx),%rsi
23586+ mov $0x67,%sil
23587+ lea i*8(%rbx),%rdi
23588+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23589+ i = i + 1
23590+ .endr
23591+ jmp 2f
23592+1:
23593+#endif
23594+
23595+ i = 0
23596+ .rept USER_PGD_PTRS
23597+ movb $0x67,i*8(%rbx)
23598+ i = i + 1
23599+ .endr
23600+2:
23601+
23602+#ifdef CONFIG_PARAVIRT
23603+ PV_RESTORE_REGS(CLBR_RDI)
23604+#endif
23605+
23606+ popq %rbx
23607+ popq %rdi
23608+ pax_force_retaddr
23609+ retq
23610+3: ud2
23611+ENDPROC(pax_exit_kernel_user)
23612+#endif
23613+
23614+ .macro pax_enter_kernel_nmi
23615+ pax_set_fptr_mask
23616+
23617+#ifdef CONFIG_PAX_KERNEXEC
23618+ GET_CR0_INTO_RDI
23619+ bts $16,%rdi
23620+ jc 110f
23621+ SET_RDI_INTO_CR0
23622+ or $2,%ebx
23623+110:
23624+#endif
23625+
23626+#ifdef CONFIG_PAX_MEMORY_UDEREF
23627+ 661: jmp 111f
23628+ .pushsection .altinstr_replacement, "a"
23629+ 662: ASM_NOP2
23630+ .popsection
23631+ .pushsection .altinstructions, "a"
23632+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23633+ .popsection
23634+ GET_CR3_INTO_RDI
23635+ cmp $0,%dil
23636+ jz 111f
23637+ sub $4097,%rdi
23638+ or $4,%ebx
23639+ bts $63,%rdi
23640+ SET_RDI_INTO_CR3
23641+ mov $__UDEREF_KERNEL_DS,%edi
23642+ mov %edi,%ss
23643+111:
23644+#endif
23645+ .endm
23646+
23647+ .macro pax_exit_kernel_nmi
23648+#ifdef CONFIG_PAX_KERNEXEC
23649+ btr $1,%ebx
23650+ jnc 110f
23651+ GET_CR0_INTO_RDI
23652+ btr $16,%rdi
23653+ SET_RDI_INTO_CR0
23654+110:
23655+#endif
23656+
23657+#ifdef CONFIG_PAX_MEMORY_UDEREF
23658+ btr $2,%ebx
23659+ jnc 111f
23660+ GET_CR3_INTO_RDI
23661+ add $4097,%rdi
23662+ bts $63,%rdi
23663+ SET_RDI_INTO_CR3
23664+ mov $__KERNEL_DS,%edi
23665+ mov %edi,%ss
23666+111:
23667+#endif
23668+ .endm
23669+
23670+ .macro pax_erase_kstack
23671+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23672+ call pax_erase_kstack
23673+#endif
23674+ .endm
23675+
23676+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23677+ENTRY(pax_erase_kstack)
23678+ pushq %rdi
23679+ pushq %rcx
23680+ pushq %rax
23681+ pushq %r11
23682+
23683+ GET_THREAD_INFO(%r11)
23684+ mov TI_lowest_stack(%r11), %rdi
23685+ mov $-0xBEEF, %rax
23686+ std
23687+
23688+1: mov %edi, %ecx
23689+ and $THREAD_SIZE_asm - 1, %ecx
23690+ shr $3, %ecx
23691+ repne scasq
23692+ jecxz 2f
23693+
23694+ cmp $2*8, %ecx
23695+ jc 2f
23696+
23697+ mov $2*8, %ecx
23698+ repe scasq
23699+ jecxz 2f
23700+ jne 1b
23701+
23702+2: cld
23703+ mov %esp, %ecx
23704+ sub %edi, %ecx
23705+
23706+ cmp $THREAD_SIZE_asm, %rcx
23707+ jb 3f
23708+ ud2
23709+3:
23710+
23711+ shr $3, %ecx
23712+ rep stosq
23713+
23714+ mov TI_task_thread_sp0(%r11), %rdi
23715+ sub $256, %rdi
23716+ mov %rdi, TI_lowest_stack(%r11)
23717+
23718+ popq %r11
23719+ popq %rax
23720+ popq %rcx
23721+ popq %rdi
23722+ pax_force_retaddr
23723+ ret
23724+ENDPROC(pax_erase_kstack)
23725+#endif
23726
23727 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
23728 #ifdef CONFIG_TRACE_IRQFLAGS
23729@@ -117,7 +543,7 @@ ENDPROC(native_usergs_sysret64)
23730 .endm
23731
23732 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
23733- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
23734+ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
23735 jnc 1f
23736 TRACE_IRQS_ON_DEBUG
23737 1:
23738@@ -155,27 +581,6 @@ ENDPROC(native_usergs_sysret64)
23739 movq \tmp,R11+\offset(%rsp)
23740 .endm
23741
23742- .macro FAKE_STACK_FRAME child_rip
23743- /* push in order ss, rsp, eflags, cs, rip */
23744- xorl %eax, %eax
23745- pushq_cfi $__KERNEL_DS /* ss */
23746- /*CFI_REL_OFFSET ss,0*/
23747- pushq_cfi %rax /* rsp */
23748- CFI_REL_OFFSET rsp,0
23749- pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
23750- /*CFI_REL_OFFSET rflags,0*/
23751- pushq_cfi $__KERNEL_CS /* cs */
23752- /*CFI_REL_OFFSET cs,0*/
23753- pushq_cfi \child_rip /* rip */
23754- CFI_REL_OFFSET rip,0
23755- pushq_cfi %rax /* orig rax */
23756- .endm
23757-
23758- .macro UNFAKE_STACK_FRAME
23759- addq $8*6, %rsp
23760- CFI_ADJUST_CFA_OFFSET -(6*8)
23761- .endm
23762-
23763 /*
23764 * initial frame state for interrupts (and exceptions without error code)
23765 */
23766@@ -242,25 +647,26 @@ ENDPROC(native_usergs_sysret64)
23767 /* save partial stack frame */
23768 .macro SAVE_ARGS_IRQ
23769 cld
23770- /* start from rbp in pt_regs and jump over */
23771- movq_cfi rdi, (RDI-RBP)
23772- movq_cfi rsi, (RSI-RBP)
23773- movq_cfi rdx, (RDX-RBP)
23774- movq_cfi rcx, (RCX-RBP)
23775- movq_cfi rax, (RAX-RBP)
23776- movq_cfi r8, (R8-RBP)
23777- movq_cfi r9, (R9-RBP)
23778- movq_cfi r10, (R10-RBP)
23779- movq_cfi r11, (R11-RBP)
23780+ /* start from r15 in pt_regs and jump over */
23781+ movq_cfi rdi, RDI
23782+ movq_cfi rsi, RSI
23783+ movq_cfi rdx, RDX
23784+ movq_cfi rcx, RCX
23785+ movq_cfi rax, RAX
23786+ movq_cfi r8, R8
23787+ movq_cfi r9, R9
23788+ movq_cfi r10, R10
23789+ movq_cfi r11, R11
23790+ movq_cfi r12, R12
23791
23792 /* Save rbp so that we can unwind from get_irq_regs() */
23793- movq_cfi rbp, 0
23794+ movq_cfi rbp, RBP
23795
23796 /* Save previous stack value */
23797 movq %rsp, %rsi
23798
23799- leaq -RBP(%rsp),%rdi /* arg1 for handler */
23800- testl $3, CS-RBP(%rsi)
23801+ movq %rsp,%rdi /* arg1 for handler */
23802+ testb $3, CS(%rsi)
23803 je 1f
23804 SWAPGS
23805 /*
23806@@ -280,6 +686,18 @@ ENDPROC(native_usergs_sysret64)
23807 0x06 /* DW_OP_deref */, \
23808 0x08 /* DW_OP_const1u */, SS+8-RBP, \
23809 0x22 /* DW_OP_plus */
23810+
23811+#ifdef CONFIG_PAX_MEMORY_UDEREF
23812+ testb $3, CS(%rdi)
23813+ jnz 1f
23814+ pax_enter_kernel
23815+ jmp 2f
23816+1: pax_enter_kernel_user
23817+2:
23818+#else
23819+ pax_enter_kernel
23820+#endif
23821+
23822 /* We entered an interrupt context - irqs are off: */
23823 TRACE_IRQS_OFF
23824 .endm
23825@@ -309,9 +727,52 @@ ENTRY(save_paranoid)
23826 js 1f /* negative -> in kernel */
23827 SWAPGS
23828 xorl %ebx,%ebx
23829-1: ret
23830+1:
23831+#ifdef CONFIG_PAX_MEMORY_UDEREF
23832+ testb $3, CS+8(%rsp)
23833+ jnz 1f
23834+ pax_enter_kernel
23835+ jmp 2f
23836+1: pax_enter_kernel_user
23837+2:
23838+#else
23839+ pax_enter_kernel
23840+#endif
23841+ pax_force_retaddr
23842+ ret
23843 CFI_ENDPROC
23844-END(save_paranoid)
23845+ENDPROC(save_paranoid)
23846+
23847+ENTRY(save_paranoid_nmi)
23848+ XCPT_FRAME 1 RDI+8
23849+ cld
23850+ movq_cfi rdi, RDI+8
23851+ movq_cfi rsi, RSI+8
23852+ movq_cfi rdx, RDX+8
23853+ movq_cfi rcx, RCX+8
23854+ movq_cfi rax, RAX+8
23855+ movq_cfi r8, R8+8
23856+ movq_cfi r9, R9+8
23857+ movq_cfi r10, R10+8
23858+ movq_cfi r11, R11+8
23859+ movq_cfi rbx, RBX+8
23860+ movq_cfi rbp, RBP+8
23861+ movq_cfi r12, R12+8
23862+ movq_cfi r13, R13+8
23863+ movq_cfi r14, R14+8
23864+ movq_cfi r15, R15+8
23865+ movl $1,%ebx
23866+ movl $MSR_GS_BASE,%ecx
23867+ rdmsr
23868+ testl %edx,%edx
23869+ js 1f /* negative -> in kernel */
23870+ SWAPGS
23871+ xorl %ebx,%ebx
23872+1: pax_enter_kernel_nmi
23873+ pax_force_retaddr
23874+ ret
23875+ CFI_ENDPROC
23876+ENDPROC(save_paranoid_nmi)
23877
23878 /*
23879 * A newly forked process directly context switches into this address.
23880@@ -332,7 +793,7 @@ ENTRY(ret_from_fork)
23881
23882 RESTORE_REST
23883
23884- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23885+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23886 jz 1f
23887
23888 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
23889@@ -342,15 +803,13 @@ ENTRY(ret_from_fork)
23890 jmp ret_from_sys_call # go to the SYSRET fastpath
23891
23892 1:
23893- subq $REST_SKIP, %rsp # leave space for volatiles
23894- CFI_ADJUST_CFA_OFFSET REST_SKIP
23895 movq %rbp, %rdi
23896 call *%rbx
23897 movl $0, RAX(%rsp)
23898 RESTORE_REST
23899 jmp int_ret_from_sys_call
23900 CFI_ENDPROC
23901-END(ret_from_fork)
23902+ENDPROC(ret_from_fork)
23903
23904 /*
23905 * System call entry. Up to 6 arguments in registers are supported.
23906@@ -387,7 +846,7 @@ END(ret_from_fork)
23907 ENTRY(system_call)
23908 CFI_STARTPROC simple
23909 CFI_SIGNAL_FRAME
23910- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
23911+ CFI_DEF_CFA rsp,0
23912 CFI_REGISTER rip,rcx
23913 /*CFI_REGISTER rflags,r11*/
23914 SWAPGS_UNSAFE_STACK
23915@@ -400,16 +859,23 @@ GLOBAL(system_call_after_swapgs)
23916
23917 movq %rsp,PER_CPU_VAR(old_rsp)
23918 movq PER_CPU_VAR(kernel_stack),%rsp
23919+ SAVE_ARGS 8*6,0
23920+ pax_enter_kernel_user
23921+
23922+#ifdef CONFIG_PAX_RANDKSTACK
23923+ pax_erase_kstack
23924+#endif
23925+
23926 /*
23927 * No need to follow this irqs off/on section - it's straight
23928 * and short:
23929 */
23930 ENABLE_INTERRUPTS(CLBR_NONE)
23931- SAVE_ARGS 8,0
23932 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
23933 movq %rcx,RIP-ARGOFFSET(%rsp)
23934 CFI_REL_OFFSET rip,RIP-ARGOFFSET
23935- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23936+ GET_THREAD_INFO(%rcx)
23937+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
23938 jnz tracesys
23939 system_call_fastpath:
23940 #if __SYSCALL_MASK == ~0
23941@@ -433,10 +899,13 @@ sysret_check:
23942 LOCKDEP_SYS_EXIT
23943 DISABLE_INTERRUPTS(CLBR_NONE)
23944 TRACE_IRQS_OFF
23945- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
23946+ GET_THREAD_INFO(%rcx)
23947+ movl TI_flags(%rcx),%edx
23948 andl %edi,%edx
23949 jnz sysret_careful
23950 CFI_REMEMBER_STATE
23951+ pax_exit_kernel_user
23952+ pax_erase_kstack
23953 /*
23954 * sysretq will re-enable interrupts:
23955 */
23956@@ -495,6 +964,9 @@ auditsys:
23957 movq %rax,%rsi /* 2nd arg: syscall number */
23958 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
23959 call __audit_syscall_entry
23960+
23961+ pax_erase_kstack
23962+
23963 LOAD_ARGS 0 /* reload call-clobbered registers */
23964 jmp system_call_fastpath
23965
23966@@ -516,7 +988,7 @@ sysret_audit:
23967 /* Do syscall tracing */
23968 tracesys:
23969 #ifdef CONFIG_AUDITSYSCALL
23970- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23971+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
23972 jz auditsys
23973 #endif
23974 SAVE_REST
23975@@ -524,12 +996,15 @@ tracesys:
23976 FIXUP_TOP_OF_STACK %rdi
23977 movq %rsp,%rdi
23978 call syscall_trace_enter
23979+
23980+ pax_erase_kstack
23981+
23982 /*
23983 * Reload arg registers from stack in case ptrace changed them.
23984 * We don't reload %rax because syscall_trace_enter() returned
23985 * the value it wants us to use in the table lookup.
23986 */
23987- LOAD_ARGS ARGOFFSET, 1
23988+ LOAD_ARGS 1
23989 RESTORE_REST
23990 #if __SYSCALL_MASK == ~0
23991 cmpq $__NR_syscall_max,%rax
23992@@ -559,7 +1034,9 @@ GLOBAL(int_with_check)
23993 andl %edi,%edx
23994 jnz int_careful
23995 andl $~TS_COMPAT,TI_status(%rcx)
23996- jmp retint_swapgs
23997+ pax_exit_kernel_user
23998+ pax_erase_kstack
23999+ jmp retint_swapgs_pax
24000
24001 /* Either reschedule or signal or syscall exit tracking needed. */
24002 /* First do a reschedule test. */
24003@@ -605,7 +1082,7 @@ int_restore_rest:
24004 TRACE_IRQS_OFF
24005 jmp int_with_check
24006 CFI_ENDPROC
24007-END(system_call)
24008+ENDPROC(system_call)
24009
24010 .macro FORK_LIKE func
24011 ENTRY(stub_\func)
24012@@ -618,9 +1095,10 @@ ENTRY(stub_\func)
24013 DEFAULT_FRAME 0 8 /* offset 8: return address */
24014 call sys_\func
24015 RESTORE_TOP_OF_STACK %r11, 8
24016- ret $REST_SKIP /* pop extended registers */
24017+ pax_force_retaddr
24018+ ret
24019 CFI_ENDPROC
24020-END(stub_\func)
24021+ENDPROC(stub_\func)
24022 .endm
24023
24024 .macro FIXED_FRAME label,func
24025@@ -630,9 +1108,10 @@ ENTRY(\label)
24026 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
24027 call \func
24028 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
24029+ pax_force_retaddr
24030 ret
24031 CFI_ENDPROC
24032-END(\label)
24033+ENDPROC(\label)
24034 .endm
24035
24036 FORK_LIKE clone
24037@@ -640,19 +1119,6 @@ END(\label)
24038 FORK_LIKE vfork
24039 FIXED_FRAME stub_iopl, sys_iopl
24040
24041-ENTRY(ptregscall_common)
24042- DEFAULT_FRAME 1 8 /* offset 8: return address */
24043- RESTORE_TOP_OF_STACK %r11, 8
24044- movq_cfi_restore R15+8, r15
24045- movq_cfi_restore R14+8, r14
24046- movq_cfi_restore R13+8, r13
24047- movq_cfi_restore R12+8, r12
24048- movq_cfi_restore RBP+8, rbp
24049- movq_cfi_restore RBX+8, rbx
24050- ret $REST_SKIP /* pop extended registers */
24051- CFI_ENDPROC
24052-END(ptregscall_common)
24053-
24054 ENTRY(stub_execve)
24055 CFI_STARTPROC
24056 addq $8, %rsp
24057@@ -664,7 +1130,7 @@ ENTRY(stub_execve)
24058 RESTORE_REST
24059 jmp int_ret_from_sys_call
24060 CFI_ENDPROC
24061-END(stub_execve)
24062+ENDPROC(stub_execve)
24063
24064 /*
24065 * sigreturn is special because it needs to restore all registers on return.
24066@@ -681,7 +1147,7 @@ ENTRY(stub_rt_sigreturn)
24067 RESTORE_REST
24068 jmp int_ret_from_sys_call
24069 CFI_ENDPROC
24070-END(stub_rt_sigreturn)
24071+ENDPROC(stub_rt_sigreturn)
24072
24073 #ifdef CONFIG_X86_X32_ABI
24074 ENTRY(stub_x32_rt_sigreturn)
24075@@ -695,7 +1161,7 @@ ENTRY(stub_x32_rt_sigreturn)
24076 RESTORE_REST
24077 jmp int_ret_from_sys_call
24078 CFI_ENDPROC
24079-END(stub_x32_rt_sigreturn)
24080+ENDPROC(stub_x32_rt_sigreturn)
24081
24082 ENTRY(stub_x32_execve)
24083 CFI_STARTPROC
24084@@ -709,7 +1175,7 @@ ENTRY(stub_x32_execve)
24085 RESTORE_REST
24086 jmp int_ret_from_sys_call
24087 CFI_ENDPROC
24088-END(stub_x32_execve)
24089+ENDPROC(stub_x32_execve)
24090
24091 #endif
24092
24093@@ -746,7 +1212,7 @@ vector=vector+1
24094 2: jmp common_interrupt
24095 .endr
24096 CFI_ENDPROC
24097-END(irq_entries_start)
24098+ENDPROC(irq_entries_start)
24099
24100 .previous
24101 END(interrupt)
24102@@ -763,8 +1229,8 @@ END(interrupt)
24103 /* 0(%rsp): ~(interrupt number) */
24104 .macro interrupt func
24105 /* reserve pt_regs for scratch regs and rbp */
24106- subq $ORIG_RAX-RBP, %rsp
24107- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
24108+ subq $ORIG_RAX, %rsp
24109+ CFI_ADJUST_CFA_OFFSET ORIG_RAX
24110 SAVE_ARGS_IRQ
24111 call \func
24112 .endm
24113@@ -787,14 +1253,14 @@ ret_from_intr:
24114
24115 /* Restore saved previous stack */
24116 popq %rsi
24117- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
24118- leaq ARGOFFSET-RBP(%rsi), %rsp
24119+ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
24120+ movq %rsi, %rsp
24121 CFI_DEF_CFA_REGISTER rsp
24122- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
24123+ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
24124
24125 exit_intr:
24126 GET_THREAD_INFO(%rcx)
24127- testl $3,CS-ARGOFFSET(%rsp)
24128+ testb $3,CS-ARGOFFSET(%rsp)
24129 je retint_kernel
24130
24131 /* Interrupt came from user space */
24132@@ -816,12 +1282,35 @@ retint_swapgs: /* return to user-space */
24133 * The iretq could re-enable interrupts:
24134 */
24135 DISABLE_INTERRUPTS(CLBR_ANY)
24136+ pax_exit_kernel_user
24137+retint_swapgs_pax:
24138 TRACE_IRQS_IRETQ
24139 SWAPGS
24140 jmp restore_args
24141
24142 retint_restore_args: /* return to kernel space */
24143 DISABLE_INTERRUPTS(CLBR_ANY)
24144+ pax_exit_kernel
24145+
24146+#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC)
24147+ /* This is a quirk to allow IRQs/NMIs/MCEs during early EFI setup,
24148+	 * namely calling EFI runtime services with a phys mapping. We start
24149+	 * off with NOPs and patch in the real instrumentation (BTS/OR)
24150+	 * before starting any userland process, even before bringing up
24151+	 * the APs.
24152+ */
24153+ .pushsection .altinstr_replacement, "a"
24154+ 601: pax_force_retaddr (RIP-ARGOFFSET)
24155+ 602:
24156+ .popsection
24157+ 603: .fill 602b-601b, 1, 0x90
24158+ .pushsection .altinstructions, "a"
24159+ altinstruction_entry 603b, 601b, X86_FEATURE_ALWAYS, 602b-601b, 602b-601b
24160+ .popsection
24161+#else
24162+ pax_force_retaddr (RIP-ARGOFFSET)
24163+#endif
24164+
24165 /*
24166 * The iretq could re-enable interrupts:
24167 */
24168@@ -934,7 +1423,7 @@ ENTRY(retint_kernel)
24169 jmp exit_intr
24170 #endif
24171 CFI_ENDPROC
24172-END(common_interrupt)
24173+ENDPROC(common_interrupt)
24174
24175 /*
24176 * If IRET takes a fault on the espfix stack, then we
24177@@ -956,13 +1445,13 @@ __do_double_fault:
24178 cmpq $native_irq_return_iret,%rax
24179 jne do_double_fault /* This shouldn't happen... */
24180 movq PER_CPU_VAR(kernel_stack),%rax
24181- subq $(6*8-KERNEL_STACK_OFFSET),%rax /* Reset to original stack */
24182+ subq $(6*8),%rax /* Reset to original stack */
24183 movq %rax,RSP(%rdi)
24184 movq $0,(%rax) /* Missing (lost) #GP error code */
24185 movq $general_protection,RIP(%rdi)
24186 retq
24187 CFI_ENDPROC
24188-END(__do_double_fault)
24189+ENDPROC(__do_double_fault)
24190 #else
24191 # define __do_double_fault do_double_fault
24192 #endif
24193@@ -979,7 +1468,7 @@ ENTRY(\sym)
24194 interrupt \do_sym
24195 jmp ret_from_intr
24196 CFI_ENDPROC
24197-END(\sym)
24198+ENDPROC(\sym)
24199 .endm
24200
24201 #ifdef CONFIG_TRACING
24202@@ -1052,7 +1541,7 @@ apicinterrupt IRQ_WORK_VECTOR \
24203 /*
24204 * Exception entry points.
24205 */
24206-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
24207+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
24208
24209 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
24210 ENTRY(\sym)
24211@@ -1103,6 +1592,12 @@ ENTRY(\sym)
24212 .endif
24213
24214 .if \shift_ist != -1
24215+#ifdef CONFIG_SMP
24216+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
24217+ lea init_tss(%r13), %r13
24218+#else
24219+ lea init_tss(%rip), %r13
24220+#endif
24221 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\shift_ist)
24222 .endif
24223
24224@@ -1119,7 +1614,7 @@ ENTRY(\sym)
24225 .endif
24226
24227 CFI_ENDPROC
24228-END(\sym)
24229+ENDPROC(\sym)
24230 .endm
24231
24232 #ifdef CONFIG_TRACING
24233@@ -1160,9 +1655,10 @@ gs_change:
24234 2: mfence /* workaround */
24235 SWAPGS
24236 popfq_cfi
24237+ pax_force_retaddr
24238 ret
24239 CFI_ENDPROC
24240-END(native_load_gs_index)
24241+ENDPROC(native_load_gs_index)
24242
24243 _ASM_EXTABLE(gs_change,bad_gs)
24244 .section .fixup,"ax"
24245@@ -1190,9 +1686,10 @@ ENTRY(do_softirq_own_stack)
24246 CFI_DEF_CFA_REGISTER rsp
24247 CFI_ADJUST_CFA_OFFSET -8
24248 decl PER_CPU_VAR(irq_count)
24249+ pax_force_retaddr
24250 ret
24251 CFI_ENDPROC
24252-END(do_softirq_own_stack)
24253+ENDPROC(do_softirq_own_stack)
24254
24255 #ifdef CONFIG_XEN
24256 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
24257@@ -1230,7 +1727,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
24258 decl PER_CPU_VAR(irq_count)
24259 jmp error_exit
24260 CFI_ENDPROC
24261-END(xen_do_hypervisor_callback)
24262+ENDPROC(xen_do_hypervisor_callback)
24263
24264 /*
24265 * Hypervisor uses this for application faults while it executes.
24266@@ -1289,7 +1786,7 @@ ENTRY(xen_failsafe_callback)
24267 SAVE_ALL
24268 jmp error_exit
24269 CFI_ENDPROC
24270-END(xen_failsafe_callback)
24271+ENDPROC(xen_failsafe_callback)
24272
24273 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
24274 xen_hvm_callback_vector xen_evtchn_do_upcall
24275@@ -1336,18 +1833,33 @@ ENTRY(paranoid_exit)
24276 DEFAULT_FRAME
24277 DISABLE_INTERRUPTS(CLBR_NONE)
24278 TRACE_IRQS_OFF_DEBUG
24279- testl %ebx,%ebx /* swapgs needed? */
24280+ testl $1,%ebx /* swapgs needed? */
24281 jnz paranoid_restore
24282- testl $3,CS(%rsp)
24283+ testb $3,CS(%rsp)
24284 jnz paranoid_userspace
24285+#ifdef CONFIG_PAX_MEMORY_UDEREF
24286+ pax_exit_kernel
24287+ TRACE_IRQS_IRETQ 0
24288+ SWAPGS_UNSAFE_STACK
24289+ RESTORE_ALL 8
24290+ pax_force_retaddr_bts
24291+ jmp irq_return
24292+#endif
24293 paranoid_swapgs:
24294+#ifdef CONFIG_PAX_MEMORY_UDEREF
24295+ pax_exit_kernel_user
24296+#else
24297+ pax_exit_kernel
24298+#endif
24299 TRACE_IRQS_IRETQ 0
24300 SWAPGS_UNSAFE_STACK
24301 RESTORE_ALL 8
24302 jmp irq_return
24303 paranoid_restore:
24304+ pax_exit_kernel
24305 TRACE_IRQS_IRETQ_DEBUG 0
24306 RESTORE_ALL 8
24307+ pax_force_retaddr_bts
24308 jmp irq_return
24309 paranoid_userspace:
24310 GET_THREAD_INFO(%rcx)
24311@@ -1376,7 +1888,7 @@ paranoid_schedule:
24312 TRACE_IRQS_OFF
24313 jmp paranoid_userspace
24314 CFI_ENDPROC
24315-END(paranoid_exit)
24316+ENDPROC(paranoid_exit)
24317
24318 /*
24319 * Exception entry point. This expects an error code/orig_rax on the stack.
24320@@ -1403,12 +1915,23 @@ ENTRY(error_entry)
24321 movq_cfi r14, R14+8
24322 movq_cfi r15, R15+8
24323 xorl %ebx,%ebx
24324- testl $3,CS+8(%rsp)
24325+ testb $3,CS+8(%rsp)
24326 je error_kernelspace
24327 error_swapgs:
24328 SWAPGS
24329 error_sti:
24330+#ifdef CONFIG_PAX_MEMORY_UDEREF
24331+ testb $3, CS+8(%rsp)
24332+ jnz 1f
24333+ pax_enter_kernel
24334+ jmp 2f
24335+1: pax_enter_kernel_user
24336+2:
24337+#else
24338+ pax_enter_kernel
24339+#endif
24340 TRACE_IRQS_OFF
24341+ pax_force_retaddr
24342 ret
24343
24344 /*
24345@@ -1435,7 +1958,7 @@ bstep_iret:
24346 movq %rcx,RIP+8(%rsp)
24347 jmp error_swapgs
24348 CFI_ENDPROC
24349-END(error_entry)
24350+ENDPROC(error_entry)
24351
24352
24353 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
24354@@ -1446,7 +1969,7 @@ ENTRY(error_exit)
24355 DISABLE_INTERRUPTS(CLBR_NONE)
24356 TRACE_IRQS_OFF
24357 GET_THREAD_INFO(%rcx)
24358- testl %eax,%eax
24359+ testl $1,%eax
24360 jne retint_kernel
24361 LOCKDEP_SYS_EXIT_IRQ
24362 movl TI_flags(%rcx),%edx
24363@@ -1455,7 +1978,7 @@ ENTRY(error_exit)
24364 jnz retint_careful
24365 jmp retint_swapgs
24366 CFI_ENDPROC
24367-END(error_exit)
24368+ENDPROC(error_exit)
24369
24370 /*
24371 * Test if a given stack is an NMI stack or not.
24372@@ -1513,9 +2036,11 @@ ENTRY(nmi)
24373 * If %cs was not the kernel segment, then the NMI triggered in user
24374 * space, which means it is definitely not nested.
24375 */
24376+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
24377+ je 1f
24378 cmpl $__KERNEL_CS, 16(%rsp)
24379 jne first_nmi
24380-
24381+1:
24382 /*
24383 * Check the special variable on the stack to see if NMIs are
24384 * executing.
24385@@ -1549,8 +2074,7 @@ nested_nmi:
24386
24387 1:
24388 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
24389- leaq -1*8(%rsp), %rdx
24390- movq %rdx, %rsp
24391+ subq $8, %rsp
24392 CFI_ADJUST_CFA_OFFSET 1*8
24393 leaq -10*8(%rsp), %rdx
24394 pushq_cfi $__KERNEL_DS
24395@@ -1568,6 +2092,7 @@ nested_nmi_out:
24396 CFI_RESTORE rdx
24397
24398 /* No need to check faults here */
24399+# pax_force_retaddr_bts
24400 INTERRUPT_RETURN
24401
24402 CFI_RESTORE_STATE
24403@@ -1664,13 +2189,13 @@ end_repeat_nmi:
24404 subq $ORIG_RAX-R15, %rsp
24405 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
24406 /*
24407- * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
24408+ * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
24409 * as we should not be calling schedule in NMI context.
24410 * Even with normal interrupts enabled. An NMI should not be
24411 * setting NEED_RESCHED or anything that normal interrupts and
24412 * exceptions might do.
24413 */
24414- call save_paranoid
24415+ call save_paranoid_nmi
24416 DEFAULT_FRAME 0
24417
24418 /*
24419@@ -1680,9 +2205,9 @@ end_repeat_nmi:
24420 * NMI itself takes a page fault, the page fault that was preempted
24421 * will read the information from the NMI page fault and not the
24422 * origin fault. Save it off and restore it if it changes.
24423- * Use the r12 callee-saved register.
24424+ * Use the r13 callee-saved register.
24425 */
24426- movq %cr2, %r12
24427+ movq %cr2, %r13
24428
24429 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
24430 movq %rsp,%rdi
24431@@ -1691,29 +2216,34 @@ end_repeat_nmi:
24432
24433 /* Did the NMI take a page fault? Restore cr2 if it did */
24434 movq %cr2, %rcx
24435- cmpq %rcx, %r12
24436+ cmpq %rcx, %r13
24437 je 1f
24438- movq %r12, %cr2
24439+ movq %r13, %cr2
24440 1:
24441
24442- testl %ebx,%ebx /* swapgs needed? */
24443+ testl $1,%ebx /* swapgs needed? */
24444 jnz nmi_restore
24445 nmi_swapgs:
24446 SWAPGS_UNSAFE_STACK
24447 nmi_restore:
24448+ pax_exit_kernel_nmi
24449 /* Pop the extra iret frame at once */
24450 RESTORE_ALL 6*8
24451+ testb $3, 8(%rsp)
24452+ jnz 1f
24453+ pax_force_retaddr_bts
24454+1:
24455
24456 /* Clear the NMI executing stack variable */
24457 movq $0, 5*8(%rsp)
24458 jmp irq_return
24459 CFI_ENDPROC
24460-END(nmi)
24461+ENDPROC(nmi)
24462
24463 ENTRY(ignore_sysret)
24464 CFI_STARTPROC
24465 mov $-ENOSYS,%eax
24466 sysret
24467 CFI_ENDPROC
24468-END(ignore_sysret)
24469+ENDPROC(ignore_sysret)
24470
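Most of the bulk added to entry_64.S above is the PaX entry/exit machinery: UDEREF switches CR3 between the kernel page tables and a shadow user copy (the `sub $4097`/`bts $63` pair adjusts the PCID and sets the no-flush bit in the CR3 image), and STACKLEAK's pax_erase_kstack scans down from the lowest stack address ever reached, looking for the -0xBEEF poison pattern, then repoisons everything up to the live frame. A simplified C model of that erase loop — the real code additionally requires two consecutive poison words before trusting the boundary, and array indices stand in for addresses here:

#include <stddef.h>
#include <stdint.h>

#define STACK_POISON ((uint64_t)-0xBEEF)

/*
 * stack[0] is the lowest address; the stack grows downward, so the
 * live frame sits at index 'sp' and everything between 'lowest' and
 * 'sp' may hold stale data left over from deeper call chains.
 */
void erase_kstack(uint64_t *stack, size_t lowest, size_t sp)
{
    size_t i = lowest;

    /* Walk down until the words below are still poisoned - those were
     * never written since the last erase pass. */
    while (i > 0 && stack[i - 1] != STACK_POISON)
        i--;

    /* Repoison the stale region below the current stack pointer. */
    while (i < sp)
        stack[i++] = STACK_POISON;
}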
24471diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
24472index 94d857f..bf1f0bf 100644
24473--- a/arch/x86/kernel/espfix_64.c
24474+++ b/arch/x86/kernel/espfix_64.c
24475@@ -197,7 +197,7 @@ void init_espfix_ap(void)
24476 set_pte(&pte_p[n*PTE_STRIDE], pte);
24477
24478 /* Job is done for this CPU and any CPU which shares this page */
24479- ACCESS_ONCE(espfix_pages[page]) = stack_page;
24480+ ACCESS_ONCE_RW(espfix_pages[page]) = stack_page;
24481
24482 unlock_done:
24483 mutex_unlock(&espfix_init_mutex);
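The one-line espfix change is part of a patch-wide convention: grsecurity constifies many kernel objects, so ACCESS_ONCE() is given read-only semantics and deliberate writes must go through ACCESS_ONCE_RW(). Paraphrased (the in-tree definitions live in compiler.h), the split looks roughly like this — volatile forces a single real access, and the const in the read variant turns accidental writes into compile errors:

/* Paraphrased definitions, not copied from the patch. */
#define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

static int shared_flag;

int  read_flag(void)       { return ACCESS_ONCE(shared_flag); }
void write_flag(int value) { ACCESS_ONCE_RW(shared_flag) = value; }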
24484diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
24485index cbc4a91..b38ee45 100644
24486--- a/arch/x86/kernel/ftrace.c
24487+++ b/arch/x86/kernel/ftrace.c
24488@@ -88,7 +88,7 @@ static unsigned long text_ip_addr(unsigned long ip)
24489 * kernel identity mapping to modify code.
24490 */
24491 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
24492- ip = (unsigned long)__va(__pa_symbol(ip));
24493+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
24494
24495 return ip;
24496 }
24497@@ -104,6 +104,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
24498 {
24499 unsigned char replaced[MCOUNT_INSN_SIZE];
24500
24501+ ip = ktla_ktva(ip);
24502+
24503 /*
24504 * Note: Due to modules and __init, code can
24505 * disappear and change, we need to protect against faulting
24506@@ -229,7 +231,7 @@ static int update_ftrace_func(unsigned long ip, void *new)
24507 unsigned char old[MCOUNT_INSN_SIZE];
24508 int ret;
24509
24510- memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
24511+ memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
24512
24513 ftrace_update_func = ip;
24514 /* Make sure the breakpoints see the ftrace_update_func update */
24515@@ -310,7 +312,7 @@ static int add_break(unsigned long ip, const char *old)
24516 unsigned char replaced[MCOUNT_INSN_SIZE];
24517 unsigned char brk = BREAKPOINT_INSTRUCTION;
24518
24519- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
24520+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
24521 return -EFAULT;
24522
24523 /* Make sure it is what we expect it to be */
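The ftrace hunks all route code addresses through ktla_ktva() before reading or patching instructions. Under KERNEXEC on i386 the kernel executes its text through a segment with a non-zero base, so the address an instruction runs at ("kernel text linear address") and the address its bytes are reachable at through %ds differ by a constant; without KERNEXEC the macro is the identity. A toy model, with the delta left as an assumed placeholder rather than the kernel's real constant:

#include <stdint.h>

/* 0 when KERNEXEC is disabled; a fixed segment-base delta otherwise.
 * Placeholder value - not the kernel's constant. */
static const uintptr_t kernexec_delta = 0;

static inline uintptr_t ktla_ktva(uintptr_t addr)
{
    return addr + kernexec_delta;   /* text address -> data view  */
}

static inline uintptr_t ktva_ktla(uintptr_t addr)
{
    return addr - kernexec_delta;   /* data view  -> text address */
}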
24524diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
24525index eda1a86..8f6df48 100644
24526--- a/arch/x86/kernel/head64.c
24527+++ b/arch/x86/kernel/head64.c
24528@@ -67,12 +67,12 @@ again:
24529 pgd = *pgd_p;
24530
24531 /*
24532- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
24533- * critical -- __PAGE_OFFSET would point us back into the dynamic
24534+ * The use of __early_va rather than __va here is critical:
24535+ * __va would point us back into the dynamic
24536 * range and we might end up looping forever...
24537 */
24538 if (pgd)
24539- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24540+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
24541 else {
24542 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24543 reset_early_page_tables();
24544@@ -82,13 +82,13 @@ again:
24545 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
24546 for (i = 0; i < PTRS_PER_PUD; i++)
24547 pud_p[i] = 0;
24548- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24549+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
24550 }
24551 pud_p += pud_index(address);
24552 pud = *pud_p;
24553
24554 if (pud)
24555- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24556+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
24557 else {
24558 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24559 reset_early_page_tables();
24560@@ -98,7 +98,7 @@ again:
24561 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
24562 for (i = 0; i < PTRS_PER_PMD; i++)
24563 pmd_p[i] = 0;
24564- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24565+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
24566 }
24567 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
24568 pmd_p[pmd_index(address)] = pmd;
24569@@ -175,7 +175,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
24570 if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG)
24571 early_printk("Kernel alive\n");
24572
24573- clear_page(init_level4_pgt);
24574 /* set init_level4_pgt kernel high mapping*/
24575 init_level4_pgt[511] = early_level4_pgt[511];
24576
24577diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
24578index f36bd42..0ab4474 100644
24579--- a/arch/x86/kernel/head_32.S
24580+++ b/arch/x86/kernel/head_32.S
24581@@ -26,6 +26,12 @@
24582 /* Physical address */
24583 #define pa(X) ((X) - __PAGE_OFFSET)
24584
24585+#ifdef CONFIG_PAX_KERNEXEC
24586+#define ta(X) (X)
24587+#else
24588+#define ta(X) ((X) - __PAGE_OFFSET)
24589+#endif
24590+
24591 /*
24592 * References to members of the new_cpu_data structure.
24593 */
24594@@ -55,11 +61,7 @@
24595  * and smaller than max_low_pfn, otherwise we will waste some page table entries
24596 */
24597
24598-#if PTRS_PER_PMD > 1
24599-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
24600-#else
24601-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
24602-#endif
24603+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
24604
24605 /* Number of possible pages in the lowmem region */
24606 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
24607@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
24608 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24609
24610 /*
24611+ * Real beginning of normal "text" segment
24612+ */
24613+ENTRY(stext)
24614+ENTRY(_stext)
24615+
24616+/*
24617 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
24618 * %esi points to the real-mode code as a 32-bit pointer.
24619 * CS and DS must be 4 GB flat segments, but we don't depend on
24620@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24621 * can.
24622 */
24623 __HEAD
24624+
24625+#ifdef CONFIG_PAX_KERNEXEC
24626+ jmp startup_32
24627+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
24628+.fill PAGE_SIZE-5,1,0xcc
24629+#endif
24630+
24631 ENTRY(startup_32)
24632 movl pa(stack_start),%ecx
24633
24634@@ -106,6 +121,59 @@ ENTRY(startup_32)
24635 2:
24636 leal -__PAGE_OFFSET(%ecx),%esp
24637
24638+#ifdef CONFIG_SMP
24639+ movl $pa(cpu_gdt_table),%edi
24640+ movl $__per_cpu_load,%eax
24641+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
24642+ rorl $16,%eax
24643+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
24644+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
24645+ movl $__per_cpu_end - 1,%eax
24646+ subl $__per_cpu_start,%eax
24647+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
24648+#endif
24649+
24650+#ifdef CONFIG_PAX_MEMORY_UDEREF
24651+ movl $NR_CPUS,%ecx
24652+ movl $pa(cpu_gdt_table),%edi
24653+1:
24654+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
24655+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
24656+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
24657+ addl $PAGE_SIZE_asm,%edi
24658+ loop 1b
24659+#endif
24660+
24661+#ifdef CONFIG_PAX_KERNEXEC
24662+ movl $pa(boot_gdt),%edi
24663+ movl $__LOAD_PHYSICAL_ADDR,%eax
24664+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
24665+ rorl $16,%eax
24666+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
24667+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
24668+ rorl $16,%eax
24669+
24670+ ljmp $(__BOOT_CS),$1f
24671+1:
24672+
24673+ movl $NR_CPUS,%ecx
24674+ movl $pa(cpu_gdt_table),%edi
24675+ addl $__PAGE_OFFSET,%eax
24676+1:
24677+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
24678+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
24679+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
24680+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
24681+ rorl $16,%eax
24682+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
24683+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
24684+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
24685+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
24686+ rorl $16,%eax
24687+ addl $PAGE_SIZE_asm,%edi
24688+ loop 1b
24689+#endif
24690+
24691 /*
24692 * Clear BSS first so that there are no surprises...
24693 */
24694@@ -201,8 +269,11 @@ ENTRY(startup_32)
24695 movl %eax, pa(max_pfn_mapped)
24696
24697 /* Do early initialization of the fixmap area */
24698- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24699- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
24700+#ifdef CONFIG_COMPAT_VDSO
24701+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
24702+#else
24703+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
24704+#endif
24705 #else /* Not PAE */
24706
24707 page_pde_offset = (__PAGE_OFFSET >> 20);
24708@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24709 movl %eax, pa(max_pfn_mapped)
24710
24711 /* Do early initialization of the fixmap area */
24712- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24713- movl %eax,pa(initial_page_table+0xffc)
24714+#ifdef CONFIG_COMPAT_VDSO
24715+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
24716+#else
24717+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
24718+#endif
24719 #endif
24720
24721 #ifdef CONFIG_PARAVIRT
24722@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24723 cmpl $num_subarch_entries, %eax
24724 jae bad_subarch
24725
24726- movl pa(subarch_entries)(,%eax,4), %eax
24727- subl $__PAGE_OFFSET, %eax
24728- jmp *%eax
24729+ jmp *pa(subarch_entries)(,%eax,4)
24730
24731 bad_subarch:
24732 WEAK(lguest_entry)
24733@@ -261,10 +333,10 @@ WEAK(xen_entry)
24734 __INITDATA
24735
24736 subarch_entries:
24737- .long default_entry /* normal x86/PC */
24738- .long lguest_entry /* lguest hypervisor */
24739- .long xen_entry /* Xen hypervisor */
24740- .long default_entry /* Moorestown MID */
24741+ .long ta(default_entry) /* normal x86/PC */
24742+ .long ta(lguest_entry) /* lguest hypervisor */
24743+ .long ta(xen_entry) /* Xen hypervisor */
24744+ .long ta(default_entry) /* Moorestown MID */
24745 num_subarch_entries = (. - subarch_entries) / 4
24746 .previous
24747 #else
24748@@ -354,6 +426,7 @@ default_entry:
24749 movl pa(mmu_cr4_features),%eax
24750 movl %eax,%cr4
24751
24752+#ifdef CONFIG_X86_PAE
24753 testb $X86_CR4_PAE, %al # check if PAE is enabled
24754 jz enable_paging
24755
24756@@ -382,6 +455,9 @@ default_entry:
24757 /* Make changes effective */
24758 wrmsr
24759
24760+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
24761+#endif
24762+
24763 enable_paging:
24764
24765 /*
24766@@ -449,14 +525,20 @@ is486:
24767 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
24768 movl %eax,%ss # after changing gdt.
24769
24770- movl $(__USER_DS),%eax # DS/ES contains default USER segment
24771+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
24772 movl %eax,%ds
24773 movl %eax,%es
24774
24775 movl $(__KERNEL_PERCPU), %eax
24776 movl %eax,%fs # set this cpu's percpu
24777
24778+#ifdef CONFIG_CC_STACKPROTECTOR
24779 movl $(__KERNEL_STACK_CANARY),%eax
24780+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
24781+ movl $(__USER_DS),%eax
24782+#else
24783+ xorl %eax,%eax
24784+#endif
24785 movl %eax,%gs
24786
24787 xorl %eax,%eax # Clear LDT
24788@@ -512,8 +594,11 @@ setup_once:
24789 * relocation. Manually set base address in stack canary
24790 * segment descriptor.
24791 */
24792- movl $gdt_page,%eax
24793+ movl $cpu_gdt_table,%eax
24794 movl $stack_canary,%ecx
24795+#ifdef CONFIG_SMP
24796+ addl $__per_cpu_load,%ecx
24797+#endif
24798 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
24799 shrl $16, %ecx
24800 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
24801@@ -548,7 +633,7 @@ ENTRY(early_idt_handler)
24802 cmpl $2,(%esp) # X86_TRAP_NMI
24803 je is_nmi # Ignore NMI
24804
24805- cmpl $2,%ss:early_recursion_flag
24806+ cmpl $1,%ss:early_recursion_flag
24807 je hlt_loop
24808 incl %ss:early_recursion_flag
24809
24810@@ -586,8 +671,8 @@ ENTRY(early_idt_handler)
24811 pushl (20+6*4)(%esp) /* trapno */
24812 pushl $fault_msg
24813 call printk
24814-#endif
24815 call dump_stack
24816+#endif
24817 hlt_loop:
24818 hlt
24819 jmp hlt_loop
24820@@ -607,8 +692,11 @@ ENDPROC(early_idt_handler)
24821 /* This is the default interrupt "handler" :-) */
24822 ALIGN
24823 ignore_int:
24824- cld
24825 #ifdef CONFIG_PRINTK
24826+ cmpl $2,%ss:early_recursion_flag
24827+ je hlt_loop
24828+ incl %ss:early_recursion_flag
24829+ cld
24830 pushl %eax
24831 pushl %ecx
24832 pushl %edx
24833@@ -617,9 +705,6 @@ ignore_int:
24834 movl $(__KERNEL_DS),%eax
24835 movl %eax,%ds
24836 movl %eax,%es
24837- cmpl $2,early_recursion_flag
24838- je hlt_loop
24839- incl early_recursion_flag
24840 pushl 16(%esp)
24841 pushl 24(%esp)
24842 pushl 32(%esp)
24843@@ -653,29 +738,34 @@ ENTRY(setup_once_ref)
24844 /*
24845 * BSS section
24846 */
24847-__PAGE_ALIGNED_BSS
24848- .align PAGE_SIZE
24849 #ifdef CONFIG_X86_PAE
24850+.section .initial_pg_pmd,"a",@progbits
24851 initial_pg_pmd:
24852 .fill 1024*KPMDS,4,0
24853 #else
24854+.section .initial_page_table,"a",@progbits
24855 ENTRY(initial_page_table)
24856 .fill 1024,4,0
24857 #endif
24858+.section .initial_pg_fixmap,"a",@progbits
24859 initial_pg_fixmap:
24860 .fill 1024,4,0
24861+.section .empty_zero_page,"a",@progbits
24862 ENTRY(empty_zero_page)
24863 .fill 4096,1,0
24864+.section .swapper_pg_dir,"a",@progbits
24865 ENTRY(swapper_pg_dir)
24866+#ifdef CONFIG_X86_PAE
24867+ .fill 4,8,0
24868+#else
24869 .fill 1024,4,0
24870+#endif
24871
24872 /*
24873 * This starts the data section.
24874 */
24875 #ifdef CONFIG_X86_PAE
24876-__PAGE_ALIGNED_DATA
24877- /* Page-aligned for the benefit of paravirt? */
24878- .align PAGE_SIZE
24879+.section .initial_page_table,"a",@progbits
24880 ENTRY(initial_page_table)
24881 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
24882 # if KPMDS == 3
24883@@ -694,12 +784,20 @@ ENTRY(initial_page_table)
24884 # error "Kernel PMDs should be 1, 2 or 3"
24885 # endif
24886 .align PAGE_SIZE /* needs to be page-sized too */
24887+
24888+#ifdef CONFIG_PAX_PER_CPU_PGD
24889+ENTRY(cpu_pgd)
24890+ .rept 2*NR_CPUS
24891+ .fill 4,8,0
24892+ .endr
24893+#endif
24894+
24895 #endif
24896
24897 .data
24898 .balign 4
24899 ENTRY(stack_start)
24900- .long init_thread_union+THREAD_SIZE
24901+ .long init_thread_union+THREAD_SIZE-8
24902
24903 __INITRODATA
24904 int_msg:
24905@@ -727,7 +825,7 @@ fault_msg:
24906 * segment size, and 32-bit linear address value:
24907 */
24908
24909- .data
24910+.section .rodata,"a",@progbits
24911 .globl boot_gdt_descr
24912 .globl idt_descr
24913
24914@@ -736,7 +834,7 @@ fault_msg:
24915 .word 0 # 32 bit align gdt_desc.address
24916 boot_gdt_descr:
24917 .word __BOOT_DS+7
24918- .long boot_gdt - __PAGE_OFFSET
24919+ .long pa(boot_gdt)
24920
24921 .word 0 # 32-bit align idt_desc.address
24922 idt_descr:
24923@@ -747,7 +845,7 @@ idt_descr:
24924 .word 0 # 32 bit align gdt_desc.address
24925 ENTRY(early_gdt_descr)
24926 .word GDT_ENTRIES*8-1
24927- .long gdt_page /* Overwritten for secondary CPUs */
24928+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
24929
24930 /*
24931 * The boot_gdt must mirror the equivalent in setup.S and is
24932@@ -756,5 +854,65 @@ ENTRY(early_gdt_descr)
24933 .align L1_CACHE_BYTES
24934 ENTRY(boot_gdt)
24935 .fill GDT_ENTRY_BOOT_CS,8,0
24936- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
24937- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
24938+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
24939+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
24940+
24941+ .align PAGE_SIZE_asm
24942+ENTRY(cpu_gdt_table)
24943+ .rept NR_CPUS
24944+ .quad 0x0000000000000000 /* NULL descriptor */
24945+ .quad 0x0000000000000000 /* 0x0b reserved */
24946+ .quad 0x0000000000000000 /* 0x13 reserved */
24947+ .quad 0x0000000000000000 /* 0x1b reserved */
24948+
24949+#ifdef CONFIG_PAX_KERNEXEC
24950+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
24951+#else
24952+ .quad 0x0000000000000000 /* 0x20 unused */
24953+#endif
24954+
24955+ .quad 0x0000000000000000 /* 0x28 unused */
24956+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
24957+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
24958+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
24959+ .quad 0x0000000000000000 /* 0x4b reserved */
24960+ .quad 0x0000000000000000 /* 0x53 reserved */
24961+ .quad 0x0000000000000000 /* 0x5b reserved */
24962+
24963+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
24964+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
24965+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
24966+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
24967+
24968+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
24969+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
24970+
24971+ /*
24972+ * Segments used for calling PnP BIOS have byte granularity.
24973+ * The code segments and data segments have fixed 64k limits,
24974+ * the transfer segment sizes are set at run time.
24975+ */
24976+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
24977+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
24978+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
24979+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
24980+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
24981+
24982+ /*
24983+ * The APM segments have byte granularity and their bases
24984+ * are set at run time. All have 64k limits.
24985+ */
24986+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
24987+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
24988+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
24989+
24990+ .quad 0x00c093000000ffff /* 0xd0 - ESPFIX SS */
24991+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
24992+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
24993+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
24994+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
24995+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
24996+
24997+ /* Be sure this is zeroed to avoid false validations in Xen */
24998+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
24999+ .endr
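A large part of the head_32.S additions is boilerplate for writing segment-descriptor base addresses into the boot and per-CPU GDTs: a 32-bit base is scattered across bytes 2, 3, 4 and 7 of the 8-byte descriptor, which is exactly what the movw/rorl/movb sequences above do. The same encoding in C:

#include <stdint.h>

/* Patch the base-address fields of one 8-byte GDT descriptor,
 * mirroring the movw/movb sequence in the startup code. */
void gdt_set_base(uint8_t desc[8], uint32_t base)
{
    desc[2] = base & 0xff;          /* base bits  7:0  */
    desc[3] = (base >> 8) & 0xff;   /* base bits 15:8  */
    desc[4] = (base >> 16) & 0xff;  /* base bits 23:16 */
    desc[7] = (base >> 24) & 0xff;  /* base bits 31:24 */
}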
25000diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
25001index a468c0a..c7dec74 100644
25002--- a/arch/x86/kernel/head_64.S
25003+++ b/arch/x86/kernel/head_64.S
25004@@ -20,6 +20,8 @@
25005 #include <asm/processor-flags.h>
25006 #include <asm/percpu.h>
25007 #include <asm/nops.h>
25008+#include <asm/cpufeature.h>
25009+#include <asm/alternative-asm.h>
25010
25011 #ifdef CONFIG_PARAVIRT
25012 #include <asm/asm-offsets.h>
25013@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
25014 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
25015 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
25016 L3_START_KERNEL = pud_index(__START_KERNEL_map)
25017+L4_VMALLOC_START = pgd_index(VMALLOC_START)
25018+L3_VMALLOC_START = pud_index(VMALLOC_START)
25019+L4_VMALLOC_END = pgd_index(VMALLOC_END)
25020+L3_VMALLOC_END = pud_index(VMALLOC_END)
25021+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
25022+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
25023
25024 .text
25025 __HEAD
25026@@ -89,11 +97,24 @@ startup_64:
25027 * Fixup the physical addresses in the page table
25028 */
25029 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
25030+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
25031+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
25032+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
25033+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
25034+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
25035
25036- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
25037- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
25038+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
25039+#ifndef CONFIG_XEN
25040+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
25041+#endif
25042+
25043+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
25044+
25045+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
25046+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
25047
25048 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
25049+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
25050
25051 /*
25052 * Set up the identity mapping for the switchover. These
25053@@ -177,8 +198,8 @@ ENTRY(secondary_startup_64)
25054 movq $(init_level4_pgt - __START_KERNEL_map), %rax
25055 1:
25056
25057- /* Enable PAE mode and PGE */
25058- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
25059+ /* Enable PAE mode and PSE/PGE */
25060+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
25061 movq %rcx, %cr4
25062
25063 /* Setup early boot stage 4 level pagetables. */
25064@@ -199,10 +220,19 @@ ENTRY(secondary_startup_64)
25065 movl $MSR_EFER, %ecx
25066 rdmsr
25067 btsl $_EFER_SCE, %eax /* Enable System Call */
25068- btl $20,%edi /* No Execute supported? */
25069+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
25070 jnc 1f
25071 btsl $_EFER_NX, %eax
25072 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
25073+#ifndef CONFIG_EFI
25074+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
25075+#endif
25076+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
25077+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
25078+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
25079+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
25080+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
25081+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
25082 1: wrmsr /* Make changes effective */
25083
25084 /* Setup cr0 */
25085@@ -282,6 +312,7 @@ ENTRY(secondary_startup_64)
25086 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
25087 * address given in m16:64.
25088 */
25089+ pax_set_fptr_mask
25090 movq initial_code(%rip),%rax
25091 pushq $0 # fake return address to stop unwinder
25092 pushq $__KERNEL_CS # set correct cs
25093@@ -313,7 +344,7 @@ ENDPROC(start_cpu0)
25094 .quad INIT_PER_CPU_VAR(irq_stack_union)
25095
25096 GLOBAL(stack_start)
25097- .quad init_thread_union+THREAD_SIZE-8
25098+ .quad init_thread_union+THREAD_SIZE-16
25099 .word 0
25100 __FINITDATA
25101
25102@@ -391,7 +422,7 @@ ENTRY(early_idt_handler)
25103 call dump_stack
25104 #ifdef CONFIG_KALLSYMS
25105 leaq early_idt_ripmsg(%rip),%rdi
25106- movq 40(%rsp),%rsi # %rip again
25107+ movq 88(%rsp),%rsi # %rip again
25108 call __print_symbol
25109 #endif
25110 #endif /* EARLY_PRINTK */
25111@@ -420,6 +451,7 @@ ENDPROC(early_idt_handler)
25112 early_recursion_flag:
25113 .long 0
25114
25115+ .section .rodata,"a",@progbits
25116 #ifdef CONFIG_EARLY_PRINTK
25117 early_idt_msg:
25118 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
25119@@ -447,29 +479,52 @@ NEXT_PAGE(early_level4_pgt)
25120 NEXT_PAGE(early_dynamic_pgts)
25121 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
25122
25123- .data
25124+ .section .rodata,"a",@progbits
25125
25126-#ifndef CONFIG_XEN
25127 NEXT_PAGE(init_level4_pgt)
25128- .fill 512,8,0
25129-#else
25130-NEXT_PAGE(init_level4_pgt)
25131- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25132 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
25133 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25134+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
25135+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
25136+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
25137+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
25138+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
25139+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
25140 .org init_level4_pgt + L4_START_KERNEL*8, 0
25141 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
25142 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
25143
25144+#ifdef CONFIG_PAX_PER_CPU_PGD
25145+NEXT_PAGE(cpu_pgd)
25146+ .rept 2*NR_CPUS
25147+ .fill 512,8,0
25148+ .endr
25149+#endif
25150+
25151 NEXT_PAGE(level3_ident_pgt)
25152 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25153+#ifdef CONFIG_XEN
25154 .fill 511, 8, 0
25155+#else
25156+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
25157+ .fill 510,8,0
25158+#endif
25159+
25160+NEXT_PAGE(level3_vmalloc_start_pgt)
25161+ .fill 512,8,0
25162+
25163+NEXT_PAGE(level3_vmalloc_end_pgt)
25164+ .fill 512,8,0
25165+
25166+NEXT_PAGE(level3_vmemmap_pgt)
25167+ .fill L3_VMEMMAP_START,8,0
25168+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
25169+
25170 NEXT_PAGE(level2_ident_pgt)
25171- /* Since I easily can, map the first 1G.
25172+ /* Since I easily can, map the first 2G.
25173 * Don't set NX because code runs from these pages.
25174 */
25175- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
25176-#endif
25177+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
25178
25179 NEXT_PAGE(level3_kernel_pgt)
25180 .fill L3_START_KERNEL,8,0
25181@@ -477,6 +532,9 @@ NEXT_PAGE(level3_kernel_pgt)
25182 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
25183 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25184
25185+NEXT_PAGE(level2_vmemmap_pgt)
25186+ .fill 512,8,0
25187+
25188 NEXT_PAGE(level2_kernel_pgt)
25189 /*
25190 * 512 MB kernel mapping. We spend a full page on this pagetable
25191@@ -494,28 +552,64 @@ NEXT_PAGE(level2_kernel_pgt)
25192 NEXT_PAGE(level2_fixmap_pgt)
25193 .fill 506,8,0
25194 .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25195- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
25196- .fill 5,8,0
25197+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
25198+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
25199+ .fill 4,8,0
25200
25201 NEXT_PAGE(level1_fixmap_pgt)
25202 .fill 512,8,0
25203
25204+NEXT_PAGE(level1_vsyscall_pgt)
25205+ .fill 512,8,0
25206+
25207 #undef PMDS
25208
25209- .data
25210+ .align PAGE_SIZE
25211+ENTRY(cpu_gdt_table)
25212+ .rept NR_CPUS
25213+ .quad 0x0000000000000000 /* NULL descriptor */
25214+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
25215+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
25216+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
25217+ .quad 0x00cffb000000ffff /* __USER32_CS */
25218+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
25219+ .quad 0x00affb000000ffff /* __USER_CS */
25220+
25221+#ifdef CONFIG_PAX_KERNEXEC
25222+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
25223+#else
25224+ .quad 0x0 /* unused */
25225+#endif
25226+
25227+ .quad 0,0 /* TSS */
25228+ .quad 0,0 /* LDT */
25229+ .quad 0,0,0 /* three TLS descriptors */
25230+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
25231+ /* asm/segment.h:GDT_ENTRIES must match this */
25232+
25233+#ifdef CONFIG_PAX_MEMORY_UDEREF
25234+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
25235+#else
25236+ .quad 0x0 /* unused */
25237+#endif
25238+
25239+ /* zero the remaining page */
25240+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
25241+ .endr
25242+
25243 .align 16
25244 .globl early_gdt_descr
25245 early_gdt_descr:
25246 .word GDT_ENTRIES*8-1
25247 early_gdt_descr_base:
25248- .quad INIT_PER_CPU_VAR(gdt_page)
25249+ .quad cpu_gdt_table
25250
25251 ENTRY(phys_base)
25252 /* This must match the first entry in level2_kernel_pgt */
25253 .quad 0x0000000000000000
25254
25255 #include "../../x86/xen/xen-head.S"
25256-
25257- __PAGE_ALIGNED_BSS
25258+
25259+ .section .rodata,"a",@progbits
25260 NEXT_PAGE(empty_zero_page)
25261 .skip PAGE_SIZE
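
The cpu_gdt_table block above bakes every CPU's GDT into one statically allocated, page-aligned table in place of the per-cpu gdt_page. Each .quad is one 8-byte segment descriptor; the following standalone decoder (a sketch written for this document, not code from the patch) unpacks magic constants such as __KERNEL_CS = 0x00af9b000000ffff:

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch: decode one 8-byte GDT descriptor (not code from the patch). */
    static void decode_gdt_entry(uint64_t d)
    {
        uint32_t limit  = (uint32_t)((d & 0xffff) | ((d >> 32) & 0xf0000));
        uint32_t base   = (uint32_t)(((d >> 16) & 0xffffff) | ((d >> 56) << 24));
        unsigned access = (unsigned)((d >> 40) & 0xff);  /* P, DPL, S, type */
        unsigned flags  = (unsigned)((d >> 52) & 0xf);   /* G, D/B, L, AVL */

        printf("base=%#010x limit=%#07x dpl=%u %s%s%s\n",
               base, limit, (access >> 5) & 3,
               (access & 0x08) ? "code" : "data",
               (flags & 0x8) ? " 4K-granular" : "",
               (flags & 0x2) ? " long-mode" : "");
    }

    int main(void)
    {
        decode_gdt_entry(0x00af9b000000ffffULL); /* __KERNEL_CS */
        decode_gdt_entry(0x00cff3000000ffffULL); /* __USER_DS  */
        return 0;
    }

Running it reports __KERNEL_CS as a DPL0 long-mode code segment and __USER_DS as a DPL3 data segment, matching the comments in the table.
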
25262diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
25263index 05fd74f..c3548b1 100644
25264--- a/arch/x86/kernel/i386_ksyms_32.c
25265+++ b/arch/x86/kernel/i386_ksyms_32.c
25266@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
25267 EXPORT_SYMBOL(cmpxchg8b_emu);
25268 #endif
25269
25270+EXPORT_SYMBOL_GPL(cpu_gdt_table);
25271+
25272 /* Networking helper routines. */
25273 EXPORT_SYMBOL(csum_partial_copy_generic);
25274+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
25275+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
25276
25277 EXPORT_SYMBOL(__get_user_1);
25278 EXPORT_SYMBOL(__get_user_2);
25279@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
25280 EXPORT_SYMBOL(___preempt_schedule_context);
25281 #endif
25282 #endif
25283+
25284+#ifdef CONFIG_PAX_KERNEXEC
25285+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
25286+#endif
25287+
25288+#ifdef CONFIG_PAX_PER_CPU_PGD
25289+EXPORT_SYMBOL(cpu_pgd);
25290+#endif
25291diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
25292index d5dd808..b6432cf 100644
25293--- a/arch/x86/kernel/i387.c
25294+++ b/arch/x86/kernel/i387.c
25295@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
25296 static inline bool interrupted_user_mode(void)
25297 {
25298 struct pt_regs *regs = get_irq_regs();
25299- return regs && user_mode_vm(regs);
25300+ return regs && user_mode(regs);
25301 }
25302
25303 /*
25304diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
25305index 8af8171..f8c1169 100644
25306--- a/arch/x86/kernel/i8259.c
25307+++ b/arch/x86/kernel/i8259.c
25308@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
25309 static void make_8259A_irq(unsigned int irq)
25310 {
25311 disable_irq_nosync(irq);
25312- io_apic_irqs &= ~(1<<irq);
25313+ io_apic_irqs &= ~(1UL<<irq);
25314 irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
25315 i8259A_chip.name);
25316 enable_irq(irq);
25317@@ -209,7 +209,7 @@ spurious_8259A_irq:
25318 "spurious 8259A interrupt: IRQ%d.\n", irq);
25319 spurious_irq_mask |= irqmask;
25320 }
25321- atomic_inc(&irq_err_count);
25322+ atomic_inc_unchecked(&irq_err_count);
25323 /*
25324 * Theoretically we do not have to handle this IRQ,
25325 * but in Linux this does not cause problems and is
25326@@ -350,14 +350,16 @@ static void init_8259A(int auto_eoi)
25327 /* (slave's support for AEOI in flat mode is to be investigated) */
25328 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
25329
25330+ pax_open_kernel();
25331 if (auto_eoi)
25332 /*
25333 * In AEOI mode we just have to mask the interrupt
25334 * when acking.
25335 */
25336- i8259A_chip.irq_mask_ack = disable_8259A_irq;
25337+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
25338 else
25339- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25340+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25341+ pax_close_kernel();
25342
25343 udelay(100); /* wait for 8259A to initialize */
25344
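
The init_8259A() hunk shows a recurring PaX pattern: structures like i8259A_chip become read-only under KERNEXEC, so the few legitimate writers bracket their stores with pax_open_kernel()/pax_close_kernel(). A userspace analogy with mprotect() (illustration only; the kernel toggles hardware write protection, not page permissions via syscall):

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        /* a function-pointer table on its own page, read-only in steady state */
        void (**ops)(void) = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ops == MAP_FAILED)
            return 1;
        mprotect(ops, pagesz, PROT_READ);               /* lock it down */

        mprotect(ops, pagesz, PROT_READ | PROT_WRITE);  /* ~ pax_open_kernel() */
        ops[0] = (void (*)(void))0;                     /* the sanctioned write */
        mprotect(ops, pagesz, PROT_READ);               /* ~ pax_close_kernel() */

        printf("ops[0] rewritten inside a temporary write window\n");
        return 0;
    }
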
25345diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
25346index a979b5b..1d6db75 100644
25347--- a/arch/x86/kernel/io_delay.c
25348+++ b/arch/x86/kernel/io_delay.c
25349@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
25350 * Quirk table for systems that misbehave (lock up, etc.) if port
25351 * 0x80 is used:
25352 */
25353-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
25354+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
25355 {
25356 .callback = dmi_io_delay_0xed_port,
25357 .ident = "Compaq Presario V6000",
25358diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
25359index 4ddaf66..49d5c18 100644
25360--- a/arch/x86/kernel/ioport.c
25361+++ b/arch/x86/kernel/ioport.c
25362@@ -6,6 +6,7 @@
25363 #include <linux/sched.h>
25364 #include <linux/kernel.h>
25365 #include <linux/capability.h>
25366+#include <linux/security.h>
25367 #include <linux/errno.h>
25368 #include <linux/types.h>
25369 #include <linux/ioport.h>
25370@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25371 return -EINVAL;
25372 if (turn_on && !capable(CAP_SYS_RAWIO))
25373 return -EPERM;
25374+#ifdef CONFIG_GRKERNSEC_IO
25375+ if (turn_on && grsec_disable_privio) {
25376+ gr_handle_ioperm();
25377+ return -ENODEV;
25378+ }
25379+#endif
25380
25381 /*
25382 * If it's the first ioperm() call in this thread's lifetime, set the
25383@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25384 * because the ->io_bitmap_max value must match the bitmap
25385 * contents:
25386 */
25387- tss = &per_cpu(init_tss, get_cpu());
25388+ tss = init_tss + get_cpu();
25389
25390 if (turn_on)
25391 bitmap_clear(t->io_bitmap_ptr, from, num);
25392@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
25393 if (level > old) {
25394 if (!capable(CAP_SYS_RAWIO))
25395 return -EPERM;
25396+#ifdef CONFIG_GRKERNSEC_IO
25397+ if (grsec_disable_privio) {
25398+ gr_handle_iopl();
25399+ return -ENODEV;
25400+ }
25401+#endif
25402 }
25403 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
25404 t->iopl = level << 12;
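
With CONFIG_GRKERNSEC_IO enabled, both hunks above make ioperm(2) and iopl(2) fail with -ENODEV regardless of CAP_SYS_RAWIO. A quick userspace probe of that behaviour (expect EPERM as non-root on a stock kernel, ENODEV under the privio lockdown):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/io.h>

    int main(void)
    {
        if (iopl(3) != 0)
            printf("iopl(3) denied: %s\n", strerror(errno));
        else
            printf("iopl(3) granted\n");
        return 0;
    }
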
25405diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
25406index 922d285..6d20692 100644
25407--- a/arch/x86/kernel/irq.c
25408+++ b/arch/x86/kernel/irq.c
25409@@ -22,7 +22,7 @@
25410 #define CREATE_TRACE_POINTS
25411 #include <asm/trace/irq_vectors.h>
25412
25413-atomic_t irq_err_count;
25414+atomic_unchecked_t irq_err_count;
25415
25416 /* Function pointer for generic interrupt vector handling */
25417 void (*x86_platform_ipi_callback)(void) = NULL;
25418@@ -132,9 +132,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
25419 seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
25420 seq_printf(p, " Hypervisor callback interrupts\n");
25421 #endif
25422- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
25423+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
25424 #if defined(CONFIG_X86_IO_APIC)
25425- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
25426+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
25427 #endif
25428 return 0;
25429 }
25430@@ -174,7 +174,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
25431
25432 u64 arch_irq_stat(void)
25433 {
25434- u64 sum = atomic_read(&irq_err_count);
25435+ u64 sum = atomic_read_unchecked(&irq_err_count);
25436 return sum;
25437 }
25438
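
irq_err_count is a pure statistic that may legitimately wrap, so it moves from atomic_t (which PAX_REFCOUNT instruments against overflow) to atomic_unchecked_t. A stand-in model of the unchecked variant using compiler builtins (the real kernel type and helpers differ; this only shows the intent):

    #include <stdio.h>

    /* Stand-in: a counter with no overflow instrumentation. */
    typedef struct { volatile int counter; } atomic_unchecked_t;

    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED); /* free to wrap */
    }

    static inline int atomic_read_unchecked(atomic_unchecked_t *v)
    {
        return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
    }

    int main(void)
    {
        atomic_unchecked_t err_count = { 0 };
        atomic_inc_unchecked(&err_count);
        printf("irq_err_count model: %d\n", atomic_read_unchecked(&err_count));
        return 0;
    }
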
25439diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
25440index 63ce838..2ea3e06 100644
25441--- a/arch/x86/kernel/irq_32.c
25442+++ b/arch/x86/kernel/irq_32.c
25443@@ -29,6 +29,8 @@ EXPORT_PER_CPU_SYMBOL(irq_regs);
25444
25445 #ifdef CONFIG_DEBUG_STACKOVERFLOW
25446
25447+extern void gr_handle_kernel_exploit(void);
25448+
25449 int sysctl_panic_on_stackoverflow __read_mostly;
25450
25451 /* Debugging check for stack overflow: is there less than 1KB free? */
25452@@ -39,13 +41,14 @@ static int check_stack_overflow(void)
25453 __asm__ __volatile__("andl %%esp,%0" :
25454 "=r" (sp) : "0" (THREAD_SIZE - 1));
25455
25456- return sp < (sizeof(struct thread_info) + STACK_WARN);
25457+ return sp < STACK_WARN;
25458 }
25459
25460 static void print_stack_overflow(void)
25461 {
25462 printk(KERN_WARNING "low stack detected by irq handler\n");
25463 dump_stack();
25464+ gr_handle_kernel_exploit();
25465 if (sysctl_panic_on_stackoverflow)
25466 panic("low stack detected by irq handler - check messages\n");
25467 }
25468@@ -84,10 +87,9 @@ static inline void *current_stack(void)
25469 static inline int
25470 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25471 {
25472- struct irq_stack *curstk, *irqstk;
25473+ struct irq_stack *irqstk;
25474 u32 *isp, *prev_esp, arg1, arg2;
25475
25476- curstk = (struct irq_stack *) current_stack();
25477 irqstk = __this_cpu_read(hardirq_stack);
25478
25479 /*
25480@@ -96,15 +98,19 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25481 * handler) we can't do that and just have to keep using the
25482 * current stack (which is the irq stack already after all)
25483 */
25484- if (unlikely(curstk == irqstk))
25485+ if (unlikely((void *)current_stack_pointer - (void *)irqstk < THREAD_SIZE))
25486 return 0;
25487
25488- isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
25489+ isp = (u32 *) ((char *)irqstk + sizeof(*irqstk) - 8);
25490
25491 /* Save the next esp at the bottom of the stack */
25492 prev_esp = (u32 *)irqstk;
25493 *prev_esp = current_stack_pointer;
25494
25495+#ifdef CONFIG_PAX_MEMORY_UDEREF
25496+ __set_fs(MAKE_MM_SEG(0));
25497+#endif
25498+
25499 if (unlikely(overflow))
25500 call_on_stack(print_stack_overflow, isp);
25501
25502@@ -115,6 +121,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25503 : "0" (irq), "1" (desc), "2" (isp),
25504 "D" (desc->handle_irq)
25505 : "memory", "cc", "ecx");
25506+
25507+#ifdef CONFIG_PAX_MEMORY_UDEREF
25508+ __set_fs(current_thread_info()->addr_limit);
25509+#endif
25510+
25511 return 1;
25512 }
25513
25514@@ -123,32 +134,18 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25515 */
25516 void irq_ctx_init(int cpu)
25517 {
25518- struct irq_stack *irqstk;
25519-
25520 if (per_cpu(hardirq_stack, cpu))
25521 return;
25522
25523- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25524- THREADINFO_GFP,
25525- THREAD_SIZE_ORDER));
25526- per_cpu(hardirq_stack, cpu) = irqstk;
25527-
25528- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25529- THREADINFO_GFP,
25530- THREAD_SIZE_ORDER));
25531- per_cpu(softirq_stack, cpu) = irqstk;
25532-
25533- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
25534- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
25535+ per_cpu(hardirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25536+ per_cpu(softirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25537 }
25538
25539 void do_softirq_own_stack(void)
25540 {
25541- struct thread_info *curstk;
25542 struct irq_stack *irqstk;
25543 u32 *isp, *prev_esp;
25544
25545- curstk = current_stack();
25546 irqstk = __this_cpu_read(softirq_stack);
25547
25548 /* build the stack frame on the softirq stack */
25549@@ -158,7 +155,16 @@ void do_softirq_own_stack(void)
25550 prev_esp = (u32 *)irqstk;
25551 *prev_esp = current_stack_pointer;
25552
25553+#ifdef CONFIG_PAX_MEMORY_UDEREF
25554+ __set_fs(MAKE_MM_SEG(0));
25555+#endif
25556+
25557 call_on_stack(__do_softirq, isp);
25558+
25559+#ifdef CONFIG_PAX_MEMORY_UDEREF
25560+ __set_fs(current_thread_info()->addr_limit);
25561+#endif
25562+
25563 }
25564
25565 bool handle_irq(unsigned irq, struct pt_regs *regs)
25566@@ -172,7 +178,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
25567 if (unlikely(!desc))
25568 return false;
25569
25570- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25571+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25572 if (unlikely(overflow))
25573 print_stack_overflow();
25574 desc->handle_irq(irq, desc);
25575diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
25576index 4d1c746..55a22d6 100644
25577--- a/arch/x86/kernel/irq_64.c
25578+++ b/arch/x86/kernel/irq_64.c
25579@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
25580 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
25581 EXPORT_PER_CPU_SYMBOL(irq_regs);
25582
25583+extern void gr_handle_kernel_exploit(void);
25584+
25585 int sysctl_panic_on_stackoverflow;
25586
25587 /*
25588@@ -44,7 +46,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25589 u64 estack_top, estack_bottom;
25590 u64 curbase = (u64)task_stack_page(current);
25591
25592- if (user_mode_vm(regs))
25593+ if (user_mode(regs))
25594 return;
25595
25596 if (regs->sp >= curbase + sizeof(struct thread_info) +
25597@@ -69,6 +71,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25598 irq_stack_top, irq_stack_bottom,
25599 estack_top, estack_bottom);
25600
25601+ gr_handle_kernel_exploit();
25602+
25603 if (sysctl_panic_on_stackoverflow)
25604 panic("low stack detected by irq handler - check messages\n");
25605 #endif
25606diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
25607index 26d5a55..a01160a 100644
25608--- a/arch/x86/kernel/jump_label.c
25609+++ b/arch/x86/kernel/jump_label.c
25610@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25611 * Jump label is enabled for the first time.
25612 * So we expect a default_nop...
25613 */
25614- if (unlikely(memcmp((void *)entry->code, default_nop, 5)
25615+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
25616 != 0))
25617 bug_at((void *)entry->code, __LINE__);
25618 } else {
25619@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25620 * ...otherwise expect an ideal_nop. Otherwise
25621 * something went horribly wrong.
25622 */
25623- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
25624+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
25625 != 0))
25626 bug_at((void *)entry->code, __LINE__);
25627 }
25628@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
25629 * are converting the default nop to the ideal nop.
25630 */
25631 if (init) {
25632- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
25633+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
25634 bug_at((void *)entry->code, __LINE__);
25635 } else {
25636 code.jump = 0xe9;
25637 code.offset = entry->target -
25638 (entry->code + JUMP_LABEL_NOP_SIZE);
25639- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
25640+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
25641 bug_at((void *)entry->code, __LINE__);
25642 }
25643 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
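
Every memcmp() against entry->code is rewritten to go through ktla_ktva() because, under KERNEXEC, kernel text is reachable at two addresses: the CPU fetches instructions through one mapping while software must inspect or patch the bytes through the other. A toy model (the fixed delta and the conversion direction are assumptions for illustration, not the patch's actual arithmetic):

    #include <stdint.h>
    #include <stdio.h>

    #define KERNEXEC_TEXT_DELTA 0x10000000UL  /* hypothetical alias distance */

    /* linear (execute) address -> virtual (read) alias, and back */
    static uintptr_t ktla_ktva_sketch(uintptr_t a) { return a + KERNEXEC_TEXT_DELTA; }
    static uintptr_t ktva_ktla_sketch(uintptr_t a) { return a - KERNEXEC_TEXT_DELTA; }

    int main(void)
    {
        uintptr_t exec_addr = 0xc0100000UL;                /* CPU fetches here */
        uintptr_t read_addr = ktla_ktva_sketch(exec_addr); /* memcmp() looks here */

        printf("fetch @ %#lx, inspect @ %#lx, back: %#lx\n",
               (unsigned long)exec_addr, (unsigned long)read_addr,
               (unsigned long)ktva_ktla_sketch(read_addr));
        return 0;
    }
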
25644diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
25645index 7ec1d5f..5a7d130 100644
25646--- a/arch/x86/kernel/kgdb.c
25647+++ b/arch/x86/kernel/kgdb.c
25648@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
25649 #ifdef CONFIG_X86_32
25650 switch (regno) {
25651 case GDB_SS:
25652- if (!user_mode_vm(regs))
25653+ if (!user_mode(regs))
25654 *(unsigned long *)mem = __KERNEL_DS;
25655 break;
25656 case GDB_SP:
25657- if (!user_mode_vm(regs))
25658+ if (!user_mode(regs))
25659 *(unsigned long *)mem = kernel_stack_pointer(regs);
25660 break;
25661 case GDB_GS:
25662@@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void)
25663 bp->attr.bp_addr = breakinfo[breakno].addr;
25664 bp->attr.bp_len = breakinfo[breakno].len;
25665 bp->attr.bp_type = breakinfo[breakno].type;
25666- info->address = breakinfo[breakno].addr;
25667+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
25668+ info->address = ktla_ktva(breakinfo[breakno].addr);
25669+ else
25670+ info->address = breakinfo[breakno].addr;
25671 info->len = breakinfo[breakno].len;
25672 info->type = breakinfo[breakno].type;
25673 val = arch_install_hw_breakpoint(bp);
25674@@ -475,12 +478,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
25675 case 'k':
25676 /* clear the trace bit */
25677 linux_regs->flags &= ~X86_EFLAGS_TF;
25678- atomic_set(&kgdb_cpu_doing_single_step, -1);
25679+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
25680
25681 /* set the trace bit if we're stepping */
25682 if (remcomInBuffer[0] == 's') {
25683 linux_regs->flags |= X86_EFLAGS_TF;
25684- atomic_set(&kgdb_cpu_doing_single_step,
25685+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
25686 raw_smp_processor_id());
25687 }
25688
25689@@ -545,7 +548,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
25690
25691 switch (cmd) {
25692 case DIE_DEBUG:
25693- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
25694+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
25695 if (user_mode(regs))
25696 return single_step_cont(regs, args);
25697 break;
25698@@ -750,11 +753,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25699 #endif /* CONFIG_DEBUG_RODATA */
25700
25701 bpt->type = BP_BREAKPOINT;
25702- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
25703+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
25704 BREAK_INSTR_SIZE);
25705 if (err)
25706 return err;
25707- err = probe_kernel_write((char *)bpt->bpt_addr,
25708+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25709 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
25710 #ifdef CONFIG_DEBUG_RODATA
25711 if (!err)
25712@@ -767,7 +770,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25713 return -EBUSY;
25714 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
25715 BREAK_INSTR_SIZE);
25716- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25717+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25718 if (err)
25719 return err;
25720 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
25721@@ -792,13 +795,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
25722 if (mutex_is_locked(&text_mutex))
25723 goto knl_write;
25724 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
25725- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25726+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25727 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
25728 goto knl_write;
25729 return err;
25730 knl_write:
25731 #endif /* CONFIG_DEBUG_RODATA */
25732- return probe_kernel_write((char *)bpt->bpt_addr,
25733+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25734 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
25735 }
25736
25737diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
25738index 67e6d19..731ed28 100644
25739--- a/arch/x86/kernel/kprobes/core.c
25740+++ b/arch/x86/kernel/kprobes/core.c
25741@@ -120,9 +120,12 @@ __synthesize_relative_insn(void *from, void *to, u8 op)
25742 s32 raddr;
25743 } __packed *insn;
25744
25745- insn = (struct __arch_relative_insn *)from;
25746+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
25747+
25748+ pax_open_kernel();
25749 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
25750 insn->op = op;
25751+ pax_close_kernel();
25752 }
25753
25754 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
25755@@ -168,7 +171,7 @@ int can_boost(kprobe_opcode_t *opcodes)
25756 kprobe_opcode_t opcode;
25757 kprobe_opcode_t *orig_opcodes = opcodes;
25758
25759- if (search_exception_tables((unsigned long)opcodes))
25760+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
25761 return 0; /* Page fault may occur on this address. */
25762
25763 retry:
25764@@ -242,9 +245,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
25765 * for the first byte, we can recover the original instruction
25766 * from it and kp->opcode.
25767 */
25768- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25769+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25770 buf[0] = kp->opcode;
25771- return (unsigned long)buf;
25772+ return ktva_ktla((unsigned long)buf);
25773 }
25774
25775 /*
25776@@ -336,7 +339,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25777 /* Another subsystem puts a breakpoint, failed to recover */
25778 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
25779 return 0;
25780+ pax_open_kernel();
25781 memcpy(dest, insn.kaddr, insn.length);
25782+ pax_close_kernel();
25783
25784 #ifdef CONFIG_X86_64
25785 if (insn_rip_relative(&insn)) {
25786@@ -363,7 +368,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25787 return 0;
25788 }
25789 disp = (u8 *) dest + insn_offset_displacement(&insn);
25790+ pax_open_kernel();
25791 *(s32 *) disp = (s32) newdisp;
25792+ pax_close_kernel();
25793 }
25794 #endif
25795 return insn.length;
25796@@ -505,7 +512,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25797 * nor set current_kprobe, because it doesn't use single
25798 * stepping.
25799 */
25800- regs->ip = (unsigned long)p->ainsn.insn;
25801+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25802 preempt_enable_no_resched();
25803 return;
25804 }
25805@@ -522,9 +529,9 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25806 regs->flags &= ~X86_EFLAGS_IF;
25807 /* single step inline if the instruction is an int3 */
25808 if (p->opcode == BREAKPOINT_INSTRUCTION)
25809- regs->ip = (unsigned long)p->addr;
25810+ regs->ip = ktla_ktva((unsigned long)p->addr);
25811 else
25812- regs->ip = (unsigned long)p->ainsn.insn;
25813+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25814 }
25815 NOKPROBE_SYMBOL(setup_singlestep);
25816
25817@@ -574,7 +581,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25818 struct kprobe *p;
25819 struct kprobe_ctlblk *kcb;
25820
25821- if (user_mode_vm(regs))
25822+ if (user_mode(regs))
25823 return 0;
25824
25825 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
25826@@ -609,7 +616,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25827 setup_singlestep(p, regs, kcb, 0);
25828 return 1;
25829 }
25830- } else if (*addr != BREAKPOINT_INSTRUCTION) {
25831+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
25832 /*
25833 * The breakpoint instruction was removed right
25834 * after we hit it. Another cpu has removed
25835@@ -656,6 +663,9 @@ static void __used kretprobe_trampoline_holder(void)
25836 " movq %rax, 152(%rsp)\n"
25837 RESTORE_REGS_STRING
25838 " popfq\n"
25839+#ifdef KERNEXEC_PLUGIN
25840+ " btsq $63,(%rsp)\n"
25841+#endif
25842 #else
25843 " pushf\n"
25844 SAVE_REGS_STRING
25845@@ -796,7 +806,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
25846 struct kprobe_ctlblk *kcb)
25847 {
25848 unsigned long *tos = stack_addr(regs);
25849- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
25850+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
25851 unsigned long orig_ip = (unsigned long)p->addr;
25852 kprobe_opcode_t *insn = p->ainsn.insn;
25853
25854@@ -979,7 +989,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
25855 struct die_args *args = data;
25856 int ret = NOTIFY_DONE;
25857
25858- if (args->regs && user_mode_vm(args->regs))
25859+ if (args->regs && user_mode(args->regs))
25860 return ret;
25861
25862 if (val == DIE_GPF) {
25863diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
25864index f304773..551e63c 100644
25865--- a/arch/x86/kernel/kprobes/opt.c
25866+++ b/arch/x86/kernel/kprobes/opt.c
25867@@ -79,6 +79,7 @@ found:
25868 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
25869 static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25870 {
25871+ pax_open_kernel();
25872 #ifdef CONFIG_X86_64
25873 *addr++ = 0x48;
25874 *addr++ = 0xbf;
25875@@ -86,6 +87,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25876 *addr++ = 0xb8;
25877 #endif
25878 *(unsigned long *)addr = val;
25879+ pax_close_kernel();
25880 }
25881
25882 asm (
25883@@ -337,7 +339,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25884 * Verify if the address gap is in 2GB range, because this uses
25885 * a relative jump.
25886 */
25887- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
25888+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
25889 if (abs(rel) > 0x7fffffff)
25890 return -ERANGE;
25891
25892@@ -352,16 +354,18 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25893 op->optinsn.size = ret;
25894
25895 /* Copy arch-dep-instance from template */
25896- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
25897+ pax_open_kernel();
25898+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
25899+ pax_close_kernel();
25900
25901 /* Set probe information */
25902 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
25903
25904 /* Set probe function call */
25905- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
25906+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
25907
25908 /* Set returning jmp instruction at the tail of out-of-line buffer */
25909- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
25910+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
25911 (u8 *)op->kp.addr + op->optinsn.size);
25912
25913 flush_icache_range((unsigned long) buf,
25914@@ -386,7 +390,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
25915 WARN_ON(kprobe_disabled(&op->kp));
25916
25917 /* Backup instructions which will be replaced by jump address */
25918- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
25919+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
25920 RELATIVE_ADDR_SIZE);
25921
25922 insn_buf[0] = RELATIVEJUMP_OPCODE;
25923@@ -434,7 +438,7 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
25924 /* This kprobe is really able to run optimized path. */
25925 op = container_of(p, struct optimized_kprobe, kp);
25926 /* Detour through copied instructions */
25927- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
25928+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
25929 if (!reenter)
25930 reset_current_kprobe();
25931 preempt_enable_no_resched();
25932diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
25933index c2bedae..25e7ab60 100644
25934--- a/arch/x86/kernel/ksysfs.c
25935+++ b/arch/x86/kernel/ksysfs.c
25936@@ -184,7 +184,7 @@ out:
25937
25938 static struct kobj_attribute type_attr = __ATTR_RO(type);
25939
25940-static struct bin_attribute data_attr = {
25941+static bin_attribute_no_const data_attr __read_only = {
25942 .attr = {
25943 .name = "data",
25944 .mode = S_IRUGO,
25945diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
25946index c37886d..d851d32 100644
25947--- a/arch/x86/kernel/ldt.c
25948+++ b/arch/x86/kernel/ldt.c
25949@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
25950 if (reload) {
25951 #ifdef CONFIG_SMP
25952 preempt_disable();
25953- load_LDT(pc);
25954+ load_LDT_nolock(pc);
25955 if (!cpumask_equal(mm_cpumask(current->mm),
25956 cpumask_of(smp_processor_id())))
25957 smp_call_function(flush_ldt, current->mm, 1);
25958 preempt_enable();
25959 #else
25960- load_LDT(pc);
25961+ load_LDT_nolock(pc);
25962 #endif
25963 }
25964 if (oldsize) {
25965@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
25966 return err;
25967
25968 for (i = 0; i < old->size; i++)
25969- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
25970+ write_ldt_entry(new->ldt, i, old->ldt + i);
25971 return 0;
25972 }
25973
25974@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
25975 retval = copy_ldt(&mm->context, &old_mm->context);
25976 mutex_unlock(&old_mm->context.lock);
25977 }
25978+
25979+ if (tsk == current) {
25980+ mm->context.vdso = 0;
25981+
25982+#ifdef CONFIG_X86_32
25983+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25984+ mm->context.user_cs_base = 0UL;
25985+ mm->context.user_cs_limit = ~0UL;
25986+
25987+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
25988+ cpus_clear(mm->context.cpu_user_cs_mask);
25989+#endif
25990+
25991+#endif
25992+#endif
25993+
25994+ }
25995+
25996 return retval;
25997 }
25998
25999@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
26000 }
26001 }
26002
26003+#ifdef CONFIG_PAX_SEGMEXEC
26004+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
26005+ error = -EINVAL;
26006+ goto out_unlock;
26007+ }
26008+#endif
26009+
26010 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
26011 error = -EINVAL;
26012 goto out_unlock;
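
The write_ldt() hunk rejects descriptors with MODIFY_LDT_CONTENTS_CODE for SEGMEXEC tasks: SEGMEXEC enforces non-executability through segmentation, and a user-installed code segment in the LDT would sidestep it. The check in isolation (struct user_desc and the constant come from the UAPI header; the function is a trimmed stand-in, not the kernel code):

    #include <asm/ldt.h>    /* struct user_desc, MODIFY_LDT_CONTENTS_CODE */
    #include <errno.h>
    #include <stdio.h>

    static int segmexec_check_ldt_entry(const struct user_desc *info)
    {
        if (info->contents & MODIFY_LDT_CONTENTS_CODE)
            return -EINVAL;     /* refuse user-supplied code segments */
        return 0;
    }

    int main(void)
    {
        struct user_desc d = { .contents = MODIFY_LDT_CONTENTS_CODE };
        printf("code descriptor -> %d (expect -EINVAL = %d)\n",
               segmexec_check_ldt_entry(&d), -EINVAL);
        return 0;
    }
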
26013diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
26014index 1667b1d..16492c5 100644
26015--- a/arch/x86/kernel/machine_kexec_32.c
26016+++ b/arch/x86/kernel/machine_kexec_32.c
26017@@ -25,7 +25,7 @@
26018 #include <asm/cacheflush.h>
26019 #include <asm/debugreg.h>
26020
26021-static void set_idt(void *newidt, __u16 limit)
26022+static void set_idt(struct desc_struct *newidt, __u16 limit)
26023 {
26024 struct desc_ptr curidt;
26025
26026@@ -37,7 +37,7 @@ static void set_idt(void *newidt, __u16 limit)
26027 }
26028
26029
26030-static void set_gdt(void *newgdt, __u16 limit)
26031+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
26032 {
26033 struct desc_ptr curgdt;
26034
26035@@ -215,7 +215,7 @@ void machine_kexec(struct kimage *image)
26036 }
26037
26038 control_page = page_address(image->control_code_page);
26039- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
26040+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
26041
26042 relocate_kernel_ptr = control_page;
26043 page_list[PA_CONTROL_PAGE] = __pa(control_page);
26044diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
26045index c050a01..5774072 100644
26046--- a/arch/x86/kernel/mcount_64.S
26047+++ b/arch/x86/kernel/mcount_64.S
26048@@ -7,7 +7,7 @@
26049 #include <linux/linkage.h>
26050 #include <asm/ptrace.h>
26051 #include <asm/ftrace.h>
26052-
26053+#include <asm/alternative-asm.h>
26054
26055 .code64
26056 .section .entry.text, "ax"
26057@@ -24,8 +24,9 @@
26058 #ifdef CONFIG_DYNAMIC_FTRACE
26059
26060 ENTRY(function_hook)
26061+ pax_force_retaddr
26062 retq
26063-END(function_hook)
26064+ENDPROC(function_hook)
26065
26066 /* skip is set if stack has been adjusted */
26067 .macro ftrace_caller_setup skip=0
26068@@ -66,8 +67,9 @@ GLOBAL(ftrace_graph_call)
26069 #endif
26070
26071 GLOBAL(ftrace_stub)
26072+ pax_force_retaddr
26073 retq
26074-END(ftrace_caller)
26075+ENDPROC(ftrace_caller)
26076
26077 ENTRY(ftrace_regs_caller)
26078 /* Save the current flags before compare (in SS location)*/
26079@@ -135,7 +137,7 @@ ftrace_restore_flags:
26080 popfq
26081 jmp ftrace_stub
26082
26083-END(ftrace_regs_caller)
26084+ENDPROC(ftrace_regs_caller)
26085
26086
26087 #else /* ! CONFIG_DYNAMIC_FTRACE */
26088@@ -156,6 +158,7 @@ ENTRY(function_hook)
26089 #endif
26090
26091 GLOBAL(ftrace_stub)
26092+ pax_force_retaddr
26093 retq
26094
26095 trace:
26096@@ -169,12 +172,13 @@ trace:
26097 #endif
26098 subq $MCOUNT_INSN_SIZE, %rdi
26099
26100+ pax_force_fptr ftrace_trace_function
26101 call *ftrace_trace_function
26102
26103 MCOUNT_RESTORE_FRAME
26104
26105 jmp ftrace_stub
26106-END(function_hook)
26107+ENDPROC(function_hook)
26108 #endif /* CONFIG_DYNAMIC_FTRACE */
26109 #endif /* CONFIG_FUNCTION_TRACER */
26110
26111@@ -196,8 +200,9 @@ ENTRY(ftrace_graph_caller)
26112
26113 MCOUNT_RESTORE_FRAME
26114
26115+ pax_force_retaddr
26116 retq
26117-END(ftrace_graph_caller)
26118+ENDPROC(ftrace_graph_caller)
26119
26120 GLOBAL(return_to_handler)
26121 subq $24, %rsp
26122@@ -213,5 +218,7 @@ GLOBAL(return_to_handler)
26123 movq 8(%rsp), %rdx
26124 movq (%rsp), %rax
26125 addq $24, %rsp
26126+ pax_force_fptr %rdi
26127 jmp *%rdi
26128+ENDPROC(return_to_handler)
26129 #endif
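
pax_force_retaddr, inserted before each retq above (and spelled out as btsq $63,(%rsp) in the kretprobe trampoline earlier), sets bit 63 of the saved return address; x86-64 kernel addresses live in the upper canonical half, so a corrupted return can no longer be steered into userspace. In C terms:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t retaddr = 0x0000000000401000ULL;  /* attacker-chosen user address */

        retaddr |= 1ULL << 63;                     /* what btsq $63,(%rsp) does */
        printf("forced return address: %#llx (kernel half only)\n",
               (unsigned long long)retaddr);
        return 0;
    }
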
26130diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
26131index e69f988..da078ea 100644
26132--- a/arch/x86/kernel/module.c
26133+++ b/arch/x86/kernel/module.c
26134@@ -81,17 +81,62 @@ static unsigned long int get_module_load_offset(void)
26135 }
26136 #endif
26137
26138-void *module_alloc(unsigned long size)
26139+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
26140 {
26141- if (PAGE_ALIGN(size) > MODULES_LEN)
26142+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
26143 return NULL;
26144 return __vmalloc_node_range(size, 1,
26145 MODULES_VADDR + get_module_load_offset(),
26146- MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
26147- PAGE_KERNEL_EXEC, NUMA_NO_NODE,
26148+ MODULES_END, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
26149+ prot, NUMA_NO_NODE,
26150 __builtin_return_address(0));
26151 }
26152
26153+void *module_alloc(unsigned long size)
26154+{
26155+
26156+#ifdef CONFIG_PAX_KERNEXEC
26157+ return __module_alloc(size, PAGE_KERNEL);
26158+#else
26159+ return __module_alloc(size, PAGE_KERNEL_EXEC);
26160+#endif
26161+
26162+}
26163+
26164+#ifdef CONFIG_PAX_KERNEXEC
26165+#ifdef CONFIG_X86_32
26166+void *module_alloc_exec(unsigned long size)
26167+{
26168+ struct vm_struct *area;
26169+
26170+ if (size == 0)
26171+ return NULL;
26172+
26173+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
26174+ return area ? area->addr : NULL;
26175+}
26176+EXPORT_SYMBOL(module_alloc_exec);
26177+
26178+void module_free_exec(struct module *mod, void *module_region)
26179+{
26180+ vunmap(module_region);
26181+}
26182+EXPORT_SYMBOL(module_free_exec);
26183+#else
26184+void module_free_exec(struct module *mod, void *module_region)
26185+{
26186+ module_free(mod, module_region);
26187+}
26188+EXPORT_SYMBOL(module_free_exec);
26189+
26190+void *module_alloc_exec(unsigned long size)
26191+{
26192+ return __module_alloc(size, PAGE_KERNEL_RX);
26193+}
26194+EXPORT_SYMBOL(module_alloc_exec);
26195+#endif
26196+#endif
26197+
26198 #ifdef CONFIG_X86_32
26199 int apply_relocate(Elf32_Shdr *sechdrs,
26200 const char *strtab,
26201@@ -102,14 +147,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26202 unsigned int i;
26203 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
26204 Elf32_Sym *sym;
26205- uint32_t *location;
26206+ uint32_t *plocation, location;
26207
26208 DEBUGP("Applying relocate section %u to %u\n",
26209 relsec, sechdrs[relsec].sh_info);
26210 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
26211 /* This is where to make the change */
26212- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
26213- + rel[i].r_offset;
26214+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
26215+ location = (uint32_t)plocation;
26216+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
26217+ plocation = ktla_ktva((void *)plocation);
26218 /* This is the symbol it is referring to. Note that all
26219 undefined symbols have been resolved. */
26220 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
26221@@ -118,11 +165,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26222 switch (ELF32_R_TYPE(rel[i].r_info)) {
26223 case R_386_32:
26224 /* We add the value into the location given */
26225- *location += sym->st_value;
26226+ pax_open_kernel();
26227+ *plocation += sym->st_value;
26228+ pax_close_kernel();
26229 break;
26230 case R_386_PC32:
26231 /* Add the value, subtract its position */
26232- *location += sym->st_value - (uint32_t)location;
26233+ pax_open_kernel();
26234+ *plocation += sym->st_value - location;
26235+ pax_close_kernel();
26236 break;
26237 default:
26238 pr_err("%s: Unknown relocation: %u\n",
26239@@ -167,21 +218,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
26240 case R_X86_64_NONE:
26241 break;
26242 case R_X86_64_64:
26243+ pax_open_kernel();
26244 *(u64 *)loc = val;
26245+ pax_close_kernel();
26246 break;
26247 case R_X86_64_32:
26248+ pax_open_kernel();
26249 *(u32 *)loc = val;
26250+ pax_close_kernel();
26251 if (val != *(u32 *)loc)
26252 goto overflow;
26253 break;
26254 case R_X86_64_32S:
26255+ pax_open_kernel();
26256 *(s32 *)loc = val;
26257+ pax_close_kernel();
26258 if ((s64)val != *(s32 *)loc)
26259 goto overflow;
26260 break;
26261 case R_X86_64_PC32:
26262 val -= (u64)loc;
26263+ pax_open_kernel();
26264 *(u32 *)loc = val;
26265+ pax_close_kernel();
26266+
26267 #if 0
26268 if ((s64)val != *(s32 *)loc)
26269 goto overflow;
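
Under PAX_KERNEXEC the module.c changes split module memory into two pools: module_alloc() hands out writable-but-non-executable pages for data, and module_alloc_exec() hands out executable-but-read-only pages for code, so no module mapping is ever writable and executable at once. A userspace model of that policy with mmap() (illustration only; the kernel uses vmalloc ranges and page-table protections):

    #include <stddef.h>
    #include <stdio.h>
    #include <sys/mman.h>

    static void *module_alloc_model(size_t size)      /* data: RW, never X */
    {
        return mmap(NULL, size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    }

    static void *module_alloc_exec_model(size_t size) /* code: RX, never W */
    {
        return mmap(NULL, size, PROT_READ | PROT_EXEC,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    }

    int main(void)
    {
        printf("data pool @ %p, code pool @ %p (never W+X together)\n",
               module_alloc_model(4096), module_alloc_exec_model(4096));
        return 0;
    }
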
26270diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
26271index c9603ac..9f88728 100644
26272--- a/arch/x86/kernel/msr.c
26273+++ b/arch/x86/kernel/msr.c
26274@@ -37,6 +37,7 @@
26275 #include <linux/notifier.h>
26276 #include <linux/uaccess.h>
26277 #include <linux/gfp.h>
26278+#include <linux/grsecurity.h>
26279
26280 #include <asm/processor.h>
26281 #include <asm/msr.h>
26282@@ -103,6 +104,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
26283 int err = 0;
26284 ssize_t bytes = 0;
26285
26286+#ifdef CONFIG_GRKERNSEC_KMEM
26287+ gr_handle_msr_write();
26288+ return -EPERM;
26289+#endif
26290+
26291 if (count % 8)
26292 return -EINVAL; /* Invalid chunk size */
26293
26294@@ -150,6 +156,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
26295 err = -EBADF;
26296 break;
26297 }
26298+#ifdef CONFIG_GRKERNSEC_KMEM
26299+ gr_handle_msr_write();
26300+ return -EPERM;
26301+#endif
26302 if (copy_from_user(&regs, uregs, sizeof regs)) {
26303 err = -EFAULT;
26304 break;
26305@@ -233,7 +243,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
26306 return notifier_from_errno(err);
26307 }
26308
26309-static struct notifier_block __refdata msr_class_cpu_notifier = {
26310+static struct notifier_block msr_class_cpu_notifier = {
26311 .notifier_call = msr_class_cpu_callback,
26312 };
26313
26314diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
26315index c3e985d..110a36a 100644
26316--- a/arch/x86/kernel/nmi.c
26317+++ b/arch/x86/kernel/nmi.c
26318@@ -98,16 +98,16 @@ fs_initcall(nmi_warning_debugfs);
26319
26320 static void nmi_max_handler(struct irq_work *w)
26321 {
26322- struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
26323+ struct nmiwork *n = container_of(w, struct nmiwork, irq_work);
26324 int remainder_ns, decimal_msecs;
26325- u64 whole_msecs = ACCESS_ONCE(a->max_duration);
26326+ u64 whole_msecs = ACCESS_ONCE(n->max_duration);
26327
26328 remainder_ns = do_div(whole_msecs, (1000 * 1000));
26329 decimal_msecs = remainder_ns / 1000;
26330
26331 printk_ratelimited(KERN_INFO
26332 "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
26333- a->handler, whole_msecs, decimal_msecs);
26334+ n->action->handler, whole_msecs, decimal_msecs);
26335 }
26336
26337 static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26338@@ -134,11 +134,11 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26339 delta = sched_clock() - delta;
26340 trace_nmi_handler(a->handler, (int)delta, thishandled);
26341
26342- if (delta < nmi_longest_ns || delta < a->max_duration)
26343+ if (delta < nmi_longest_ns || delta < a->work->max_duration)
26344 continue;
26345
26346- a->max_duration = delta;
26347- irq_work_queue(&a->irq_work);
26348+ a->work->max_duration = delta;
26349+ irq_work_queue(&a->work->irq_work);
26350 }
26351
26352 rcu_read_unlock();
26353@@ -148,7 +148,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26354 }
26355 NOKPROBE_SYMBOL(nmi_handle);
26356
26357-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26358+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
26359 {
26360 struct nmi_desc *desc = nmi_to_desc(type);
26361 unsigned long flags;
26362@@ -156,7 +156,8 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26363 if (!action->handler)
26364 return -EINVAL;
26365
26366- init_irq_work(&action->irq_work, nmi_max_handler);
26367+ action->work->action = action;
26368+ init_irq_work(&action->work->irq_work, nmi_max_handler);
26369
26370 spin_lock_irqsave(&desc->lock, flags);
26371
26372@@ -174,9 +175,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26373 * event confuses some handlers (kdump uses this flag)
26374 */
26375 if (action->flags & NMI_FLAG_FIRST)
26376- list_add_rcu(&action->list, &desc->head);
26377+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
26378 else
26379- list_add_tail_rcu(&action->list, &desc->head);
26380+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
26381
26382 spin_unlock_irqrestore(&desc->lock, flags);
26383 return 0;
26384@@ -199,7 +200,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
26385 if (!strcmp(n->name, name)) {
26386 WARN(in_nmi(),
26387 "Trying to free NMI (%s) from NMI context!\n", n->name);
26388- list_del_rcu(&n->list);
26389+ pax_list_del_rcu((struct list_head *)&n->list);
26390 break;
26391 }
26392 }
26393@@ -528,6 +529,17 @@ static inline void nmi_nesting_postprocess(void)
26394 dotraplinkage notrace void
26395 do_nmi(struct pt_regs *regs, long error_code)
26396 {
26397+
26398+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26399+ if (!user_mode(regs)) {
26400+ unsigned long cs = regs->cs & 0xFFFF;
26401+ unsigned long ip = ktva_ktla(regs->ip);
26402+
26403+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
26404+ regs->ip = ip;
26405+ }
26406+#endif
26407+
26408 nmi_nesting_preprocess(regs);
26409
26410 nmi_enter();
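
The nmi.c changes follow the constification recipe seen throughout the patch: once struct nmiaction registrations become const, their mutable bookkeeping (max_duration, the irq_work) must move into a companion struct nmiwork that the const object merely points to. A trimmed standalone model of the resulting layout (field sets abbreviated, names mirror the patch):

    #include <stdint.h>
    #include <stdio.h>

    struct nmiaction_sketch;

    struct nmiwork_sketch {
        const struct nmiaction_sketch *action;  /* back-pointer for reporting */
        uint64_t max_duration;                  /* mutable: updated per NMI */
    };

    struct nmiaction_sketch {
        int (*handler)(void);                   /* fixed at registration */
        struct nmiwork_sketch *work;            /* all writable state lives here */
    };

    static int dummy_handler(void) { return 1; }

    int main(void)
    {
        static struct nmiwork_sketch w;
        static const struct nmiaction_sketch a = { dummy_handler, &w };

        w.action = &a;      /* action->work->action = action, as in the patch */
        w.max_duration = 42; /* writes never touch the const object itself */
        printf("max_duration=%llu ns\n", (unsigned long long)w.max_duration);
        return 0;
    }
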
26411diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
26412index 6d9582e..f746287 100644
26413--- a/arch/x86/kernel/nmi_selftest.c
26414+++ b/arch/x86/kernel/nmi_selftest.c
26415@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
26416 {
26417 /* trap all the unknown NMIs we may generate */
26418 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
26419- __initdata);
26420+ __initconst);
26421 }
26422
26423 static void __init cleanup_nmi_testsuite(void)
26424@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
26425 unsigned long timeout;
26426
26427 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
26428- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
26429+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
26430 nmi_fail = FAILURE;
26431 return;
26432 }
26433diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
26434index bbb6c73..24a58ef 100644
26435--- a/arch/x86/kernel/paravirt-spinlocks.c
26436+++ b/arch/x86/kernel/paravirt-spinlocks.c
26437@@ -8,7 +8,7 @@
26438
26439 #include <asm/paravirt.h>
26440
26441-struct pv_lock_ops pv_lock_ops = {
26442+struct pv_lock_ops pv_lock_ops __read_only = {
26443 #ifdef CONFIG_SMP
26444 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
26445 .unlock_kick = paravirt_nop,
26446diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
26447index 548d25f..f8fb99c 100644
26448--- a/arch/x86/kernel/paravirt.c
26449+++ b/arch/x86/kernel/paravirt.c
26450@@ -56,6 +56,9 @@ u64 _paravirt_ident_64(u64 x)
26451 {
26452 return x;
26453 }
26454+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26455+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
26456+#endif
26457
26458 void __init default_banner(void)
26459 {
26460@@ -142,16 +145,20 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
26461
26462 if (opfunc == NULL)
26463 /* If there's no function, patch it with a ud2a (BUG) */
26464- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
26465- else if (opfunc == _paravirt_nop)
26466+ ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a));
26467+ else if (opfunc == (void *)_paravirt_nop)
26468 /* If the operation is a nop, then nop the callsite */
26469 ret = paravirt_patch_nop();
26470
26471 /* identity functions just return their single argument */
26472- else if (opfunc == _paravirt_ident_32)
26473+ else if (opfunc == (void *)_paravirt_ident_32)
26474 ret = paravirt_patch_ident_32(insnbuf, len);
26475- else if (opfunc == _paravirt_ident_64)
26476+ else if (opfunc == (void *)_paravirt_ident_64)
26477 ret = paravirt_patch_ident_64(insnbuf, len);
26478+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26479+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
26480+ ret = paravirt_patch_ident_64(insnbuf, len);
26481+#endif
26482
26483 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
26484 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
26485@@ -176,7 +183,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
26486 if (insn_len > len || start == NULL)
26487 insn_len = len;
26488 else
26489- memcpy(insnbuf, start, insn_len);
26490+ memcpy(insnbuf, ktla_ktva(start), insn_len);
26491
26492 return insn_len;
26493 }
26494@@ -300,7 +307,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
26495 return this_cpu_read(paravirt_lazy_mode);
26496 }
26497
26498-struct pv_info pv_info = {
26499+struct pv_info pv_info __read_only = {
26500 .name = "bare hardware",
26501 .paravirt_enabled = 0,
26502 .kernel_rpl = 0,
26503@@ -311,16 +318,16 @@ struct pv_info pv_info = {
26504 #endif
26505 };
26506
26507-struct pv_init_ops pv_init_ops = {
26508+struct pv_init_ops pv_init_ops __read_only = {
26509 .patch = native_patch,
26510 };
26511
26512-struct pv_time_ops pv_time_ops = {
26513+struct pv_time_ops pv_time_ops __read_only = {
26514 .sched_clock = native_sched_clock,
26515 .steal_clock = native_steal_clock,
26516 };
26517
26518-__visible struct pv_irq_ops pv_irq_ops = {
26519+__visible struct pv_irq_ops pv_irq_ops __read_only = {
26520 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
26521 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
26522 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
26523@@ -332,7 +339,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
26524 #endif
26525 };
26526
26527-__visible struct pv_cpu_ops pv_cpu_ops = {
26528+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
26529 .cpuid = native_cpuid,
26530 .get_debugreg = native_get_debugreg,
26531 .set_debugreg = native_set_debugreg,
26532@@ -395,21 +402,26 @@ NOKPROBE_SYMBOL(native_get_debugreg);
26533 NOKPROBE_SYMBOL(native_set_debugreg);
26534 NOKPROBE_SYMBOL(native_load_idt);
26535
26536-struct pv_apic_ops pv_apic_ops = {
26537+struct pv_apic_ops pv_apic_ops __read_only = {
26538 #ifdef CONFIG_X86_LOCAL_APIC
26539 .startup_ipi_hook = paravirt_nop,
26540 #endif
26541 };
26542
26543-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
26544+#ifdef CONFIG_X86_32
26545+#ifdef CONFIG_X86_PAE
26546+/* 64-bit pagetable entries */
26547+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
26548+#else
26549 /* 32-bit pagetable entries */
26550 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
26551+#endif
26552 #else
26553 /* 64-bit pagetable entries */
26554 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
26555 #endif
26556
26557-struct pv_mmu_ops pv_mmu_ops = {
26558+struct pv_mmu_ops pv_mmu_ops __read_only = {
26559
26560 .read_cr2 = native_read_cr2,
26561 .write_cr2 = native_write_cr2,
26562@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
26563 .make_pud = PTE_IDENT,
26564
26565 .set_pgd = native_set_pgd,
26566+ .set_pgd_batched = native_set_pgd_batched,
26567 #endif
26568 #endif /* PAGETABLE_LEVELS >= 3 */
26569
26570@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
26571 },
26572
26573 .set_fixmap = native_set_fixmap,
26574+
26575+#ifdef CONFIG_PAX_KERNEXEC
26576+ .pax_open_kernel = native_pax_open_kernel,
26577+ .pax_close_kernel = native_pax_close_kernel,
26578+#endif
26579+
26580 };
26581
26582 EXPORT_SYMBOL_GPL(pv_time_ops);
26583diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
26584index 0497f71..7186c0d 100644
26585--- a/arch/x86/kernel/pci-calgary_64.c
26586+++ b/arch/x86/kernel/pci-calgary_64.c
26587@@ -1347,7 +1347,7 @@ static void __init get_tce_space_from_tar(void)
26588 tce_space = be64_to_cpu(readq(target));
26589 tce_space = tce_space & TAR_SW_BITS;
26590
26591- tce_space = tce_space & (~specified_table_size);
26592+ tce_space = tce_space & (~(unsigned long)specified_table_size);
26593 info->tce_space = (u64 *)__va(tce_space);
26594 }
26595 }
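
The calgary hunk casts before complementing: tce_space & ~(unsigned long)specified_table_size forces the ~ to be taken at 64-bit width. In the kernel the operand is a signed int, so the cast there chiefly makes the width explicit; with an unsigned 32-bit operand the trap is real, as this sketch shows — the narrow complement zero-extends and silently wipes the upper half of the address:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t addr = 0x0000001234567000ULL;
        uint32_t sz   = 0xFFF;

        uint64_t narrow = addr & ~sz;            /* ~ at 32 bits, zero-extended */
        uint64_t wide   = addr & ~(uint64_t)sz;  /* ~ at the full 64-bit width */

        printf("narrow: %#018llx  <- upper 32 bits wiped\n",
               (unsigned long long)narrow);
        printf("wide:   %#018llx  <- address preserved\n",
               (unsigned long long)wide);
        return 0;
    }
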
26596diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
26597index 35ccf75..7a15747 100644
26598--- a/arch/x86/kernel/pci-iommu_table.c
26599+++ b/arch/x86/kernel/pci-iommu_table.c
26600@@ -2,7 +2,7 @@
26601 #include <asm/iommu_table.h>
26602 #include <linux/string.h>
26603 #include <linux/kallsyms.h>
26604-
26605+#include <linux/sched.h>
26606
26607 #define DEBUG 1
26608
26609diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
26610index 77dd0ad..9ec4723 100644
26611--- a/arch/x86/kernel/pci-swiotlb.c
26612+++ b/arch/x86/kernel/pci-swiotlb.c
26613@@ -33,7 +33,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
26614 struct dma_attrs *attrs)
26615 {
26616 if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
26617- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
26618+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
26619 else
26620 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
26621 }
26622diff --git a/arch/x86/kernel/preempt.S b/arch/x86/kernel/preempt.S
26623index ca7f0d5..8996469 100644
26624--- a/arch/x86/kernel/preempt.S
26625+++ b/arch/x86/kernel/preempt.S
26626@@ -3,12 +3,14 @@
26627 #include <asm/dwarf2.h>
26628 #include <asm/asm.h>
26629 #include <asm/calling.h>
26630+#include <asm/alternative-asm.h>
26631
26632 ENTRY(___preempt_schedule)
26633 CFI_STARTPROC
26634 SAVE_ALL
26635 call preempt_schedule
26636 RESTORE_ALL
26637+ pax_force_retaddr
26638 ret
26639 CFI_ENDPROC
26640
26641@@ -19,6 +21,7 @@ ENTRY(___preempt_schedule_context)
26642 SAVE_ALL
26643 call preempt_schedule_context
26644 RESTORE_ALL
26645+ pax_force_retaddr
26646 ret
26647 CFI_ENDPROC
26648
26649diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
26650index 4505e2a..ae28b0d 100644
26651--- a/arch/x86/kernel/process.c
26652+++ b/arch/x86/kernel/process.c
26653@@ -36,7 +36,8 @@
26654 * section. Since TSS's are completely CPU-local, we want them
26655 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
26656 */
26657-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
26658+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
26659+EXPORT_SYMBOL(init_tss);
26660
26661 #ifdef CONFIG_X86_64
26662 static DEFINE_PER_CPU(unsigned char, is_idle);
26663@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
26664 task_xstate_cachep =
26665 kmem_cache_create("task_xstate", xstate_size,
26666 __alignof__(union thread_xstate),
26667- SLAB_PANIC | SLAB_NOTRACK, NULL);
26668+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
26669 }
26670
26671 /*
26672@@ -105,7 +106,7 @@ void exit_thread(void)
26673 unsigned long *bp = t->io_bitmap_ptr;
26674
26675 if (bp) {
26676- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
26677+ struct tss_struct *tss = init_tss + get_cpu();
26678
26679 t->io_bitmap_ptr = NULL;
26680 clear_thread_flag(TIF_IO_BITMAP);
26681@@ -125,6 +126,9 @@ void flush_thread(void)
26682 {
26683 struct task_struct *tsk = current;
26684
26685+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
26686+ loadsegment(gs, 0);
26687+#endif
26688 flush_ptrace_hw_breakpoint(tsk);
26689 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
26690 drop_init_fpu(tsk);
26691@@ -271,7 +275,7 @@ static void __exit_idle(void)
26692 void exit_idle(void)
26693 {
26694 /* idle loop has pid 0 */
26695- if (current->pid)
26696+ if (task_pid_nr(current))
26697 return;
26698 __exit_idle();
26699 }
26700@@ -324,7 +328,7 @@ bool xen_set_default_idle(void)
26701 return ret;
26702 }
26703 #endif
26704-void stop_this_cpu(void *dummy)
26705+__noreturn void stop_this_cpu(void *dummy)
26706 {
26707 local_irq_disable();
26708 /*
26709@@ -453,16 +457,37 @@ static int __init idle_setup(char *str)
26710 }
26711 early_param("idle", idle_setup);
26712
26713-unsigned long arch_align_stack(unsigned long sp)
26714+#ifdef CONFIG_PAX_RANDKSTACK
26715+void pax_randomize_kstack(struct pt_regs *regs)
26716 {
26717- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
26718- sp -= get_random_int() % 8192;
26719- return sp & ~0xf;
26720-}
26721+ struct thread_struct *thread = &current->thread;
26722+ unsigned long time;
26723
26724-unsigned long arch_randomize_brk(struct mm_struct *mm)
26725-{
26726- unsigned long range_end = mm->brk + 0x02000000;
26727- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
26728-}
26729+ if (!randomize_va_space)
26730+ return;
26731+
26732+ if (v8086_mode(regs))
26733+ return;
26734
26735+ rdtscl(time);
26736+
26737+ /* P4 seems to return a 0 LSB, ignore it */
26738+#ifdef CONFIG_MPENTIUM4
26739+ time &= 0x3EUL;
26740+ time <<= 2;
26741+#elif defined(CONFIG_X86_64)
26742+ time &= 0xFUL;
26743+ time <<= 4;
26744+#else
26745+ time &= 0x1FUL;
26746+ time <<= 3;
26747+#endif
26748+
26749+ thread->sp0 ^= time;
26750+ load_sp0(init_tss + smp_processor_id(), thread);
26751+
26752+#ifdef CONFIG_X86_64
26753+ this_cpu_write(kernel_stack, thread->sp0);
26754+#endif
26755+}
26756+#endif
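
pax_randomize_kstack() XORs a few low TSC bits into thread->sp0 so the kernel stack top shifts between syscalls; the masks keep 16-byte alignment and skip the LSB on Pentium 4, whose TSC reads back a zero low bit. The x86-64 case, modelled standalone (x86-only; __rdtsc stands in for the kernel's rdtscl()):

    #include <stdint.h>
    #include <stdio.h>
    #include <x86intrin.h>

    int main(void)
    {
        uint64_t sp0 = 0xffff88000400c000ULL;  /* hypothetical stack top */
        uint64_t t = __rdtsc();

        t &= 0xFUL;   /* keep 4 low TSC bits (the x86-64 branch above) */
        t <<= 4;      /* shift past the alignment bits: sp0 stays 16-aligned */
        sp0 ^= t;

        printf("randomized sp0: %#llx\n", (unsigned long long)sp0);
        return 0;
    }
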
26757diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
26758index 7bc86bb..0ea06e8 100644
26759--- a/arch/x86/kernel/process_32.c
26760+++ b/arch/x86/kernel/process_32.c
26761@@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
26762 unsigned long thread_saved_pc(struct task_struct *tsk)
26763 {
26764 return ((unsigned long *)tsk->thread.sp)[3];
26765+//XXX return tsk->thread.eip;
26766 }
26767
26768 void __show_regs(struct pt_regs *regs, int all)
26769@@ -73,19 +74,18 @@ void __show_regs(struct pt_regs *regs, int all)
26770 unsigned long sp;
26771 unsigned short ss, gs;
26772
26773- if (user_mode_vm(regs)) {
26774+ if (user_mode(regs)) {
26775 sp = regs->sp;
26776 ss = regs->ss & 0xffff;
26777- gs = get_user_gs(regs);
26778 } else {
26779 sp = kernel_stack_pointer(regs);
26780 savesegment(ss, ss);
26781- savesegment(gs, gs);
26782 }
26783+ gs = get_user_gs(regs);
26784
26785 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
26786 (u16)regs->cs, regs->ip, regs->flags,
26787- smp_processor_id());
26788+ raw_smp_processor_id());
26789 print_symbol("EIP is at %s\n", regs->ip);
26790
26791 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
26792@@ -132,20 +132,21 @@ void release_thread(struct task_struct *dead_task)
26793 int copy_thread(unsigned long clone_flags, unsigned long sp,
26794 unsigned long arg, struct task_struct *p)
26795 {
26796- struct pt_regs *childregs = task_pt_regs(p);
26797+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
26798 struct task_struct *tsk;
26799 int err;
26800
26801 p->thread.sp = (unsigned long) childregs;
26802 p->thread.sp0 = (unsigned long) (childregs+1);
26803+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
26804
26805 if (unlikely(p->flags & PF_KTHREAD)) {
26806 /* kernel thread */
26807 memset(childregs, 0, sizeof(struct pt_regs));
26808 p->thread.ip = (unsigned long) ret_from_kernel_thread;
26809- task_user_gs(p) = __KERNEL_STACK_CANARY;
26810- childregs->ds = __USER_DS;
26811- childregs->es = __USER_DS;
26812+ savesegment(gs, childregs->gs);
26813+ childregs->ds = __KERNEL_DS;
26814+ childregs->es = __KERNEL_DS;
26815 childregs->fs = __KERNEL_PERCPU;
26816 childregs->bx = sp; /* function */
26817 childregs->bp = arg;
26818@@ -252,7 +253,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26819 struct thread_struct *prev = &prev_p->thread,
26820 *next = &next_p->thread;
26821 int cpu = smp_processor_id();
26822- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26823+ struct tss_struct *tss = init_tss + cpu;
26824 fpu_switch_t fpu;
26825
26826 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
26827@@ -276,6 +277,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26828 */
26829 lazy_save_gs(prev->gs);
26830
26831+#ifdef CONFIG_PAX_MEMORY_UDEREF
26832+ __set_fs(task_thread_info(next_p)->addr_limit);
26833+#endif
26834+
26835 /*
26836 * Load the per-thread Thread-Local Storage descriptor.
26837 */
26838@@ -314,9 +319,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26839 */
26840 arch_end_context_switch(next_p);
26841
26842- this_cpu_write(kernel_stack,
26843- (unsigned long)task_stack_page(next_p) +
26844- THREAD_SIZE - KERNEL_STACK_OFFSET);
26845+ this_cpu_write(current_task, next_p);
26846+ this_cpu_write(current_tinfo, &next_p->tinfo);
26847+ this_cpu_write(kernel_stack, next->sp0);
26848
26849 /*
26850 * Restore %gs if needed (which is common)
26851@@ -326,8 +331,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26852
26853 switch_fpu_finish(next_p, fpu);
26854
26855- this_cpu_write(current_task, next_p);
26856-
26857 return prev_p;
26858 }
26859
26860@@ -357,4 +360,3 @@ unsigned long get_wchan(struct task_struct *p)
26861 } while (count++ < 16);
26862 return 0;
26863 }
26864-
26865diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
26866index ca5b02d..c0b2f6a 100644
26867--- a/arch/x86/kernel/process_64.c
26868+++ b/arch/x86/kernel/process_64.c
26869@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26870 struct pt_regs *childregs;
26871 struct task_struct *me = current;
26872
26873- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
26874+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
26875 childregs = task_pt_regs(p);
26876 p->thread.sp = (unsigned long) childregs;
26877 p->thread.usersp = me->thread.usersp;
26878+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
26879 set_tsk_thread_flag(p, TIF_FORK);
26880 p->thread.fpu_counter = 0;
26881 p->thread.io_bitmap_ptr = NULL;
26882@@ -172,6 +173,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26883 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
26884 savesegment(es, p->thread.es);
26885 savesegment(ds, p->thread.ds);
26886+ savesegment(ss, p->thread.ss);
26887+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
26888 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26889
26890 if (unlikely(p->flags & PF_KTHREAD)) {
26891@@ -280,7 +283,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26892 struct thread_struct *prev = &prev_p->thread;
26893 struct thread_struct *next = &next_p->thread;
26894 int cpu = smp_processor_id();
26895- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26896+ struct tss_struct *tss = init_tss + cpu;
26897 unsigned fsindex, gsindex;
26898 fpu_switch_t fpu;
26899
26900@@ -303,6 +306,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26901 if (unlikely(next->ds | prev->ds))
26902 loadsegment(ds, next->ds);
26903
26904+ savesegment(ss, prev->ss);
26905+ if (unlikely(next->ss != prev->ss))
26906+ loadsegment(ss, next->ss);
26907
26908 /* We must save %fs and %gs before load_TLS() because
26909 * %fs and %gs may be cleared by load_TLS().
26910@@ -362,6 +368,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26911 prev->usersp = this_cpu_read(old_rsp);
26912 this_cpu_write(old_rsp, next->usersp);
26913 this_cpu_write(current_task, next_p);
26914+ this_cpu_write(current_tinfo, &next_p->tinfo);
26915
26916 /*
26917 * If it were not for PREEMPT_ACTIVE we could guarantee that the
26918@@ -371,9 +378,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26919 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
26920 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
26921
26922- this_cpu_write(kernel_stack,
26923- (unsigned long)task_stack_page(next_p) +
26924- THREAD_SIZE - KERNEL_STACK_OFFSET);
26925+ this_cpu_write(kernel_stack, next->sp0);
26926
26927 /*
26928 * Now maybe reload the debug registers and handle I/O bitmaps
26929@@ -443,12 +448,11 @@ unsigned long get_wchan(struct task_struct *p)
26930 if (!p || p == current || p->state == TASK_RUNNING)
26931 return 0;
26932 stack = (unsigned long)task_stack_page(p);
26933- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
26934+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
26935 return 0;
26936 fp = *(u64 *)(p->thread.sp);
26937 do {
26938- if (fp < (unsigned long)stack ||
26939- fp >= (unsigned long)stack+THREAD_SIZE)
26940+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
26941 return 0;
26942 ip = *(u64 *)(fp+8);
26943 if (!in_sched_functions(ip))
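
The get_wchan() hunk above tightens the frame-pointer walk after sp0 was pulled
16 bytes below the top of the stack allocation. A minimal sketch of the adjusted
bound, assuming a 16 KiB THREAD_SIZE; fp_in_bounds() is a name invented here:

#include <stdbool.h>
#include <stdint.h>

#define THREAD_SIZE (16UL * 1024)	/* assumed stack size for this sketch */

/* mirrors the hunk: the top 16 bytes are reserved, and one u64 (the saved
 * frame slot) must still be fully readable at fp */
static bool fp_in_bounds(unsigned long fp, unsigned long stack)
{
	return fp >= stack && fp <= stack + THREAD_SIZE - 16 - sizeof(uint64_t);
}
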
26944diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
26945index 678c0ad..2fc2a7b 100644
26946--- a/arch/x86/kernel/ptrace.c
26947+++ b/arch/x86/kernel/ptrace.c
26948@@ -186,10 +186,10 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
26949 unsigned long sp = (unsigned long)&regs->sp;
26950 u32 *prev_esp;
26951
26952- if (context == (sp & ~(THREAD_SIZE - 1)))
26953+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
26954 return sp;
26955
26956- prev_esp = (u32 *)(context);
26957+ prev_esp = *(u32 **)(context);
26958 if (prev_esp)
26959 return (unsigned long)prev_esp;
26960
26961@@ -452,6 +452,20 @@ static int putreg(struct task_struct *child,
26962 if (child->thread.gs != value)
26963 return do_arch_prctl(child, ARCH_SET_GS, value);
26964 return 0;
26965+
26966+ case offsetof(struct user_regs_struct,ip):
26967+ /*
26968+ * Protect against any attempt to set ip to an
26969+ * impossible address. There are dragons lurking if the
26970+ * address is noncanonical. (This explicitly allows
26971+ * setting ip to TASK_SIZE_MAX, because user code can do
26972+ * that all by itself by running off the end of its
26973+ * address space.)
26974+ */
26975+ if (value > TASK_SIZE_MAX)
26976+ return -EIO;
26977+ break;
26978+
26979 #endif
26980 }
26981
26982@@ -588,7 +602,7 @@ static void ptrace_triggered(struct perf_event *bp,
26983 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
26984 {
26985 int i;
26986- int dr7 = 0;
26987+ unsigned long dr7 = 0;
26988 struct arch_hw_breakpoint *info;
26989
26990 for (i = 0; i < HBP_NUM; i++) {
26991@@ -822,7 +836,7 @@ long arch_ptrace(struct task_struct *child, long request,
26992 unsigned long addr, unsigned long data)
26993 {
26994 int ret;
26995- unsigned long __user *datap = (unsigned long __user *)data;
26996+ unsigned long __user *datap = (__force unsigned long __user *)data;
26997
26998 switch (request) {
26999 /* read the word at location addr in the USER area. */
27000@@ -907,14 +921,14 @@ long arch_ptrace(struct task_struct *child, long request,
27001 if ((int) addr < 0)
27002 return -EIO;
27003 ret = do_get_thread_area(child, addr,
27004- (struct user_desc __user *)data);
27005+ (__force struct user_desc __user *) data);
27006 break;
27007
27008 case PTRACE_SET_THREAD_AREA:
27009 if ((int) addr < 0)
27010 return -EIO;
27011 ret = do_set_thread_area(child, addr,
27012- (struct user_desc __user *)data, 0);
27013+ (__force struct user_desc __user *) data, 0);
27014 break;
27015 #endif
27016
27017@@ -1292,7 +1306,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
27018
27019 #ifdef CONFIG_X86_64
27020
27021-static struct user_regset x86_64_regsets[] __read_mostly = {
27022+static user_regset_no_const x86_64_regsets[] __read_only = {
27023 [REGSET_GENERAL] = {
27024 .core_note_type = NT_PRSTATUS,
27025 .n = sizeof(struct user_regs_struct) / sizeof(long),
27026@@ -1333,7 +1347,7 @@ static const struct user_regset_view user_x86_64_view = {
27027 #endif /* CONFIG_X86_64 */
27028
27029 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
27030-static struct user_regset x86_32_regsets[] __read_mostly = {
27031+static user_regset_no_const x86_32_regsets[] __read_only = {
27032 [REGSET_GENERAL] = {
27033 .core_note_type = NT_PRSTATUS,
27034 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
27035@@ -1386,7 +1400,7 @@ static const struct user_regset_view user_x86_32_view = {
27036 */
27037 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
27038
27039-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
27040+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
27041 {
27042 #ifdef CONFIG_X86_64
27043 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
27044@@ -1421,7 +1435,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
27045 memset(info, 0, sizeof(*info));
27046 info->si_signo = SIGTRAP;
27047 info->si_code = si_code;
27048- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
27049+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
27050 }
27051
27052 void user_single_step_siginfo(struct task_struct *tsk,
27053@@ -1450,6 +1464,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
27054 # define IS_IA32 0
27055 #endif
27056
27057+#ifdef CONFIG_GRKERNSEC_SETXID
27058+extern void gr_delayed_cred_worker(void);
27059+#endif
27060+
27061 /*
27062 * We must return the syscall number to actually look up in the table.
27063 * This can be -1L to skip running any syscall at all.
27064@@ -1460,6 +1478,11 @@ long syscall_trace_enter(struct pt_regs *regs)
27065
27066 user_exit();
27067
27068+#ifdef CONFIG_GRKERNSEC_SETXID
27069+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
27070+ gr_delayed_cred_worker();
27071+#endif
27072+
27073 /*
27074 * If we stepped into a sysenter/syscall insn, it trapped in
27075 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
27076@@ -1515,6 +1538,11 @@ void syscall_trace_leave(struct pt_regs *regs)
27077 */
27078 user_exit();
27079
27080+#ifdef CONFIG_GRKERNSEC_SETXID
27081+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
27082+ gr_delayed_cred_worker();
27083+#endif
27084+
27085 audit_syscall_exit(regs);
27086
27087 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
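
The new putreg() case above refuses any attempt to plant an instruction pointer
beyond TASK_SIZE_MAX, since a noncanonical %rip must be kept out of the kernel's
return-to-user path. For background only, a self-contained test for the 48-bit
canonical form (not code from this patch):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* canonical x86-64 addresses sign-extend bit 47 through bits 63..48 */
static bool is_canonical48(uint64_t addr)
{
	return (uint64_t)(((int64_t)(addr << 16)) >> 16) == addr;
}

int main(void)
{
	assert(is_canonical48(0x00007fffffffffffULL));	/* top of user half */
	assert(!is_canonical48(0x0000800000000000ULL));	/* first hole address */
	assert(is_canonical48(0xffff800000000000ULL));	/* start of kernel half */
	return 0;
}
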
27088diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
27089index 2f355d2..e75ed0a 100644
27090--- a/arch/x86/kernel/pvclock.c
27091+++ b/arch/x86/kernel/pvclock.c
27092@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
27093 reset_hung_task_detector();
27094 }
27095
27096-static atomic64_t last_value = ATOMIC64_INIT(0);
27097+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
27098
27099 void pvclock_resume(void)
27100 {
27101- atomic64_set(&last_value, 0);
27102+ atomic64_set_unchecked(&last_value, 0);
27103 }
27104
27105 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
27106@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
27107 * updating at the same time, and one of them could be slightly behind,
27108 * making the assumption that last_value always go forward fail to hold.
27109 */
27110- last = atomic64_read(&last_value);
27111+ last = atomic64_read_unchecked(&last_value);
27112 do {
27113 if (ret < last)
27114 return last;
27115- last = atomic64_cmpxchg(&last_value, last, ret);
27116+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
27117 } while (unlikely(last != ret));
27118
27119 return ret;
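
As in many hunks of this patch, last_value is switched to the *_unchecked
atomic flavour: it is a monotonic timestamp, not an object reference count, so
PAX_REFCOUNT's overflow trap must not fire on it. A rough sketch of the split,
assuming the usual PaX shape (the authoritative typedefs live elsewhere in this
patch):

/* checked flavour: instrumented against overflow by the refcount machinery */
typedef struct { long long counter; } atomic64_sketch_t;
/* unchecked flavour: identical layout, exempt from overflow detection */
typedef struct { long long counter; } atomic64_unchecked_sketch_t;

static long long atomic64_read_unchecked_sketch(const atomic64_unchecked_sketch_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_SEQ_CST);
}

static void atomic64_set_unchecked_sketch(atomic64_unchecked_sketch_t *v, long long i)
{
	__atomic_store_n(&v->counter, i, __ATOMIC_SEQ_CST);
}
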
27120diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
27121index 52b1157..c6e67c4 100644
27122--- a/arch/x86/kernel/reboot.c
27123+++ b/arch/x86/kernel/reboot.c
27124@@ -68,6 +68,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
27125
27126 void __noreturn machine_real_restart(unsigned int type)
27127 {
27128+
27129+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
27130+ struct desc_struct *gdt;
27131+#endif
27132+
27133 local_irq_disable();
27134
27135 /*
27136@@ -95,7 +100,29 @@ void __noreturn machine_real_restart(unsigned int type)
27137
27138 /* Jump to the identity-mapped low memory code */
27139 #ifdef CONFIG_X86_32
27140- asm volatile("jmpl *%0" : :
27141+
27142+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
27143+ gdt = get_cpu_gdt_table(smp_processor_id());
27144+ pax_open_kernel();
27145+#ifdef CONFIG_PAX_MEMORY_UDEREF
27146+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
27147+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
27148+ loadsegment(ds, __KERNEL_DS);
27149+ loadsegment(es, __KERNEL_DS);
27150+ loadsegment(ss, __KERNEL_DS);
27151+#endif
27152+#ifdef CONFIG_PAX_KERNEXEC
27153+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
27154+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
27155+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
27156+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
27157+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
27158+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
27159+#endif
27160+ pax_close_kernel();
27161+#endif
27162+
27163+ asm volatile("ljmpl *%0" : :
27164 "rm" (real_mode_header->machine_real_restart_asm),
27165 "a" (type));
27166 #else
27167@@ -486,7 +513,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
27168 * This means that this function can never return, it can misbehave
27169 * by not rebooting properly and hanging.
27170 */
27171-static void native_machine_emergency_restart(void)
27172+static void __noreturn native_machine_emergency_restart(void)
27173 {
27174 int i;
27175 int attempt = 0;
27176@@ -610,13 +637,13 @@ void native_machine_shutdown(void)
27177 #endif
27178 }
27179
27180-static void __machine_emergency_restart(int emergency)
27181+static void __noreturn __machine_emergency_restart(int emergency)
27182 {
27183 reboot_emergency = emergency;
27184 machine_ops.emergency_restart();
27185 }
27186
27187-static void native_machine_restart(char *__unused)
27188+static void __noreturn native_machine_restart(char *__unused)
27189 {
27190 pr_notice("machine restart\n");
27191
27192@@ -625,7 +652,7 @@ static void native_machine_restart(char *__unused)
27193 __machine_emergency_restart(0);
27194 }
27195
27196-static void native_machine_halt(void)
27197+static void __noreturn native_machine_halt(void)
27198 {
27199 /* Stop other cpus and apics */
27200 machine_shutdown();
27201@@ -635,7 +662,7 @@ static void native_machine_halt(void)
27202 stop_this_cpu(NULL);
27203 }
27204
27205-static void native_machine_power_off(void)
27206+static void __noreturn native_machine_power_off(void)
27207 {
27208 if (pm_power_off) {
27209 if (!reboot_force)
27210@@ -644,9 +671,10 @@ static void native_machine_power_off(void)
27211 }
27212 /* A fallback in case there is no PM info available */
27213 tboot_shutdown(TB_SHUTDOWN_HALT);
27214+ unreachable();
27215 }
27216
27217-struct machine_ops machine_ops = {
27218+struct machine_ops machine_ops __read_only = {
27219 .power_off = native_machine_power_off,
27220 .shutdown = native_machine_shutdown,
27221 .emergency_restart = native_machine_emergency_restart,
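
The __noreturn annotations above let the compiler verify that every reboot,
halt and power-off path really terminates, and the added unreachable() marks
the instruction after the tboot fallback as dead. A stand-alone illustration of
the same attributes (these are the GCC spellings, not the kernel macros):

static void __attribute__((noreturn)) halt_forever(void)
{
	for (;;)
		;			/* park here; control never leaves */
}

static void __attribute__((noreturn)) power_off_sketch(void)
{
	halt_forever();			/* stand-in for the shutdown hook */
	__builtin_unreachable();	/* optimizer hint: this point is dead */
}
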
27222diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
27223index c8e41e9..64049ef 100644
27224--- a/arch/x86/kernel/reboot_fixups_32.c
27225+++ b/arch/x86/kernel/reboot_fixups_32.c
27226@@ -57,7 +57,7 @@ struct device_fixup {
27227 unsigned int vendor;
27228 unsigned int device;
27229 void (*reboot_fixup)(struct pci_dev *);
27230-};
27231+} __do_const;
27232
27233 /*
27234 * PCI ids solely used for fixups_table go here
27235diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
27236index 3fd2c69..a444264 100644
27237--- a/arch/x86/kernel/relocate_kernel_64.S
27238+++ b/arch/x86/kernel/relocate_kernel_64.S
27239@@ -96,8 +96,7 @@ relocate_kernel:
27240
27241 /* jump to identity mapped page */
27242 addq $(identity_mapped - relocate_kernel), %r8
27243- pushq %r8
27244- ret
27245+ jmp *%r8
27246
27247 identity_mapped:
27248 /* set return address to 0 if not preserving context */
27249diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
27250index 78a0e62..5c2e510 100644
27251--- a/arch/x86/kernel/setup.c
27252+++ b/arch/x86/kernel/setup.c
27253@@ -110,6 +110,7 @@
27254 #include <asm/mce.h>
27255 #include <asm/alternative.h>
27256 #include <asm/prom.h>
27257+#include <asm/boot.h>
27258
27259 /*
27260 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
27261@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
27262 #endif
27263
27264
27265-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
27266-__visible unsigned long mmu_cr4_features;
27267+#ifdef CONFIG_X86_64
27268+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
27269+#elif defined(CONFIG_X86_PAE)
27270+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
27271 #else
27272-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
27273+__visible unsigned long mmu_cr4_features __read_only;
27274 #endif
27275
27276+void set_in_cr4(unsigned long mask)
27277+{
27278+ unsigned long cr4 = read_cr4();
27279+
27280+ if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
27281+ return;
27282+
27283+ pax_open_kernel();
27284+ mmu_cr4_features |= mask;
27285+ pax_close_kernel();
27286+
27287+ if (trampoline_cr4_features)
27288+ *trampoline_cr4_features = mmu_cr4_features;
27289+ cr4 |= mask;
27290+ write_cr4(cr4);
27291+}
27292+EXPORT_SYMBOL(set_in_cr4);
27293+
27294+void clear_in_cr4(unsigned long mask)
27295+{
27296+ unsigned long cr4 = read_cr4();
27297+
27298+ if (!(cr4 & mask) && cr4 == mmu_cr4_features)
27299+ return;
27300+
27301+ pax_open_kernel();
27302+ mmu_cr4_features &= ~mask;
27303+ pax_close_kernel();
27304+
27305+ if (trampoline_cr4_features)
27306+ *trampoline_cr4_features = mmu_cr4_features;
27307+ cr4 &= ~mask;
27308+ write_cr4(cr4);
27309+}
27310+EXPORT_SYMBOL(clear_in_cr4);
27311+
27312 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
27313 int bootloader_type, bootloader_version;
27314
27315@@ -772,7 +811,7 @@ static void __init trim_bios_range(void)
27316 * area (640->1Mb) as ram even though it is not.
27317 * take them out.
27318 */
27319- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
27320+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
27321
27322 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
27323 }
27324@@ -780,7 +819,7 @@ static void __init trim_bios_range(void)
27325 /* called before trim_bios_range() to spare extra sanitize */
27326 static void __init e820_add_kernel_range(void)
27327 {
27328- u64 start = __pa_symbol(_text);
27329+ u64 start = __pa_symbol(ktla_ktva(_text));
27330 u64 size = __pa_symbol(_end) - start;
27331
27332 /*
27333@@ -856,8 +895,12 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
27334
27335 void __init setup_arch(char **cmdline_p)
27336 {
27337+#ifdef CONFIG_X86_32
27338+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
27339+#else
27340 memblock_reserve(__pa_symbol(_text),
27341 (unsigned long)__bss_stop - (unsigned long)_text);
27342+#endif
27343
27344 early_reserve_initrd();
27345
27346@@ -946,14 +989,14 @@ void __init setup_arch(char **cmdline_p)
27347
27348 if (!boot_params.hdr.root_flags)
27349 root_mountflags &= ~MS_RDONLY;
27350- init_mm.start_code = (unsigned long) _text;
27351- init_mm.end_code = (unsigned long) _etext;
27352+ init_mm.start_code = ktla_ktva((unsigned long) _text);
27353+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
27354 init_mm.end_data = (unsigned long) _edata;
27355 init_mm.brk = _brk_end;
27356
27357- code_resource.start = __pa_symbol(_text);
27358- code_resource.end = __pa_symbol(_etext)-1;
27359- data_resource.start = __pa_symbol(_etext);
27360+ code_resource.start = __pa_symbol(ktla_ktva(_text));
27361+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
27362+ data_resource.start = __pa_symbol(_sdata);
27363 data_resource.end = __pa_symbol(_edata)-1;
27364 bss_resource.start = __pa_symbol(__bss_start);
27365 bss_resource.end = __pa_symbol(__bss_stop)-1;
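
set_in_cr4()/clear_in_cr4() above still update mmu_cr4_features even though it
now lives in __read_only memory: the pax_open_kernel()/pax_close_kernel()
bracket temporarily lifts write protection around the store. A userspace
analogue of that bracket built on mprotect(), purely illustrative (error
handling omitted):

#include <string.h>
#include <sys/mman.h>

/* page must be page-aligned and page_len a multiple of the page size */
static void write_readonly(void *page, size_t page_len,
			   size_t off, const void *val, size_t n)
{
	mprotect(page, page_len, PROT_READ | PROT_WRITE);	/* "open" */
	memcpy((char *)page + off, val, n);			/* the update */
	mprotect(page, page_len, PROT_READ);			/* "close" */
}
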
27366diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
27367index 5cdff03..80fa283 100644
27368--- a/arch/x86/kernel/setup_percpu.c
27369+++ b/arch/x86/kernel/setup_percpu.c
27370@@ -21,19 +21,17 @@
27371 #include <asm/cpu.h>
27372 #include <asm/stackprotector.h>
27373
27374-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
27375+#ifdef CONFIG_SMP
27376+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
27377 EXPORT_PER_CPU_SYMBOL(cpu_number);
27378+#endif
27379
27380-#ifdef CONFIG_X86_64
27381 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
27382-#else
27383-#define BOOT_PERCPU_OFFSET 0
27384-#endif
27385
27386 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
27387 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
27388
27389-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
27390+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
27391 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
27392 };
27393 EXPORT_SYMBOL(__per_cpu_offset);
27394@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
27395 {
27396 #ifdef CONFIG_NEED_MULTIPLE_NODES
27397 pg_data_t *last = NULL;
27398- unsigned int cpu;
27399+ int cpu;
27400
27401 for_each_possible_cpu(cpu) {
27402 int node = early_cpu_to_node(cpu);
27403@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
27404 {
27405 #ifdef CONFIG_X86_32
27406 struct desc_struct gdt;
27407+ unsigned long base = per_cpu_offset(cpu);
27408
27409- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
27410- 0x2 | DESCTYPE_S, 0x8);
27411- gdt.s = 1;
27412+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
27413+ 0x83 | DESCTYPE_S, 0xC);
27414 write_gdt_entry(get_cpu_gdt_table(cpu),
27415 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
27416 #endif
27417@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
27418 /* alrighty, percpu areas up and running */
27419 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
27420 for_each_possible_cpu(cpu) {
27421+#ifdef CONFIG_CC_STACKPROTECTOR
27422+#ifdef CONFIG_X86_32
27423+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
27424+#endif
27425+#endif
27426 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
27427 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
27428 per_cpu(cpu_number, cpu) = cpu;
27429@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
27430 */
27431 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
27432 #endif
27433+#ifdef CONFIG_CC_STACKPROTECTOR
27434+#ifdef CONFIG_X86_32
27435+ if (!cpu)
27436+ per_cpu(stack_canary.canary, cpu) = canary;
27437+#endif
27438+#endif
27439 /*
27440 * Up to this point, the boot CPU has been using .init.data
27441 * area. Reload any changed state for the boot CPU.
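
The loop above rebases each CPU from the boot-time per-CPU template to its
final area through per_cpu_offset(), and on 32-bit it carries the
already-initialized stack canary across that move. The addressing model itself
is plain base-plus-offset, as in this toy version (all names invented here):

#define NCPUS 4

static long template_var;		/* stands in for a .data..percpu symbol */
static unsigned long cpu_off[NCPUS];	/* per_cpu_offset() analogue */

/* per_cpu(var, cpu) resolves to the template address plus that CPU's offset */
static long *per_cpu_ptr_sketch(long *tmpl, int cpu)
{
	return (long *)((char *)tmpl + cpu_off[cpu]);
}
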
27442diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
27443index 2851d63..83bf567 100644
27444--- a/arch/x86/kernel/signal.c
27445+++ b/arch/x86/kernel/signal.c
27446@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
27447 * Align the stack pointer according to the i386 ABI,
27448 * i.e. so that on function entry ((sp + 4) & 15) == 0.
27449 */
27450- sp = ((sp + 4) & -16ul) - 4;
27451+ sp = ((sp - 12) & -16ul) - 4;
27452 #else /* !CONFIG_X86_32 */
27453 sp = round_down(sp, 16) - 8;
27454 #endif
27455@@ -298,10 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27456 }
27457
27458 if (current->mm->context.vdso)
27459- restorer = current->mm->context.vdso +
27460- selected_vdso32->sym___kernel_sigreturn;
27461+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_sigreturn);
27462 else
27463- restorer = &frame->retcode;
27464+ restorer = (void __user *)&frame->retcode;
27465 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27466 restorer = ksig->ka.sa.sa_restorer;
27467
27468@@ -315,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27469 * reasons and because gdb uses it as a signature to notice
27470 * signal handler stack frames.
27471 */
27472- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
27473+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
27474
27475 if (err)
27476 return -EFAULT;
27477@@ -362,8 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27478 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
27479
27480 /* Set up to return from userspace. */
27481- restorer = current->mm->context.vdso +
27482- selected_vdso32->sym___kernel_rt_sigreturn;
27483+ if (current->mm->context.vdso)
27484+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_rt_sigreturn);
27485+ else
27486+ restorer = (void __user *)&frame->retcode;
27487 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27488 restorer = ksig->ka.sa.sa_restorer;
27489 put_user_ex(restorer, &frame->pretcode);
27490@@ -375,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27491 * reasons and because gdb uses it as a signature to notice
27492 * signal handler stack frames.
27493 */
27494- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
27495+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
27496 } put_user_catch(err);
27497
27498 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
27499@@ -611,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27500 {
27501 int usig = signr_convert(ksig->sig);
27502 sigset_t *set = sigmask_to_save();
27503- compat_sigset_t *cset = (compat_sigset_t *) set;
27504+ sigset_t sigcopy;
27505+ compat_sigset_t *cset;
27506+
27507+ sigcopy = *set;
27508+
27509+ cset = (compat_sigset_t *) &sigcopy;
27510
27511 /* Set up the stack frame */
27512 if (is_ia32_frame()) {
27513@@ -622,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27514 } else if (is_x32_frame()) {
27515 return x32_setup_rt_frame(ksig, cset, regs);
27516 } else {
27517- return __setup_rt_frame(ksig->sig, ksig, set, regs);
27518+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
27519 }
27520 }
27521
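
The align_sigframe() change keeps the i386 ABI contract that
((sp + 4) & 15) == 0 at handler entry, and in addition guarantees that the
frame always lands strictly below the incoming sp. A quick self-check of both
formulas:

#include <assert.h>

static unsigned long align_old(unsigned long sp) { return ((sp + 4) & -16ul) - 4; }
static unsigned long align_new(unsigned long sp) { return ((sp - 12) & -16ul) - 4; }

int main(void)
{
	for (unsigned long sp = 64; sp < 96; sp++) {
		assert(((align_old(sp) + 4) & 15) == 0);	/* ABI holds */
		assert(((align_new(sp) + 4) & 15) == 0);	/* ABI still holds */
		assert(align_new(sp) < sp);	/* new form reserves extra room */
	}
	return 0;
}
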
27522diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
27523index be8e1bd..a3d93fa 100644
27524--- a/arch/x86/kernel/smp.c
27525+++ b/arch/x86/kernel/smp.c
27526@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
27527
27528 __setup("nonmi_ipi", nonmi_ipi_setup);
27529
27530-struct smp_ops smp_ops = {
27531+struct smp_ops smp_ops __read_only = {
27532 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
27533 .smp_prepare_cpus = native_smp_prepare_cpus,
27534 .smp_cpus_done = native_smp_cpus_done,
27535diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
27536index 5492798..a3bd4f2 100644
27537--- a/arch/x86/kernel/smpboot.c
27538+++ b/arch/x86/kernel/smpboot.c
27539@@ -230,14 +230,17 @@ static void notrace start_secondary(void *unused)
27540
27541 enable_start_cpu0 = 0;
27542
27543-#ifdef CONFIG_X86_32
27544+ /* otherwise gcc will move up smp_processor_id before the cpu_init */
27545+ barrier();
27546+
27547 /* switch away from the initial page table */
27548+#ifdef CONFIG_PAX_PER_CPU_PGD
27549+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
27550+#else
27551 load_cr3(swapper_pg_dir);
27552+#endif
27553 __flush_tlb_all();
27554-#endif
27555
27556- /* otherwise gcc will move up smp_processor_id before the cpu_init */
27557- barrier();
27558 /*
27559 * Check TSC synchronization with the BP:
27560 */
27561@@ -764,8 +767,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27562 alternatives_enable_smp();
27563
27564 idle->thread.sp = (unsigned long) (((struct pt_regs *)
27565- (THREAD_SIZE + task_stack_page(idle))) - 1);
27566+ (THREAD_SIZE - 16 + task_stack_page(idle))) - 1);
27567 per_cpu(current_task, cpu) = idle;
27568+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27569
27570 #ifdef CONFIG_X86_32
27571 /* Stack for startup_32 can be just as for start_secondary onwards */
27572@@ -774,10 +778,10 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27573 clear_tsk_thread_flag(idle, TIF_FORK);
27574 initial_gs = per_cpu_offset(cpu);
27575 #endif
27576- per_cpu(kernel_stack, cpu) =
27577- (unsigned long)task_stack_page(idle) -
27578- KERNEL_STACK_OFFSET + THREAD_SIZE;
27579+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27580+ pax_open_kernel();
27581 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
27582+ pax_close_kernel();
27583 initial_code = (unsigned long)start_secondary;
27584 stack_start = idle->thread.sp;
27585
27586@@ -923,6 +927,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
27587 /* the FPU context is blank, nobody can own it */
27588 __cpu_disable_lazy_restore(cpu);
27589
27590+#ifdef CONFIG_PAX_PER_CPU_PGD
27591+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
27592+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27593+ KERNEL_PGD_PTRS);
27594+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
27595+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27596+ KERNEL_PGD_PTRS);
27597+#endif
27598+
27599 err = do_boot_cpu(apicid, cpu, tidle);
27600 if (err) {
27601 pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
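
The PAX_PER_CPU_PGD hunk above seeds the new CPU's private page tables with the
kernel half of the reference swapper_pg_dir before the CPU boots. In mainline,
clone_pgd_range() is essentially a typed memcpy of top-level entries, roughly:

#include <string.h>

typedef unsigned long pgd_sketch_t;	/* stands in for pgd_t */

static void clone_pgd_range_sketch(pgd_sketch_t *dst, const pgd_sketch_t *src,
				   int count)
{
	memcpy(dst, src, count * sizeof(pgd_sketch_t));
}
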
27602diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
27603index 9b4d51d..5d28b58 100644
27604--- a/arch/x86/kernel/step.c
27605+++ b/arch/x86/kernel/step.c
27606@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27607 struct desc_struct *desc;
27608 unsigned long base;
27609
27610- seg &= ~7UL;
27611+ seg >>= 3;
27612
27613 mutex_lock(&child->mm->context.lock);
27614- if (unlikely((seg >> 3) >= child->mm->context.size))
27615+ if (unlikely(seg >= child->mm->context.size))
27616 addr = -1L; /* bogus selector, access would fault */
27617 else {
27618 desc = child->mm->context.ldt + seg;
27619@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27620 addr += base;
27621 }
27622 mutex_unlock(&child->mm->context.lock);
27623- }
27624+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
27625+ addr = ktla_ktva(addr);
27626
27627 return addr;
27628 }
27629@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
27630 unsigned char opcode[15];
27631 unsigned long addr = convert_ip_to_linear(child, regs);
27632
27633+ if (addr == -EINVAL)
27634+ return 0;
27635+
27636 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
27637 for (i = 0; i < copied; i++) {
27638 switch (opcode[i]) {
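
The convert_ip_to_linear() hunk above folds "mask off the low bits, shift
later" into a single shift, exploiting the x86 selector layout: bits 1:0 are
the RPL, bit 2 the table indicator (0 = GDT, 1 = LDT), bits 15:3 the descriptor
index. A small decoder to make the layout concrete:

#include <stdio.h>

int main(void)
{
	unsigned short sel = 0x0007 | (5 << 3);	/* index 5, LDT, RPL 3 */
	printf("index=%u ti=%u rpl=%u\n",
	       sel >> 3, (sel >> 2) & 1, sel & 3);
	return 0;
}
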
27639diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
27640new file mode 100644
27641index 0000000..5877189
27642--- /dev/null
27643+++ b/arch/x86/kernel/sys_i386_32.c
27644@@ -0,0 +1,189 @@
27645+/*
27646+ * This file contains various random system calls that
27647+ * have a non-standard calling sequence on the Linux/i386
27648+ * platform.
27649+ */
27650+
27651+#include <linux/errno.h>
27652+#include <linux/sched.h>
27653+#include <linux/mm.h>
27654+#include <linux/fs.h>
27655+#include <linux/smp.h>
27656+#include <linux/sem.h>
27657+#include <linux/msg.h>
27658+#include <linux/shm.h>
27659+#include <linux/stat.h>
27660+#include <linux/syscalls.h>
27661+#include <linux/mman.h>
27662+#include <linux/file.h>
27663+#include <linux/utsname.h>
27664+#include <linux/ipc.h>
27665+#include <linux/elf.h>
27666+
27667+#include <linux/uaccess.h>
27668+#include <linux/unistd.h>
27669+
27670+#include <asm/syscalls.h>
27671+
27672+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
27673+{
27674+ unsigned long pax_task_size = TASK_SIZE;
27675+
27676+#ifdef CONFIG_PAX_SEGMEXEC
27677+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
27678+ pax_task_size = SEGMEXEC_TASK_SIZE;
27679+#endif
27680+
27681+ if (flags & MAP_FIXED)
27682+ if (len > pax_task_size || addr > pax_task_size - len)
27683+ return -EINVAL;
27684+
27685+ return 0;
27686+}
27687+
27688+/*
27689+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
27690+ */
27691+static unsigned long get_align_mask(void)
27692+{
27693+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
27694+ return 0;
27695+
27696+ if (!(current->flags & PF_RANDOMIZE))
27697+ return 0;
27698+
27699+ return va_align.mask;
27700+}
27701+
27702+unsigned long
27703+arch_get_unmapped_area(struct file *filp, unsigned long addr,
27704+ unsigned long len, unsigned long pgoff, unsigned long flags)
27705+{
27706+ struct mm_struct *mm = current->mm;
27707+ struct vm_area_struct *vma;
27708+ unsigned long pax_task_size = TASK_SIZE;
27709+ struct vm_unmapped_area_info info;
27710+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27711+
27712+#ifdef CONFIG_PAX_SEGMEXEC
27713+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27714+ pax_task_size = SEGMEXEC_TASK_SIZE;
27715+#endif
27716+
27717+ pax_task_size -= PAGE_SIZE;
27718+
27719+ if (len > pax_task_size)
27720+ return -ENOMEM;
27721+
27722+ if (flags & MAP_FIXED)
27723+ return addr;
27724+
27725+#ifdef CONFIG_PAX_RANDMMAP
27726+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27727+#endif
27728+
27729+ if (addr) {
27730+ addr = PAGE_ALIGN(addr);
27731+ if (pax_task_size - len >= addr) {
27732+ vma = find_vma(mm, addr);
27733+ if (check_heap_stack_gap(vma, addr, len, offset))
27734+ return addr;
27735+ }
27736+ }
27737+
27738+ info.flags = 0;
27739+ info.length = len;
27740+ info.align_mask = filp ? get_align_mask() : 0;
27741+ info.align_offset = pgoff << PAGE_SHIFT;
27742+ info.threadstack_offset = offset;
27743+
27744+#ifdef CONFIG_PAX_PAGEEXEC
27745+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
27746+ info.low_limit = 0x00110000UL;
27747+ info.high_limit = mm->start_code;
27748+
27749+#ifdef CONFIG_PAX_RANDMMAP
27750+ if (mm->pax_flags & MF_PAX_RANDMMAP)
27751+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
27752+#endif
27753+
27754+ if (info.low_limit < info.high_limit) {
27755+ addr = vm_unmapped_area(&info);
27756+ if (!IS_ERR_VALUE(addr))
27757+ return addr;
27758+ }
27759+ } else
27760+#endif
27761+
27762+ info.low_limit = mm->mmap_base;
27763+ info.high_limit = pax_task_size;
27764+
27765+ return vm_unmapped_area(&info);
27766+}
27767+
27768+unsigned long
27769+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27770+ const unsigned long len, const unsigned long pgoff,
27771+ const unsigned long flags)
27772+{
27773+ struct vm_area_struct *vma;
27774+ struct mm_struct *mm = current->mm;
27775+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
27776+ struct vm_unmapped_area_info info;
27777+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27778+
27779+#ifdef CONFIG_PAX_SEGMEXEC
27780+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27781+ pax_task_size = SEGMEXEC_TASK_SIZE;
27782+#endif
27783+
27784+ pax_task_size -= PAGE_SIZE;
27785+
27786+ /* requested length too big for entire address space */
27787+ if (len > pax_task_size)
27788+ return -ENOMEM;
27789+
27790+ if (flags & MAP_FIXED)
27791+ return addr;
27792+
27793+#ifdef CONFIG_PAX_PAGEEXEC
27794+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
27795+ goto bottomup;
27796+#endif
27797+
27798+#ifdef CONFIG_PAX_RANDMMAP
27799+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27800+#endif
27801+
27802+ /* requesting a specific address */
27803+ if (addr) {
27804+ addr = PAGE_ALIGN(addr);
27805+ if (pax_task_size - len >= addr) {
27806+ vma = find_vma(mm, addr);
27807+ if (check_heap_stack_gap(vma, addr, len, offset))
27808+ return addr;
27809+ }
27810+ }
27811+
27812+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
27813+ info.length = len;
27814+ info.low_limit = PAGE_SIZE;
27815+ info.high_limit = mm->mmap_base;
27816+ info.align_mask = filp ? get_align_mask() : 0;
27817+ info.align_offset = pgoff << PAGE_SHIFT;
27818+ info.threadstack_offset = offset;
27819+
27820+ addr = vm_unmapped_area(&info);
27821+ if (!(addr & ~PAGE_MASK))
27822+ return addr;
27823+ VM_BUG_ON(addr != -ENOMEM);
27824+
27825+bottomup:
27826+ /*
27827+ * A failed mmap() very likely causes application failure,
27828+ * so fall back to the bottom-up function here. This scenario
27829+ * can happen with large stack limits and large mmap()
27830+ * allocations.
27831+ */
27832+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
27833+}
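
Both allocators in the new file accept a candidate address only when
check_heap_stack_gap() approves it; the real helper is defined elsewhere in
this patch, but its shape is roughly the test below, which keeps a cushion
between a new mapping and any grows-down VMA above it (struct vma_sketch and
gap_ok() are stand-ins):

#include <stdbool.h>

struct vma_sketch { unsigned long vm_start; bool grows_down; };

static bool gap_ok(const struct vma_sketch *next, unsigned long addr,
		   unsigned long len, unsigned long gap)
{
	if (!next)
		return true;			/* nothing mapped above us */
	if (next->grows_down)			/* e.g. a thread stack */
		return addr + len + gap <= next->vm_start;
	return addr + len <= next->vm_start;
}
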
27834diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
27835index 30277e2..5664a29 100644
27836--- a/arch/x86/kernel/sys_x86_64.c
27837+++ b/arch/x86/kernel/sys_x86_64.c
27838@@ -81,8 +81,8 @@ out:
27839 return error;
27840 }
27841
27842-static void find_start_end(unsigned long flags, unsigned long *begin,
27843- unsigned long *end)
27844+static void find_start_end(struct mm_struct *mm, unsigned long flags,
27845+ unsigned long *begin, unsigned long *end)
27846 {
27847 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
27848 unsigned long new_begin;
27849@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
27850 *begin = new_begin;
27851 }
27852 } else {
27853- *begin = current->mm->mmap_legacy_base;
27854+ *begin = mm->mmap_legacy_base;
27855 *end = TASK_SIZE;
27856 }
27857 }
27858@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27859 struct vm_area_struct *vma;
27860 struct vm_unmapped_area_info info;
27861 unsigned long begin, end;
27862+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27863
27864 if (flags & MAP_FIXED)
27865 return addr;
27866
27867- find_start_end(flags, &begin, &end);
27868+ find_start_end(mm, flags, &begin, &end);
27869
27870 if (len > end)
27871 return -ENOMEM;
27872
27873+#ifdef CONFIG_PAX_RANDMMAP
27874+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27875+#endif
27876+
27877 if (addr) {
27878 addr = PAGE_ALIGN(addr);
27879 vma = find_vma(mm, addr);
27880- if (end - len >= addr &&
27881- (!vma || addr + len <= vma->vm_start))
27882+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27883 return addr;
27884 }
27885
27886@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27887 info.high_limit = end;
27888 info.align_mask = filp ? get_align_mask() : 0;
27889 info.align_offset = pgoff << PAGE_SHIFT;
27890+ info.threadstack_offset = offset;
27891 return vm_unmapped_area(&info);
27892 }
27893
27894@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27895 struct mm_struct *mm = current->mm;
27896 unsigned long addr = addr0;
27897 struct vm_unmapped_area_info info;
27898+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27899
27900 /* requested length too big for entire address space */
27901 if (len > TASK_SIZE)
27902@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27903 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
27904 goto bottomup;
27905
27906+#ifdef CONFIG_PAX_RANDMMAP
27907+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27908+#endif
27909+
27910 /* requesting a specific address */
27911 if (addr) {
27912 addr = PAGE_ALIGN(addr);
27913 vma = find_vma(mm, addr);
27914- if (TASK_SIZE - len >= addr &&
27915- (!vma || addr + len <= vma->vm_start))
27916+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27917 return addr;
27918 }
27919
27920@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27921 info.high_limit = mm->mmap_base;
27922 info.align_mask = filp ? get_align_mask() : 0;
27923 info.align_offset = pgoff << PAGE_SHIFT;
27924+ info.threadstack_offset = offset;
27925 addr = vm_unmapped_area(&info);
27926 if (!(addr & ~PAGE_MASK))
27927 return addr;
27928diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
27929index 91a4496..bb87552 100644
27930--- a/arch/x86/kernel/tboot.c
27931+++ b/arch/x86/kernel/tboot.c
27932@@ -221,7 +221,7 @@ static int tboot_setup_sleep(void)
27933
27934 void tboot_shutdown(u32 shutdown_type)
27935 {
27936- void (*shutdown)(void);
27937+ void (* __noreturn shutdown)(void);
27938
27939 if (!tboot_enabled())
27940 return;
27941@@ -243,7 +243,7 @@ void tboot_shutdown(u32 shutdown_type)
27942
27943 switch_to_tboot_pt();
27944
27945- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
27946+ shutdown = (void *)(unsigned long)tboot->shutdown_entry;
27947 shutdown();
27948
27949 /* should not reach here */
27950@@ -310,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
27951 return -ENODEV;
27952 }
27953
27954-static atomic_t ap_wfs_count;
27955+static atomic_unchecked_t ap_wfs_count;
27956
27957 static int tboot_wait_for_aps(int num_aps)
27958 {
27959@@ -334,9 +334,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
27960 {
27961 switch (action) {
27962 case CPU_DYING:
27963- atomic_inc(&ap_wfs_count);
27964+ atomic_inc_unchecked(&ap_wfs_count);
27965 if (num_online_cpus() == 1)
27966- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
27967+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
27968 return NOTIFY_BAD;
27969 break;
27970 }
27971@@ -422,7 +422,7 @@ static __init int tboot_late_init(void)
27972
27973 tboot_create_trampoline();
27974
27975- atomic_set(&ap_wfs_count, 0);
27976+ atomic_set_unchecked(&ap_wfs_count, 0);
27977 register_hotcpu_notifier(&tboot_cpu_notifier);
27978
27979 #ifdef CONFIG_DEBUG_FS
27980diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
27981index bf7ef5c..59d0ac9 100644
27982--- a/arch/x86/kernel/time.c
27983+++ b/arch/x86/kernel/time.c
27984@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
27985 {
27986 unsigned long pc = instruction_pointer(regs);
27987
27988- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
27989+ if (!user_mode(regs) && in_lock_functions(pc)) {
27990 #ifdef CONFIG_FRAME_POINTER
27991- return *(unsigned long *)(regs->bp + sizeof(long));
27992+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
27993 #else
27994 unsigned long *sp =
27995 (unsigned long *)kernel_stack_pointer(regs);
27996@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
27997 * or above a saved flags. Eflags has bits 22-31 zero,
27998 * kernel addresses don't.
27999 */
28000+
28001+#ifdef CONFIG_PAX_KERNEXEC
28002+ return ktla_ktva(sp[0]);
28003+#else
28004 if (sp[0] >> 22)
28005 return sp[0];
28006 if (sp[1] >> 22)
28007 return sp[1];
28008 #endif
28009+
28010+#endif
28011 }
28012 return pc;
28013 }
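
profile_pc() above has to guess whether a stack word is a return address or a
saved EFLAGS image. The heuristic in the surrounding comment works because the
architectural EFLAGS word keeps bits 31..22 zero, while i386 kernel text sits
at or above PAGE_OFFSET (0xc0000000 with the default split), so its addresses
always have some of those bits set:

#include <assert.h>

int main(void)
{
	unsigned long eflags = 0x00000246;	/* a typical EFLAGS image */
	unsigned long kaddr  = 0xc1000000;	/* an address above PAGE_OFFSET */
	assert((eflags >> 22) == 0);		/* flags word: high bits clear */
	assert((kaddr  >> 22) != 0);		/* kernel text: high bits set */
	return 0;
}
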
28014diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
28015index f7fec09..9991981 100644
28016--- a/arch/x86/kernel/tls.c
28017+++ b/arch/x86/kernel/tls.c
28018@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
28019 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
28020 return -EINVAL;
28021
28022+#ifdef CONFIG_PAX_SEGMEXEC
28023+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
28024+ return -EINVAL;
28025+#endif
28026+
28027 set_tls_desc(p, idx, &info, 1);
28028
28029 return 0;
28030@@ -200,7 +205,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
28031
28032 if (kbuf)
28033 info = kbuf;
28034- else if (__copy_from_user(infobuf, ubuf, count))
28035+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
28036 return -EFAULT;
28037 else
28038 info = infobuf;
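
The regset_tls_set() hunk adds the classic bounded-copy guard: a
caller-supplied count may never exceed the on-stack buffer it is copied into.
The same pattern in plain C:

#include <stddef.h>
#include <string.h>

static int copy_bounded(void *dst, size_t dst_size,
			const void *src, size_t count)
{
	if (count > dst_size)
		return -1;		/* reject rather than overflow dst */
	memcpy(dst, src, count);
	return 0;
}
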
28039diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
28040index 1c113db..287b42e 100644
28041--- a/arch/x86/kernel/tracepoint.c
28042+++ b/arch/x86/kernel/tracepoint.c
28043@@ -9,11 +9,11 @@
28044 #include <linux/atomic.h>
28045
28046 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
28047-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
28048+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
28049 (unsigned long) trace_idt_table };
28050
28051 /* No need to be aligned, but done to keep all IDTs defined the same way. */
28052-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
28053+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
28054
28055 static int trace_irq_vector_refcount;
28056 static DEFINE_MUTEX(irq_vector_mutex);
28057diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
28058index 0d0e922..0886373 100644
28059--- a/arch/x86/kernel/traps.c
28060+++ b/arch/x86/kernel/traps.c
28061@@ -67,7 +67,7 @@
28062 #include <asm/proto.h>
28063
28064 /* No need to be aligned, but done to keep all IDTs defined the same way. */
28065-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
28066+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
28067 #else
28068 #include <asm/processor-flags.h>
28069 #include <asm/setup.h>
28070@@ -76,7 +76,7 @@ asmlinkage int system_call(void);
28071 #endif
28072
28073 /* Must be page-aligned because the real IDT is used in a fixmap. */
28074-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
28075+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
28076
28077 DECLARE_BITMAP(used_vectors, NR_VECTORS);
28078 EXPORT_SYMBOL_GPL(used_vectors);
28079@@ -108,11 +108,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
28080 }
28081
28082 static nokprobe_inline int
28083-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
28084+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
28085 struct pt_regs *regs, long error_code)
28086 {
28087 #ifdef CONFIG_X86_32
28088- if (regs->flags & X86_VM_MASK) {
28089+ if (v8086_mode(regs)) {
28090 /*
28091 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
28092 * On nmi (interrupt 2), do_trap should not be called.
28093@@ -125,12 +125,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
28094 return -1;
28095 }
28096 #endif
28097- if (!user_mode(regs)) {
28098+ if (!user_mode_novm(regs)) {
28099 if (!fixup_exception(regs)) {
28100 tsk->thread.error_code = error_code;
28101 tsk->thread.trap_nr = trapnr;
28102+
28103+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28104+ if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
28105+ str = "PAX: suspicious stack segment fault";
28106+#endif
28107+
28108 die(str, regs, error_code);
28109 }
28110+
28111+#ifdef CONFIG_PAX_REFCOUNT
28112+ if (trapnr == X86_TRAP_OF)
28113+ pax_report_refcount_overflow(regs);
28114+#endif
28115+
28116 return 0;
28117 }
28118
28119@@ -169,7 +181,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
28120 }
28121
28122 static void
28123-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
28124+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
28125 long error_code, siginfo_t *info)
28126 {
28127 struct task_struct *tsk = current;
28128@@ -193,7 +205,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
28129 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
28130 printk_ratelimit()) {
28131 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
28132- tsk->comm, tsk->pid, str,
28133+ tsk->comm, task_pid_nr(tsk), str,
28134 regs->ip, regs->sp, error_code);
28135 print_vma_addr(" in ", regs->ip);
28136 pr_cont("\n");
28137@@ -266,6 +278,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
28138 tsk->thread.error_code = error_code;
28139 tsk->thread.trap_nr = X86_TRAP_DF;
28140
28141+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
28142+ if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
28143+ die("grsec: kernel stack overflow detected", regs, error_code);
28144+#endif
28145+
28146 #ifdef CONFIG_DOUBLEFAULT
28147 df_debug(regs, error_code);
28148 #endif
28149@@ -288,7 +305,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
28150 conditional_sti(regs);
28151
28152 #ifdef CONFIG_X86_32
28153- if (regs->flags & X86_VM_MASK) {
28154+ if (v8086_mode(regs)) {
28155 local_irq_enable();
28156 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
28157 goto exit;
28158@@ -296,18 +313,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
28159 #endif
28160
28161 tsk = current;
28162- if (!user_mode(regs)) {
28163+ if (!user_mode_novm(regs)) {
28164 if (fixup_exception(regs))
28165 goto exit;
28166
28167 tsk->thread.error_code = error_code;
28168 tsk->thread.trap_nr = X86_TRAP_GP;
28169 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
28170- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
28171+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
28172+
28173+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28174+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
28175+ die("PAX: suspicious general protection fault", regs, error_code);
28176+ else
28177+#endif
28178+
28179 die("general protection fault", regs, error_code);
28180+ }
28181 goto exit;
28182 }
28183
28184+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
28185+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
28186+ struct mm_struct *mm = tsk->mm;
28187+ unsigned long limit;
28188+
28189+ down_write(&mm->mmap_sem);
28190+ limit = mm->context.user_cs_limit;
28191+ if (limit < TASK_SIZE) {
28192+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
28193+ up_write(&mm->mmap_sem);
28194+ return;
28195+ }
28196+ up_write(&mm->mmap_sem);
28197+ }
28198+#endif
28199+
28200 tsk->thread.error_code = error_code;
28201 tsk->thread.trap_nr = X86_TRAP_GP;
28202
28203@@ -481,7 +522,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28204 /* It's safe to allow irq's after DR6 has been saved */
28205 preempt_conditional_sti(regs);
28206
28207- if (regs->flags & X86_VM_MASK) {
28208+ if (v8086_mode(regs)) {
28209 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
28210 X86_TRAP_DB);
28211 preempt_conditional_cli(regs);
28212@@ -496,7 +537,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28213 * We already checked v86 mode above, so we can check for kernel mode
28214 * by just checking the CPL of CS.
28215 */
28216- if ((dr6 & DR_STEP) && !user_mode(regs)) {
28217+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
28218 tsk->thread.debugreg6 &= ~DR_STEP;
28219 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
28220 regs->flags &= ~X86_EFLAGS_TF;
28221@@ -529,7 +570,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
28222 return;
28223 conditional_sti(regs);
28224
28225- if (!user_mode_vm(regs))
28226+ if (!user_mode(regs))
28227 {
28228 if (!fixup_exception(regs)) {
28229 task->thread.error_code = error_code;
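
The GRKERNSEC_KSTACKOVERFLOW check in do_double_fault() above reports a stack
overflow when the faulting stack pointer has dropped to within a page below the
base of the thread's stack allocation. The comparison leans on unsigned
wrap-around, as this sketch (PAGE_SIZE assumed 4 KiB) demonstrates:

#include <assert.h>
#include <stdbool.h>

/* base is the lowest address of the stack; the stack grows down toward it */
static bool hit_guard(unsigned long base, unsigned long sp)
{
	return base - sp <= 4096;	/* wraps huge for healthy sp > base */
}

int main(void)
{
	assert(hit_guard(0x10000, 0x10000 - 64));	/* overflowed: caught */
	assert(!hit_guard(0x10000, 0x10000 + 8192));	/* in range: ignored */
	return 0;
}
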
28230diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
28231index ea03031..34a5cdda 100644
28232--- a/arch/x86/kernel/tsc.c
28233+++ b/arch/x86/kernel/tsc.c
28234@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
28235 */
28236 smp_wmb();
28237
28238- ACCESS_ONCE(c2n->head) = data;
28239+ ACCESS_ONCE_RW(c2n->head) = data;
28240 }
28241
28242 /*
28243diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
28244index 5d1cbfe..2a21feb 100644
28245--- a/arch/x86/kernel/uprobes.c
28246+++ b/arch/x86/kernel/uprobes.c
28247@@ -845,7 +845,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
28248 int ret = NOTIFY_DONE;
28249
28250 /* We are only interested in userspace traps */
28251- if (regs && !user_mode_vm(regs))
28252+ if (regs && !user_mode(regs))
28253 return NOTIFY_DONE;
28254
28255 switch (val) {
28256@@ -919,7 +919,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
28257
28258 if (nleft != rasize) {
28259 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
28260- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
28261+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
28262
28263 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
28264 }
28265diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
28266index b9242ba..50c5edd 100644
28267--- a/arch/x86/kernel/verify_cpu.S
28268+++ b/arch/x86/kernel/verify_cpu.S
28269@@ -20,6 +20,7 @@
28270 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
28271 * arch/x86/kernel/trampoline_64.S: secondary processor verification
28272 * arch/x86/kernel/head_32.S: processor startup
28273+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
28274 *
28275 * verify_cpu, returns the status of longmode and SSE in register %eax.
28276 * 0: Success 1: Failure
28277diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
28278index e8edcf5..27f9344 100644
28279--- a/arch/x86/kernel/vm86_32.c
28280+++ b/arch/x86/kernel/vm86_32.c
28281@@ -44,6 +44,7 @@
28282 #include <linux/ptrace.h>
28283 #include <linux/audit.h>
28284 #include <linux/stddef.h>
28285+#include <linux/grsecurity.h>
28286
28287 #include <asm/uaccess.h>
28288 #include <asm/io.h>
28289@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
28290 do_exit(SIGSEGV);
28291 }
28292
28293- tss = &per_cpu(init_tss, get_cpu());
28294+ tss = init_tss + get_cpu();
28295 current->thread.sp0 = current->thread.saved_sp0;
28296 current->thread.sysenter_cs = __KERNEL_CS;
28297 load_sp0(tss, &current->thread);
28298@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
28299
28300 if (tsk->thread.saved_sp0)
28301 return -EPERM;
28302+
28303+#ifdef CONFIG_GRKERNSEC_VM86
28304+ if (!capable(CAP_SYS_RAWIO)) {
28305+ gr_handle_vm86();
28306+ return -EPERM;
28307+ }
28308+#endif
28309+
28310 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
28311 offsetof(struct kernel_vm86_struct, vm86plus) -
28312 sizeof(info.regs));
28313@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
28314 int tmp;
28315 struct vm86plus_struct __user *v86;
28316
28317+#ifdef CONFIG_GRKERNSEC_VM86
28318+ if (!capable(CAP_SYS_RAWIO)) {
28319+ gr_handle_vm86();
28320+ return -EPERM;
28321+ }
28322+#endif
28323+
28324 tsk = current;
28325 switch (cmd) {
28326 case VM86_REQUEST_IRQ:
28327@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
28328 tsk->thread.saved_fs = info->regs32->fs;
28329 tsk->thread.saved_gs = get_user_gs(info->regs32);
28330
28331- tss = &per_cpu(init_tss, get_cpu());
28332+ tss = init_tss + get_cpu();
28333 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
28334 if (cpu_has_sep)
28335 tsk->thread.sysenter_cs = 0;
28336@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
28337 goto cannot_handle;
28338 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
28339 goto cannot_handle;
28340- intr_ptr = (unsigned long __user *) (i << 2);
28341+ intr_ptr = (__force unsigned long __user *) (i << 2);
28342 if (get_user(segoffs, intr_ptr))
28343 goto cannot_handle;
28344 if ((segoffs >> 16) == BIOSSEG)
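
Both vm86 entry points above receive the same gate: without CAP_SYS_RAWIO the
attempt is logged through gr_handle_vm86() and rejected with -EPERM. Reduced to
its shape (the helpers below are stand-ins, not this patch's API):

#include <errno.h>
#include <stdbool.h>

static bool has_cap_sys_rawio;			/* assumed capability state */
static void log_vm86_attempt(void) { }		/* gr_handle_vm86() stand-in */

static int vm86_gate_sketch(void)
{
	if (!has_cap_sys_rawio) {
		log_vm86_attempt();
		return -EPERM;
	}
	return 0;			/* privileged: fall through to vm86 */
}
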
28345diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
28346index 49edf2d..c0d1362 100644
28347--- a/arch/x86/kernel/vmlinux.lds.S
28348+++ b/arch/x86/kernel/vmlinux.lds.S
28349@@ -26,6 +26,13 @@
28350 #include <asm/page_types.h>
28351 #include <asm/cache.h>
28352 #include <asm/boot.h>
28353+#include <asm/segment.h>
28354+
28355+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28356+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
28357+#else
28358+#define __KERNEL_TEXT_OFFSET 0
28359+#endif
28360
28361 #undef i386 /* in case the preprocessor is a 32bit one */
28362
28363@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
28364
28365 PHDRS {
28366 text PT_LOAD FLAGS(5); /* R_E */
28367+#ifdef CONFIG_X86_32
28368+ module PT_LOAD FLAGS(5); /* R_E */
28369+#endif
28370+#ifdef CONFIG_XEN
28371+ rodata PT_LOAD FLAGS(5); /* R_E */
28372+#else
28373+ rodata PT_LOAD FLAGS(4); /* R__ */
28374+#endif
28375 data PT_LOAD FLAGS(6); /* RW_ */
28376-#ifdef CONFIG_X86_64
28377+ init.begin PT_LOAD FLAGS(6); /* RW_ */
28378 #ifdef CONFIG_SMP
28379 percpu PT_LOAD FLAGS(6); /* RW_ */
28380 #endif
28381+ text.init PT_LOAD FLAGS(5); /* R_E */
28382+ text.exit PT_LOAD FLAGS(5); /* R_E */
28383 init PT_LOAD FLAGS(7); /* RWE */
28384-#endif
28385 note PT_NOTE FLAGS(0); /* ___ */
28386 }
28387
28388 SECTIONS
28389 {
28390 #ifdef CONFIG_X86_32
28391- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
28392- phys_startup_32 = startup_32 - LOAD_OFFSET;
28393+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
28394 #else
28395- . = __START_KERNEL;
28396- phys_startup_64 = startup_64 - LOAD_OFFSET;
28397+ . = __START_KERNEL;
28398 #endif
28399
28400 /* Text and read-only data */
28401- .text : AT(ADDR(.text) - LOAD_OFFSET) {
28402- _text = .;
28403+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28404 /* bootstrapping code */
28405+#ifdef CONFIG_X86_32
28406+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28407+#else
28408+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28409+#endif
28410+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28411+ _text = .;
28412 HEAD_TEXT
28413 . = ALIGN(8);
28414 _stext = .;
28415@@ -104,13 +124,47 @@ SECTIONS
28416 IRQENTRY_TEXT
28417 *(.fixup)
28418 *(.gnu.warning)
28419- /* End of text section */
28420- _etext = .;
28421 } :text = 0x9090
28422
28423- NOTES :text :note
28424+ . += __KERNEL_TEXT_OFFSET;
28425
28426- EXCEPTION_TABLE(16) :text = 0x9090
28427+#ifdef CONFIG_X86_32
28428+ . = ALIGN(PAGE_SIZE);
28429+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
28430+
28431+#ifdef CONFIG_PAX_KERNEXEC
28432+ MODULES_EXEC_VADDR = .;
28433+ BYTE(0)
28434+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
28435+ . = ALIGN(HPAGE_SIZE) - 1;
28436+ MODULES_EXEC_END = .;
28437+#endif
28438+
28439+ } :module
28440+#endif
28441+
28442+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
28443+ /* End of text section */
28444+ BYTE(0)
28445+ _etext = . - __KERNEL_TEXT_OFFSET;
28446+ }
28447+
28448+#ifdef CONFIG_X86_32
28449+ . = ALIGN(PAGE_SIZE);
28450+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
28451+ . = ALIGN(PAGE_SIZE);
28452+ *(.empty_zero_page)
28453+ *(.initial_pg_fixmap)
28454+ *(.initial_pg_pmd)
28455+ *(.initial_page_table)
28456+ *(.swapper_pg_dir)
28457+ } :rodata
28458+#endif
28459+
28460+ . = ALIGN(PAGE_SIZE);
28461+ NOTES :rodata :note
28462+
28463+ EXCEPTION_TABLE(16) :rodata
28464
28465 #if defined(CONFIG_DEBUG_RODATA)
28466 /* .text should occupy whole number of pages */
28467@@ -122,16 +176,20 @@ SECTIONS
28468
28469 /* Data */
28470 .data : AT(ADDR(.data) - LOAD_OFFSET) {
28471+
28472+#ifdef CONFIG_PAX_KERNEXEC
28473+ . = ALIGN(HPAGE_SIZE);
28474+#else
28475+ . = ALIGN(PAGE_SIZE);
28476+#endif
28477+
28478 /* Start of data section */
28479 _sdata = .;
28480
28481 /* init_task */
28482 INIT_TASK_DATA(THREAD_SIZE)
28483
28484-#ifdef CONFIG_X86_32
28485- /* 32 bit has nosave before _edata */
28486 NOSAVE_DATA
28487-#endif
28488
28489 PAGE_ALIGNED_DATA(PAGE_SIZE)
28490
28491@@ -174,12 +232,19 @@ SECTIONS
28492 . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
28493
28494 /* Init code and data - will be freed after init */
28495- . = ALIGN(PAGE_SIZE);
28496 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
28497+ BYTE(0)
28498+
28499+#ifdef CONFIG_PAX_KERNEXEC
28500+ . = ALIGN(HPAGE_SIZE);
28501+#else
28502+ . = ALIGN(PAGE_SIZE);
28503+#endif
28504+
28505 __init_begin = .; /* paired with __init_end */
28506- }
28507+ } :init.begin
28508
28509-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
28510+#ifdef CONFIG_SMP
28511 /*
28512 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
28513 * output PHDR, so the next output section - .init.text - should
28514@@ -188,12 +253,27 @@ SECTIONS
28515 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
28516 #endif
28517
28518- INIT_TEXT_SECTION(PAGE_SIZE)
28519-#ifdef CONFIG_X86_64
28520- :init
28521-#endif
28522+ . = ALIGN(PAGE_SIZE);
28523+ init_begin = .;
28524+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
28525+ VMLINUX_SYMBOL(_sinittext) = .;
28526+ INIT_TEXT
28527+ VMLINUX_SYMBOL(_einittext) = .;
28528+ . = ALIGN(PAGE_SIZE);
28529+ } :text.init
28530
28531- INIT_DATA_SECTION(16)
28532+ /*
28533+ * .exit.text is discard at runtime, not link time, to deal with
28534+ * references from .altinstructions and .eh_frame
28535+ */
28536+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28537+ EXIT_TEXT
28538+ . = ALIGN(16);
28539+ } :text.exit
28540+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
28541+
28542+ . = ALIGN(PAGE_SIZE);
28543+ INIT_DATA_SECTION(16) :init
28544
28545 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
28546 __x86_cpu_dev_start = .;
28547@@ -264,19 +344,12 @@ SECTIONS
28548 }
28549
28550 . = ALIGN(8);
28551- /*
28552- * .exit.text is discard at runtime, not link time, to deal with
28553- * references from .altinstructions and .eh_frame
28554- */
28555- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
28556- EXIT_TEXT
28557- }
28558
28559 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
28560 EXIT_DATA
28561 }
28562
28563-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
28564+#ifndef CONFIG_SMP
28565 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
28566 #endif
28567
28568@@ -295,16 +368,10 @@ SECTIONS
28569 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
28570 __smp_locks = .;
28571 *(.smp_locks)
28572- . = ALIGN(PAGE_SIZE);
28573 __smp_locks_end = .;
28574+ . = ALIGN(PAGE_SIZE);
28575 }
28576
28577-#ifdef CONFIG_X86_64
28578- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
28579- NOSAVE_DATA
28580- }
28581-#endif
28582-
28583 /* BSS */
28584 . = ALIGN(PAGE_SIZE);
28585 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
28586@@ -320,6 +387,7 @@ SECTIONS
28587 __brk_base = .;
28588 . += 64 * 1024; /* 64k alignment slop space */
28589 *(.brk_reservation) /* areas brk users have reserved */
28590+ . = ALIGN(HPAGE_SIZE);
28591 __brk_limit = .;
28592 }
28593
28594@@ -346,13 +414,12 @@ SECTIONS
28595 * for the boot processor.
28596 */
28597 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
28598-INIT_PER_CPU(gdt_page);
28599 INIT_PER_CPU(irq_stack_union);
28600
28601 /*
28602 * Build-time check on the image size:
28603 */
28604-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
28605+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
28606 "kernel image bigger than KERNEL_IMAGE_SIZE");
28607
28608 #ifdef CONFIG_SMP
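[Note on the vmlinux.lds.S hunk above: this is the KERNEXEC layout rework. On 32-bit with CONFIG_PAX_KERNEXEC the .text output section is emitted at (. - __KERNEL_TEXT_OFFSET) but loaded with the offset added back, a dedicated module text region is carved out, rodata/NOTES/exception table move into a non-executable (non-Xen) PT_LOAD, and data/.init.begin/brk are HPAGE_SIZE-aligned so they can be covered by large non-executable pages. A tiny model of the VMA/LMA arithmetic the new .text rule performs; all constants are invented stand-ins, only the relationships mirror the script.]

    /* Stand-in arithmetic for the 32-bit KERNEXEC .text rule. */
    #include <stdio.h>

    #define LOAD_OFFSET_DEMO 0xC0000000UL            /* LOAD_OFFSET stand-in */
    #define PHYS_DEMO        0x01000000UL            /* ____LOAD_PHYSICAL_ADDR */
    #define KTEXT_OFF_DEMO   (LOAD_OFFSET_DEMO + PHYS_DEMO) /* __KERNEL_TEXT_OFFSET */

    int main(void)
    {
        unsigned long dot = LOAD_OFFSET_DEMO + PHYS_DEMO;    /* '.' at .text */
        unsigned long vma = dot - KTEXT_OFF_DEMO;            /* (. - offset) */
        unsigned long lma = vma - LOAD_OFFSET_DEMO + KTEXT_OFF_DEMO; /* AT() */
        printf("vma=%#lx lma=%#lx\n", vma, lma);  /* vma=0, lma=0x1000000 */
        return 0;
    }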
28609diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
28610index e1e1e80..1400089 100644
28611--- a/arch/x86/kernel/vsyscall_64.c
28612+++ b/arch/x86/kernel/vsyscall_64.c
28613@@ -54,15 +54,13 @@
28614
28615 DEFINE_VVAR(int, vgetcpu_mode);
28616
28617-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
28618+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
28619
28620 static int __init vsyscall_setup(char *str)
28621 {
28622 if (str) {
28623 if (!strcmp("emulate", str))
28624 vsyscall_mode = EMULATE;
28625- else if (!strcmp("native", str))
28626- vsyscall_mode = NATIVE;
28627 else if (!strcmp("none", str))
28628 vsyscall_mode = NONE;
28629 else
28630@@ -279,8 +277,7 @@ do_ret:
28631 return true;
28632
28633 sigsegv:
28634- force_sig(SIGSEGV, current);
28635- return true;
28636+ do_group_exit(SIGKILL);
28637 }
28638
28639 /*
28640@@ -331,10 +328,7 @@ void __init map_vsyscall(void)
28641 extern char __vsyscall_page;
28642 unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
28643
28644- __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
28645- vsyscall_mode == NATIVE
28646- ? PAGE_KERNEL_VSYSCALL
28647- : PAGE_KERNEL_VVAR);
28648+ __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
28649 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
28650 (unsigned long)VSYSCALL_ADDR);
28651 }
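[Note on the vsyscall_64.c hunk above: two hardening changes. The legacy vsyscall page loses its NATIVE (directly executable) mode, so it is always emulated or absent, and a faulting emulated vsyscall now terminates the whole thread group with do_group_exit(SIGKILL) instead of delivering a catchable SIGSEGV. A small user-space probe, illustrative and only meaningful on x86-64 kernels of this vintage, exercising the fixed-address gettimeofday vsyscall.]

    /* Probe sketch: 0xffffffffff600000 is the historical fixed address of
     * the gettimeofday vsyscall. Under vsyscall=emulate this still works;
     * with this patch there is no native mode, and an abusive call path
     * gets the task group killed rather than a SIGSEGV it could handle. */
    #include <stdio.h>
    #include <sys/time.h>

    typedef int (*vgtod_t)(struct timeval *, struct timezone *);

    int main(void)
    {
        vgtod_t vgtod = (vgtod_t)0xffffffffff600000UL;
        struct timeval tv;
        if (vgtod(&tv, NULL) == 0)
            printf("vsyscall gettimeofday: %ld\n", (long)tv.tv_sec);
        return 0;
    }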
28652diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
28653index 04068192..4d75aa6 100644
28654--- a/arch/x86/kernel/x8664_ksyms_64.c
28655+++ b/arch/x86/kernel/x8664_ksyms_64.c
28656@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
28657 EXPORT_SYMBOL(copy_user_generic_unrolled);
28658 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
28659 EXPORT_SYMBOL(__copy_user_nocache);
28660-EXPORT_SYMBOL(_copy_from_user);
28661-EXPORT_SYMBOL(_copy_to_user);
28662
28663 EXPORT_SYMBOL(copy_page);
28664 EXPORT_SYMBOL(clear_page);
28665@@ -73,3 +71,7 @@ EXPORT_SYMBOL(___preempt_schedule);
28666 EXPORT_SYMBOL(___preempt_schedule_context);
28667 #endif
28668 #endif
28669+
28670+#ifdef CONFIG_PAX_PER_CPU_PGD
28671+EXPORT_SYMBOL(cpu_pgd);
28672+#endif
28673diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
28674index e48b674..a451dd9 100644
28675--- a/arch/x86/kernel/x86_init.c
28676+++ b/arch/x86/kernel/x86_init.c
28677@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
28678 static void default_nmi_init(void) { };
28679 static int default_i8042_detect(void) { return 1; };
28680
28681-struct x86_platform_ops x86_platform = {
28682+struct x86_platform_ops x86_platform __read_only = {
28683 .calibrate_tsc = native_calibrate_tsc,
28684 .get_wallclock = mach_get_cmos_time,
28685 .set_wallclock = mach_set_rtc_mmss,
28686@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
28687 EXPORT_SYMBOL_GPL(x86_platform);
28688
28689 #if defined(CONFIG_PCI_MSI)
28690-struct x86_msi_ops x86_msi = {
28691+struct x86_msi_ops x86_msi __read_only = {
28692 .setup_msi_irqs = native_setup_msi_irqs,
28693 .compose_msi_msg = native_compose_msi_msg,
28694 .teardown_msi_irq = native_teardown_msi_irq,
28695@@ -150,7 +150,7 @@ u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag)
28696 }
28697 #endif
28698
28699-struct x86_io_apic_ops x86_io_apic_ops = {
28700+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
28701 .init = native_io_apic_init_mappings,
28702 .read = native_io_apic_read,
28703 .write = native_io_apic_write,
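[Note on the x86_init.c hunk above: marking x86_platform, x86_msi and x86_io_apic_ops __read_only moves these function-pointer tables into memory that is write-protected after boot, removing a classic write-a-function-pointer target; the few legitimate late writes (see the kvm_x86_ops edits further down) are bracketed with pax_open_kernel()/pax_close_kernel(). A user-space analogue of that discipline using mprotect(); everything here is a sketch, not kernel code.]

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    typedef void (*op_fn)(void);
    static void op_a(void) { puts("a"); }
    static void op_b(void) { puts("b"); }

    int main(void)
    {
        long pg = sysconf(_SC_PAGESIZE);
        op_fn *tbl = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (tbl == MAP_FAILED)
            return 1;
        tbl[0] = op_a;
        mprotect(tbl, pg, PROT_READ);              /* table is now read-only */
        mprotect(tbl, pg, PROT_READ | PROT_WRITE); /* pax_open_kernel() analogue */
        tbl[0] = op_b;                             /* the one sanctioned update */
        mprotect(tbl, pg, PROT_READ);              /* pax_close_kernel() analogue */
        tbl[0]();                                  /* prints "b" */
        return 0;
    }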
28704diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
28705index a4b451c..8dfe1ad 100644
28706--- a/arch/x86/kernel/xsave.c
28707+++ b/arch/x86/kernel/xsave.c
28708@@ -164,18 +164,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28709
28710 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
28711 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
28712- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28713+ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28714
28715 if (!use_xsave())
28716 return err;
28717
28718- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
28719+ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
28720
28721 /*
28722 * Read the xstate_bv which we copied (directly from the cpu or
28723 * from the state in task struct) to the user buffers.
28724 */
28725- err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28726+ err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28727
28728 /*
28729 * For legacy compatible, we always set FP/SSE bits in the bit
28730@@ -190,7 +190,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28731 */
28732 xstate_bv |= XSTATE_FPSSE;
28733
28734- err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28735+ err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28736
28737 return err;
28738 }
28739@@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
28740 {
28741 int err;
28742
28743+ buf = (struct xsave_struct __user *)____m(buf);
28744 if (use_xsave())
28745 err = xsave_user(buf);
28746 else if (use_fxsr())
28747@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
28748 */
28749 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
28750 {
28751+ buf = (void __user *)____m(buf);
28752 if (use_xsave()) {
28753 if ((unsigned long)buf % 64 || fx_only) {
28754 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
28755diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
28756index 38a0afe..94421a9 100644
28757--- a/arch/x86/kvm/cpuid.c
28758+++ b/arch/x86/kvm/cpuid.c
28759@@ -166,15 +166,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
28760 struct kvm_cpuid2 *cpuid,
28761 struct kvm_cpuid_entry2 __user *entries)
28762 {
28763- int r;
28764+ int r, i;
28765
28766 r = -E2BIG;
28767 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
28768 goto out;
28769 r = -EFAULT;
28770- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
28771- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28772+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28773 goto out;
28774+ for (i = 0; i < cpuid->nent; ++i) {
28775+ struct kvm_cpuid_entry2 cpuid_entry;
28776+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
28777+ goto out;
28778+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
28779+ }
28780 vcpu->arch.cpuid_nent = cpuid->nent;
28781 kvm_apic_set_version(vcpu);
28782 kvm_x86_ops->cpuid_update(vcpu);
28783@@ -189,15 +194,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
28784 struct kvm_cpuid2 *cpuid,
28785 struct kvm_cpuid_entry2 __user *entries)
28786 {
28787- int r;
28788+ int r, i;
28789
28790 r = -E2BIG;
28791 if (cpuid->nent < vcpu->arch.cpuid_nent)
28792 goto out;
28793 r = -EFAULT;
28794- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
28795- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28796+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28797 goto out;
28798+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
28799+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
28800+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
28801+ goto out;
28802+ }
28803 return 0;
28804
28805 out:
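[Note on the cpuid.c hunk above: instead of one bulk copy_from_user()/copy_to_user() whose length is a multiplication on a caller-influenced nent, the rewritten ioctl handlers validate the whole user range with access_ok() and then move one fixed-size struct at a time through a stack temporary, which is friendlier to PAX_USERCOPY/size-overflow checking. A self-contained sketch of that pattern with memcpy standing in for the uaccess primitives; all names here are invented.]

    #include <string.h>
    #include <stdio.h>

    struct entry { unsigned int function, index, eax, ebx, ecx, edx; };

    static int copy_entries_bounded(struct entry *dst, const struct entry *usrc,
                                    unsigned int nent, unsigned int max)
    {
        unsigned int i;
        if (nent > max)                      /* reject oversized requests up front */
            return -1;
        for (i = 0; i < nent; i++) {
            struct entry tmp;
            memcpy(&tmp, &usrc[i], sizeof(tmp)); /* one fixed-size copy per entry */
            dst[i] = tmp;                    /* commit only after the copy succeeded */
        }
        return 0;
    }

    int main(void)
    {
        struct entry src[2] = { { .function = 0 }, { .function = 1 } };
        struct entry dst[4];
        printf("%d\n", copy_entries_bounded(dst, src, 2, 4));
        return 0;
    }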
28806diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
28807index 453e5fb..214168f 100644
28808--- a/arch/x86/kvm/lapic.c
28809+++ b/arch/x86/kvm/lapic.c
28810@@ -55,7 +55,7 @@
28811 #define APIC_BUS_CYCLE_NS 1
28812
28813 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
28814-#define apic_debug(fmt, arg...)
28815+#define apic_debug(fmt, arg...) do {} while (0)
28816
28817 #define APIC_LVT_NUM 6
28818 /* 14 is the version for Xeon and Pentium 8.4.8*/
28819diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
28820index 4107765..d9eb358 100644
28821--- a/arch/x86/kvm/paging_tmpl.h
28822+++ b/arch/x86/kvm/paging_tmpl.h
28823@@ -331,7 +331,7 @@ retry_walk:
28824 if (unlikely(kvm_is_error_hva(host_addr)))
28825 goto error;
28826
28827- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
28828+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
28829 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
28830 goto error;
28831 walker->ptep_user[walker->level - 1] = ptep_user;
28832diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
28833index b5e994a..35b5866 100644
28834--- a/arch/x86/kvm/svm.c
28835+++ b/arch/x86/kvm/svm.c
28836@@ -3541,7 +3541,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
28837 int cpu = raw_smp_processor_id();
28838
28839 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
28840+
28841+ pax_open_kernel();
28842 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
28843+ pax_close_kernel();
28844+
28845 load_TR_desc();
28846 }
28847
28848@@ -3942,6 +3946,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
28849 #endif
28850 #endif
28851
28852+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28853+ __set_fs(current_thread_info()->addr_limit);
28854+#endif
28855+
28856 reload_tss(vcpu);
28857
28858 local_irq_disable();
28859diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
28860index 801332e..eeff1cc 100644
28861--- a/arch/x86/kvm/vmx.c
28862+++ b/arch/x86/kvm/vmx.c
28863@@ -1339,12 +1339,12 @@ static void vmcs_write64(unsigned long field, u64 value)
28864 #endif
28865 }
28866
28867-static void vmcs_clear_bits(unsigned long field, u32 mask)
28868+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
28869 {
28870 vmcs_writel(field, vmcs_readl(field) & ~mask);
28871 }
28872
28873-static void vmcs_set_bits(unsigned long field, u32 mask)
28874+static void vmcs_set_bits(unsigned long field, unsigned long mask)
28875 {
28876 vmcs_writel(field, vmcs_readl(field) | mask);
28877 }
28878@@ -1604,7 +1604,11 @@ static void reload_tss(void)
28879 struct desc_struct *descs;
28880
28881 descs = (void *)gdt->address;
28882+
28883+ pax_open_kernel();
28884 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
28885+ pax_close_kernel();
28886+
28887 load_TR_desc();
28888 }
28889
28890@@ -1832,6 +1836,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
28891 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
28892 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
28893
28894+#ifdef CONFIG_PAX_PER_CPU_PGD
28895+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28896+#endif
28897+
28898 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
28899 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
28900 vmx->loaded_vmcs->cpu = cpu;
28901@@ -2121,7 +2129,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
28902 * reads and returns guest's timestamp counter "register"
28903 * guest_tsc = host_tsc + tsc_offset -- 21.3
28904 */
28905-static u64 guest_read_tsc(void)
28906+static u64 __intentional_overflow(-1) guest_read_tsc(void)
28907 {
28908 u64 host_tsc, tsc_offset;
28909
28910@@ -3093,8 +3101,11 @@ static __init int hardware_setup(void)
28911 if (!cpu_has_vmx_flexpriority())
28912 flexpriority_enabled = 0;
28913
28914- if (!cpu_has_vmx_tpr_shadow())
28915- kvm_x86_ops->update_cr8_intercept = NULL;
28916+ if (!cpu_has_vmx_tpr_shadow()) {
28917+ pax_open_kernel();
28918+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28919+ pax_close_kernel();
28920+ }
28921
28922 if (enable_ept && !cpu_has_vmx_ept_2m_page())
28923 kvm_disable_largepages();
28924@@ -3105,13 +3116,15 @@ static __init int hardware_setup(void)
28925 if (!cpu_has_vmx_apicv())
28926 enable_apicv = 0;
28927
28928+ pax_open_kernel();
28929 if (enable_apicv)
28930- kvm_x86_ops->update_cr8_intercept = NULL;
28931+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28932 else {
28933- kvm_x86_ops->hwapic_irr_update = NULL;
28934- kvm_x86_ops->deliver_posted_interrupt = NULL;
28935- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28936+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
28937+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
28938+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28939 }
28940+ pax_close_kernel();
28941
28942 if (nested)
28943 nested_vmx_setup_ctls_msrs();
28944@@ -4221,7 +4234,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28945
28946 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
28947 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
28948+
28949+#ifndef CONFIG_PAX_PER_CPU_PGD
28950 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28951+#endif
28952
28953 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
28954 #ifdef CONFIG_X86_64
28955@@ -4243,7 +4259,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28956 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
28957 vmx->host_idt_base = dt.address;
28958
28959- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
28960+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
28961
28962 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
28963 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
28964@@ -7413,6 +7429,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28965 "jmp 2f \n\t"
28966 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
28967 "2: "
28968+
28969+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28970+ "ljmp %[cs],$3f\n\t"
28971+ "3: "
28972+#endif
28973+
28974 /* Save guest registers, load host registers, keep flags */
28975 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
28976 "pop %0 \n\t"
28977@@ -7465,6 +7487,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28978 #endif
28979 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
28980 [wordsize]"i"(sizeof(ulong))
28981+
28982+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28983+ ,[cs]"i"(__KERNEL_CS)
28984+#endif
28985+
28986 : "cc", "memory"
28987 #ifdef CONFIG_X86_64
28988 , "rax", "rbx", "rdi", "rsi"
28989@@ -7478,7 +7505,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28990 if (debugctlmsr)
28991 update_debugctlmsr(debugctlmsr);
28992
28993-#ifndef CONFIG_X86_64
28994+#ifdef CONFIG_X86_32
28995 /*
28996 * The sysexit path does not restore ds/es, so we must set them to
28997 * a reasonable value ourselves.
28998@@ -7487,8 +7514,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28999 * may be executed in interrupt context, which saves and restore segments
29000 * around it, nullifying its effect.
29001 */
29002- loadsegment(ds, __USER_DS);
29003- loadsegment(es, __USER_DS);
29004+ loadsegment(ds, __KERNEL_DS);
29005+ loadsegment(es, __KERNEL_DS);
29006+ loadsegment(ss, __KERNEL_DS);
29007+
29008+#ifdef CONFIG_PAX_KERNEXEC
29009+ loadsegment(fs, __KERNEL_PERCPU);
29010+#endif
29011+
29012+#ifdef CONFIG_PAX_MEMORY_UDEREF
29013+ __set_fs(current_thread_info()->addr_limit);
29014+#endif
29015+
29016 #endif
29017
29018 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
29019diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
29020index ef432f8..a630659 100644
29021--- a/arch/x86/kvm/x86.c
29022+++ b/arch/x86/kvm/x86.c
29023@@ -1808,8 +1808,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
29024 {
29025 struct kvm *kvm = vcpu->kvm;
29026 int lm = is_long_mode(vcpu);
29027- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
29028- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
29029+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
29030+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
29031 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
29032 : kvm->arch.xen_hvm_config.blob_size_32;
29033 u32 page_num = data & ~PAGE_MASK;
29034@@ -2729,6 +2729,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
29035 if (n < msr_list.nmsrs)
29036 goto out;
29037 r = -EFAULT;
29038+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
29039+ goto out;
29040 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
29041 num_msrs_to_save * sizeof(u32)))
29042 goto out;
29043@@ -5567,7 +5569,7 @@ static struct notifier_block pvclock_gtod_notifier = {
29044 };
29045 #endif
29046
29047-int kvm_arch_init(void *opaque)
29048+int kvm_arch_init(const void *opaque)
29049 {
29050 int r;
29051 struct kvm_x86_ops *ops = opaque;
29052diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
29053index aae9413..d11e829 100644
29054--- a/arch/x86/lguest/boot.c
29055+++ b/arch/x86/lguest/boot.c
29056@@ -1206,9 +1206,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
29057 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
29058 * Launcher to reboot us.
29059 */
29060-static void lguest_restart(char *reason)
29061+static __noreturn void lguest_restart(char *reason)
29062 {
29063 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
29064+ BUG();
29065 }
29066
29067 /*G:050
29068diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
29069index 00933d5..3a64af9 100644
29070--- a/arch/x86/lib/atomic64_386_32.S
29071+++ b/arch/x86/lib/atomic64_386_32.S
29072@@ -48,6 +48,10 @@ BEGIN(read)
29073 movl (v), %eax
29074 movl 4(v), %edx
29075 RET_ENDP
29076+BEGIN(read_unchecked)
29077+ movl (v), %eax
29078+ movl 4(v), %edx
29079+RET_ENDP
29080 #undef v
29081
29082 #define v %esi
29083@@ -55,6 +59,10 @@ BEGIN(set)
29084 movl %ebx, (v)
29085 movl %ecx, 4(v)
29086 RET_ENDP
29087+BEGIN(set_unchecked)
29088+ movl %ebx, (v)
29089+ movl %ecx, 4(v)
29090+RET_ENDP
29091 #undef v
29092
29093 #define v %esi
29094@@ -70,6 +78,20 @@ RET_ENDP
29095 BEGIN(add)
29096 addl %eax, (v)
29097 adcl %edx, 4(v)
29098+
29099+#ifdef CONFIG_PAX_REFCOUNT
29100+ jno 0f
29101+ subl %eax, (v)
29102+ sbbl %edx, 4(v)
29103+ int $4
29104+0:
29105+ _ASM_EXTABLE(0b, 0b)
29106+#endif
29107+
29108+RET_ENDP
29109+BEGIN(add_unchecked)
29110+ addl %eax, (v)
29111+ adcl %edx, 4(v)
29112 RET_ENDP
29113 #undef v
29114
29115@@ -77,6 +99,24 @@ RET_ENDP
29116 BEGIN(add_return)
29117 addl (v), %eax
29118 adcl 4(v), %edx
29119+
29120+#ifdef CONFIG_PAX_REFCOUNT
29121+ into
29122+1234:
29123+ _ASM_EXTABLE(1234b, 2f)
29124+#endif
29125+
29126+ movl %eax, (v)
29127+ movl %edx, 4(v)
29128+
29129+#ifdef CONFIG_PAX_REFCOUNT
29130+2:
29131+#endif
29132+
29133+RET_ENDP
29134+BEGIN(add_return_unchecked)
29135+ addl (v), %eax
29136+ adcl 4(v), %edx
29137 movl %eax, (v)
29138 movl %edx, 4(v)
29139 RET_ENDP
29140@@ -86,6 +126,20 @@ RET_ENDP
29141 BEGIN(sub)
29142 subl %eax, (v)
29143 sbbl %edx, 4(v)
29144+
29145+#ifdef CONFIG_PAX_REFCOUNT
29146+ jno 0f
29147+ addl %eax, (v)
29148+ adcl %edx, 4(v)
29149+ int $4
29150+0:
29151+ _ASM_EXTABLE(0b, 0b)
29152+#endif
29153+
29154+RET_ENDP
29155+BEGIN(sub_unchecked)
29156+ subl %eax, (v)
29157+ sbbl %edx, 4(v)
29158 RET_ENDP
29159 #undef v
29160
29161@@ -96,6 +150,27 @@ BEGIN(sub_return)
29162 sbbl $0, %edx
29163 addl (v), %eax
29164 adcl 4(v), %edx
29165+
29166+#ifdef CONFIG_PAX_REFCOUNT
29167+ into
29168+1234:
29169+ _ASM_EXTABLE(1234b, 2f)
29170+#endif
29171+
29172+ movl %eax, (v)
29173+ movl %edx, 4(v)
29174+
29175+#ifdef CONFIG_PAX_REFCOUNT
29176+2:
29177+#endif
29178+
29179+RET_ENDP
29180+BEGIN(sub_return_unchecked)
29181+ negl %edx
29182+ negl %eax
29183+ sbbl $0, %edx
29184+ addl (v), %eax
29185+ adcl 4(v), %edx
29186 movl %eax, (v)
29187 movl %edx, 4(v)
29188 RET_ENDP
29189@@ -105,6 +180,20 @@ RET_ENDP
29190 BEGIN(inc)
29191 addl $1, (v)
29192 adcl $0, 4(v)
29193+
29194+#ifdef CONFIG_PAX_REFCOUNT
29195+ jno 0f
29196+ subl $1, (v)
29197+ sbbl $0, 4(v)
29198+ int $4
29199+0:
29200+ _ASM_EXTABLE(0b, 0b)
29201+#endif
29202+
29203+RET_ENDP
29204+BEGIN(inc_unchecked)
29205+ addl $1, (v)
29206+ adcl $0, 4(v)
29207 RET_ENDP
29208 #undef v
29209
29210@@ -114,6 +203,26 @@ BEGIN(inc_return)
29211 movl 4(v), %edx
29212 addl $1, %eax
29213 adcl $0, %edx
29214+
29215+#ifdef CONFIG_PAX_REFCOUNT
29216+ into
29217+1234:
29218+ _ASM_EXTABLE(1234b, 2f)
29219+#endif
29220+
29221+ movl %eax, (v)
29222+ movl %edx, 4(v)
29223+
29224+#ifdef CONFIG_PAX_REFCOUNT
29225+2:
29226+#endif
29227+
29228+RET_ENDP
29229+BEGIN(inc_return_unchecked)
29230+ movl (v), %eax
29231+ movl 4(v), %edx
29232+ addl $1, %eax
29233+ adcl $0, %edx
29234 movl %eax, (v)
29235 movl %edx, 4(v)
29236 RET_ENDP
29237@@ -123,6 +232,20 @@ RET_ENDP
29238 BEGIN(dec)
29239 subl $1, (v)
29240 sbbl $0, 4(v)
29241+
29242+#ifdef CONFIG_PAX_REFCOUNT
29243+ jno 0f
29244+ addl $1, (v)
29245+ adcl $0, 4(v)
29246+ int $4
29247+0:
29248+ _ASM_EXTABLE(0b, 0b)
29249+#endif
29250+
29251+RET_ENDP
29252+BEGIN(dec_unchecked)
29253+ subl $1, (v)
29254+ sbbl $0, 4(v)
29255 RET_ENDP
29256 #undef v
29257
29258@@ -132,6 +255,26 @@ BEGIN(dec_return)
29259 movl 4(v), %edx
29260 subl $1, %eax
29261 sbbl $0, %edx
29262+
29263+#ifdef CONFIG_PAX_REFCOUNT
29264+ into
29265+1234:
29266+ _ASM_EXTABLE(1234b, 2f)
29267+#endif
29268+
29269+ movl %eax, (v)
29270+ movl %edx, 4(v)
29271+
29272+#ifdef CONFIG_PAX_REFCOUNT
29273+2:
29274+#endif
29275+
29276+RET_ENDP
29277+BEGIN(dec_return_unchecked)
29278+ movl (v), %eax
29279+ movl 4(v), %edx
29280+ subl $1, %eax
29281+ sbbl $0, %edx
29282 movl %eax, (v)
29283 movl %edx, 4(v)
29284 RET_ENDP
29285@@ -143,6 +286,13 @@ BEGIN(add_unless)
29286 adcl %edx, %edi
29287 addl (v), %eax
29288 adcl 4(v), %edx
29289+
29290+#ifdef CONFIG_PAX_REFCOUNT
29291+ into
29292+1234:
29293+ _ASM_EXTABLE(1234b, 2f)
29294+#endif
29295+
29296 cmpl %eax, %ecx
29297 je 3f
29298 1:
29299@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
29300 1:
29301 addl $1, %eax
29302 adcl $0, %edx
29303+
29304+#ifdef CONFIG_PAX_REFCOUNT
29305+ into
29306+1234:
29307+ _ASM_EXTABLE(1234b, 2f)
29308+#endif
29309+
29310 movl %eax, (v)
29311 movl %edx, 4(v)
29312 movl $1, %eax
29313@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
29314 movl 4(v), %edx
29315 subl $1, %eax
29316 sbbl $0, %edx
29317+
29318+#ifdef CONFIG_PAX_REFCOUNT
29319+ into
29320+1234:
29321+ _ASM_EXTABLE(1234b, 1f)
29322+#endif
29323+
29324 js 1f
29325 movl %eax, (v)
29326 movl %edx, 4(v)
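[Note on the atomic64_386_32.S hunk above: this is the PAX_REFCOUNT instrumentation for the 386 atomic64 fallbacks. After each add/sub/inc/dec, the jno (or into) path detects signed overflow, undoes the update, and traps with int $4 (#OF), which PaX turns into a refcount-overflow report; the *_unchecked variants are kept for counters that may legitimately wrap. A rough C analogue of the checked add, with __builtin_add_overflow and abort() standing in for the trap.]

    #include <stdint.h>
    #include <stdlib.h>
    #include <stdio.h>

    static void atomic64_add_checked(int64_t delta, int64_t *v)
    {
        int64_t res;
        if (__builtin_add_overflow(*v, delta, &res))
            abort();        /* the kernel raises #OF and reports instead */
        *v = res;
    }

    int main(void)
    {
        int64_t v = 40;
        atomic64_add_checked(2, &v);
        printf("%lld\n", (long long)v);   /* 42 */
        return 0;
    }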
29327diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
29328index f5cc9eb..51fa319 100644
29329--- a/arch/x86/lib/atomic64_cx8_32.S
29330+++ b/arch/x86/lib/atomic64_cx8_32.S
29331@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
29332 CFI_STARTPROC
29333
29334 read64 %ecx
29335+ pax_force_retaddr
29336 ret
29337 CFI_ENDPROC
29338 ENDPROC(atomic64_read_cx8)
29339
29340+ENTRY(atomic64_read_unchecked_cx8)
29341+ CFI_STARTPROC
29342+
29343+ read64 %ecx
29344+ pax_force_retaddr
29345+ ret
29346+ CFI_ENDPROC
29347+ENDPROC(atomic64_read_unchecked_cx8)
29348+
29349 ENTRY(atomic64_set_cx8)
29350 CFI_STARTPROC
29351
29352@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
29353 cmpxchg8b (%esi)
29354 jne 1b
29355
29356+ pax_force_retaddr
29357 ret
29358 CFI_ENDPROC
29359 ENDPROC(atomic64_set_cx8)
29360
29361+ENTRY(atomic64_set_unchecked_cx8)
29362+ CFI_STARTPROC
29363+
29364+1:
29365+/* we don't need LOCK_PREFIX since aligned 64-bit writes
29366+ * are atomic on 586 and newer */
29367+ cmpxchg8b (%esi)
29368+ jne 1b
29369+
29370+ pax_force_retaddr
29371+ ret
29372+ CFI_ENDPROC
29373+ENDPROC(atomic64_set_unchecked_cx8)
29374+
29375 ENTRY(atomic64_xchg_cx8)
29376 CFI_STARTPROC
29377
29378@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
29379 cmpxchg8b (%esi)
29380 jne 1b
29381
29382+ pax_force_retaddr
29383 ret
29384 CFI_ENDPROC
29385 ENDPROC(atomic64_xchg_cx8)
29386
29387-.macro addsub_return func ins insc
29388-ENTRY(atomic64_\func\()_return_cx8)
29389+.macro addsub_return func ins insc unchecked=""
29390+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29391 CFI_STARTPROC
29392 SAVE ebp
29393 SAVE ebx
29394@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
29395 movl %edx, %ecx
29396 \ins\()l %esi, %ebx
29397 \insc\()l %edi, %ecx
29398+
29399+.ifb \unchecked
29400+#ifdef CONFIG_PAX_REFCOUNT
29401+ into
29402+2:
29403+ _ASM_EXTABLE(2b, 3f)
29404+#endif
29405+.endif
29406+
29407 LOCK_PREFIX
29408 cmpxchg8b (%ebp)
29409 jne 1b
29410-
29411-10:
29412 movl %ebx, %eax
29413 movl %ecx, %edx
29414+
29415+.ifb \unchecked
29416+#ifdef CONFIG_PAX_REFCOUNT
29417+3:
29418+#endif
29419+.endif
29420+
29421 RESTORE edi
29422 RESTORE esi
29423 RESTORE ebx
29424 RESTORE ebp
29425+ pax_force_retaddr
29426 ret
29427 CFI_ENDPROC
29428-ENDPROC(atomic64_\func\()_return_cx8)
29429+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29430 .endm
29431
29432 addsub_return add add adc
29433 addsub_return sub sub sbb
29434+addsub_return add add adc _unchecked
29435+addsub_return sub sub sbb _unchecked
29436
29437-.macro incdec_return func ins insc
29438-ENTRY(atomic64_\func\()_return_cx8)
29439+.macro incdec_return func ins insc unchecked=""
29440+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29441 CFI_STARTPROC
29442 SAVE ebx
29443
29444@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
29445 movl %edx, %ecx
29446 \ins\()l $1, %ebx
29447 \insc\()l $0, %ecx
29448+
29449+.ifb \unchecked
29450+#ifdef CONFIG_PAX_REFCOUNT
29451+ into
29452+2:
29453+ _ASM_EXTABLE(2b, 3f)
29454+#endif
29455+.endif
29456+
29457 LOCK_PREFIX
29458 cmpxchg8b (%esi)
29459 jne 1b
29460
29461-10:
29462 movl %ebx, %eax
29463 movl %ecx, %edx
29464+
29465+.ifb \unchecked
29466+#ifdef CONFIG_PAX_REFCOUNT
29467+3:
29468+#endif
29469+.endif
29470+
29471 RESTORE ebx
29472+ pax_force_retaddr
29473 ret
29474 CFI_ENDPROC
29475-ENDPROC(atomic64_\func\()_return_cx8)
29476+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29477 .endm
29478
29479 incdec_return inc add adc
29480 incdec_return dec sub sbb
29481+incdec_return inc add adc _unchecked
29482+incdec_return dec sub sbb _unchecked
29483
29484 ENTRY(atomic64_dec_if_positive_cx8)
29485 CFI_STARTPROC
29486@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
29487 movl %edx, %ecx
29488 subl $1, %ebx
29489 sbb $0, %ecx
29490+
29491+#ifdef CONFIG_PAX_REFCOUNT
29492+ into
29493+1234:
29494+ _ASM_EXTABLE(1234b, 2f)
29495+#endif
29496+
29497 js 2f
29498 LOCK_PREFIX
29499 cmpxchg8b (%esi)
29500@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
29501 movl %ebx, %eax
29502 movl %ecx, %edx
29503 RESTORE ebx
29504+ pax_force_retaddr
29505 ret
29506 CFI_ENDPROC
29507 ENDPROC(atomic64_dec_if_positive_cx8)
29508@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
29509 movl %edx, %ecx
29510 addl %ebp, %ebx
29511 adcl %edi, %ecx
29512+
29513+#ifdef CONFIG_PAX_REFCOUNT
29514+ into
29515+1234:
29516+ _ASM_EXTABLE(1234b, 3f)
29517+#endif
29518+
29519 LOCK_PREFIX
29520 cmpxchg8b (%esi)
29521 jne 1b
29522@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
29523 CFI_ADJUST_CFA_OFFSET -8
29524 RESTORE ebx
29525 RESTORE ebp
29526+ pax_force_retaddr
29527 ret
29528 4:
29529 cmpl %edx, 4(%esp)
29530@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
29531 xorl %ecx, %ecx
29532 addl $1, %ebx
29533 adcl %edx, %ecx
29534+
29535+#ifdef CONFIG_PAX_REFCOUNT
29536+ into
29537+1234:
29538+ _ASM_EXTABLE(1234b, 3f)
29539+#endif
29540+
29541 LOCK_PREFIX
29542 cmpxchg8b (%esi)
29543 jne 1b
29544@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
29545 movl $1, %eax
29546 3:
29547 RESTORE ebx
29548+ pax_force_retaddr
29549 ret
29550 CFI_ENDPROC
29551 ENDPROC(atomic64_inc_not_zero_cx8)
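[Note on the cmpxchg8b variants above: they get the same overflow checks, plus pax_force_retaddr before every ret. As I understand the KERNEXEC return-address protection, that macro forces the saved return address back into the kernel half of the address space, roughly by setting the top bit, so a return address corrupted to point at userland becomes non-canonical and faults instead of executing. Conceptual bit math only; this is not the macro's literal expansion.]

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t force_retaddr(uint64_t ra)
    {
        return ra | (1ULL << 63);   /* kernel-half addresses already have it */
    }

    int main(void)
    {
        printf("%#llx\n",
               (unsigned long long)force_retaddr(0x00007f0012345678ULL));
        return 0;
    }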
29552diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
29553index e78b8eee..7e173a8 100644
29554--- a/arch/x86/lib/checksum_32.S
29555+++ b/arch/x86/lib/checksum_32.S
29556@@ -29,7 +29,8 @@
29557 #include <asm/dwarf2.h>
29558 #include <asm/errno.h>
29559 #include <asm/asm.h>
29560-
29561+#include <asm/segment.h>
29562+
29563 /*
29564 * computes a partial checksum, e.g. for TCP/UDP fragments
29565 */
29566@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
29567
29568 #define ARGBASE 16
29569 #define FP 12
29570-
29571-ENTRY(csum_partial_copy_generic)
29572+
29573+ENTRY(csum_partial_copy_generic_to_user)
29574 CFI_STARTPROC
29575+
29576+#ifdef CONFIG_PAX_MEMORY_UDEREF
29577+ pushl_cfi %gs
29578+ popl_cfi %es
29579+ jmp csum_partial_copy_generic
29580+#endif
29581+
29582+ENTRY(csum_partial_copy_generic_from_user)
29583+
29584+#ifdef CONFIG_PAX_MEMORY_UDEREF
29585+ pushl_cfi %gs
29586+ popl_cfi %ds
29587+#endif
29588+
29589+ENTRY(csum_partial_copy_generic)
29590 subl $4,%esp
29591 CFI_ADJUST_CFA_OFFSET 4
29592 pushl_cfi %edi
29593@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
29594 jmp 4f
29595 SRC(1: movw (%esi), %bx )
29596 addl $2, %esi
29597-DST( movw %bx, (%edi) )
29598+DST( movw %bx, %es:(%edi) )
29599 addl $2, %edi
29600 addw %bx, %ax
29601 adcl $0, %eax
29602@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
29603 SRC(1: movl (%esi), %ebx )
29604 SRC( movl 4(%esi), %edx )
29605 adcl %ebx, %eax
29606-DST( movl %ebx, (%edi) )
29607+DST( movl %ebx, %es:(%edi) )
29608 adcl %edx, %eax
29609-DST( movl %edx, 4(%edi) )
29610+DST( movl %edx, %es:4(%edi) )
29611
29612 SRC( movl 8(%esi), %ebx )
29613 SRC( movl 12(%esi), %edx )
29614 adcl %ebx, %eax
29615-DST( movl %ebx, 8(%edi) )
29616+DST( movl %ebx, %es:8(%edi) )
29617 adcl %edx, %eax
29618-DST( movl %edx, 12(%edi) )
29619+DST( movl %edx, %es:12(%edi) )
29620
29621 SRC( movl 16(%esi), %ebx )
29622 SRC( movl 20(%esi), %edx )
29623 adcl %ebx, %eax
29624-DST( movl %ebx, 16(%edi) )
29625+DST( movl %ebx, %es:16(%edi) )
29626 adcl %edx, %eax
29627-DST( movl %edx, 20(%edi) )
29628+DST( movl %edx, %es:20(%edi) )
29629
29630 SRC( movl 24(%esi), %ebx )
29631 SRC( movl 28(%esi), %edx )
29632 adcl %ebx, %eax
29633-DST( movl %ebx, 24(%edi) )
29634+DST( movl %ebx, %es:24(%edi) )
29635 adcl %edx, %eax
29636-DST( movl %edx, 28(%edi) )
29637+DST( movl %edx, %es:28(%edi) )
29638
29639 lea 32(%esi), %esi
29640 lea 32(%edi), %edi
29641@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
29642 shrl $2, %edx # This clears CF
29643 SRC(3: movl (%esi), %ebx )
29644 adcl %ebx, %eax
29645-DST( movl %ebx, (%edi) )
29646+DST( movl %ebx, %es:(%edi) )
29647 lea 4(%esi), %esi
29648 lea 4(%edi), %edi
29649 dec %edx
29650@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
29651 jb 5f
29652 SRC( movw (%esi), %cx )
29653 leal 2(%esi), %esi
29654-DST( movw %cx, (%edi) )
29655+DST( movw %cx, %es:(%edi) )
29656 leal 2(%edi), %edi
29657 je 6f
29658 shll $16,%ecx
29659 SRC(5: movb (%esi), %cl )
29660-DST( movb %cl, (%edi) )
29661+DST( movb %cl, %es:(%edi) )
29662 6: addl %ecx, %eax
29663 adcl $0, %eax
29664 7:
29665@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
29666
29667 6001:
29668 movl ARGBASE+20(%esp), %ebx # src_err_ptr
29669- movl $-EFAULT, (%ebx)
29670+ movl $-EFAULT, %ss:(%ebx)
29671
29672 # zero the complete destination - computing the rest
29673 # is too much work
29674@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
29675
29676 6002:
29677 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29678- movl $-EFAULT,(%ebx)
29679+ movl $-EFAULT,%ss:(%ebx)
29680 jmp 5000b
29681
29682 .previous
29683
29684+ pushl_cfi %ss
29685+ popl_cfi %ds
29686+ pushl_cfi %ss
29687+ popl_cfi %es
29688 popl_cfi %ebx
29689 CFI_RESTORE ebx
29690 popl_cfi %esi
29691@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
29692 popl_cfi %ecx # equivalent to addl $4,%esp
29693 ret
29694 CFI_ENDPROC
29695-ENDPROC(csum_partial_copy_generic)
29696+ENDPROC(csum_partial_copy_generic_to_user)
29697
29698 #else
29699
29700 /* Version for PentiumII/PPro */
29701
29702 #define ROUND1(x) \
29703+ nop; nop; nop; \
29704 SRC(movl x(%esi), %ebx ) ; \
29705 addl %ebx, %eax ; \
29706- DST(movl %ebx, x(%edi) ) ;
29707+ DST(movl %ebx, %es:x(%edi)) ;
29708
29709 #define ROUND(x) \
29710+ nop; nop; nop; \
29711 SRC(movl x(%esi), %ebx ) ; \
29712 adcl %ebx, %eax ; \
29713- DST(movl %ebx, x(%edi) ) ;
29714+ DST(movl %ebx, %es:x(%edi)) ;
29715
29716 #define ARGBASE 12
29717-
29718-ENTRY(csum_partial_copy_generic)
29719+
29720+ENTRY(csum_partial_copy_generic_to_user)
29721 CFI_STARTPROC
29722+
29723+#ifdef CONFIG_PAX_MEMORY_UDEREF
29724+ pushl_cfi %gs
29725+ popl_cfi %es
29726+ jmp csum_partial_copy_generic
29727+#endif
29728+
29729+ENTRY(csum_partial_copy_generic_from_user)
29730+
29731+#ifdef CONFIG_PAX_MEMORY_UDEREF
29732+ pushl_cfi %gs
29733+ popl_cfi %ds
29734+#endif
29735+
29736+ENTRY(csum_partial_copy_generic)
29737 pushl_cfi %ebx
29738 CFI_REL_OFFSET ebx, 0
29739 pushl_cfi %edi
29740@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
29741 subl %ebx, %edi
29742 lea -1(%esi),%edx
29743 andl $-32,%edx
29744- lea 3f(%ebx,%ebx), %ebx
29745+ lea 3f(%ebx,%ebx,2), %ebx
29746 testl %esi, %esi
29747 jmp *%ebx
29748 1: addl $64,%esi
29749@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
29750 jb 5f
29751 SRC( movw (%esi), %dx )
29752 leal 2(%esi), %esi
29753-DST( movw %dx, (%edi) )
29754+DST( movw %dx, %es:(%edi) )
29755 leal 2(%edi), %edi
29756 je 6f
29757 shll $16,%edx
29758 5:
29759 SRC( movb (%esi), %dl )
29760-DST( movb %dl, (%edi) )
29761+DST( movb %dl, %es:(%edi) )
29762 6: addl %edx, %eax
29763 adcl $0, %eax
29764 7:
29765 .section .fixup, "ax"
29766 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
29767- movl $-EFAULT, (%ebx)
29768+ movl $-EFAULT, %ss:(%ebx)
29769 # zero the complete destination (computing the rest is too much work)
29770 movl ARGBASE+8(%esp),%edi # dst
29771 movl ARGBASE+12(%esp),%ecx # len
29772@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
29773 rep; stosb
29774 jmp 7b
29775 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29776- movl $-EFAULT, (%ebx)
29777+ movl $-EFAULT, %ss:(%ebx)
29778 jmp 7b
29779 .previous
29780
29781+#ifdef CONFIG_PAX_MEMORY_UDEREF
29782+ pushl_cfi %ss
29783+ popl_cfi %ds
29784+ pushl_cfi %ss
29785+ popl_cfi %es
29786+#endif
29787+
29788 popl_cfi %esi
29789 CFI_RESTORE esi
29790 popl_cfi %edi
29791@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
29792 CFI_RESTORE ebx
29793 ret
29794 CFI_ENDPROC
29795-ENDPROC(csum_partial_copy_generic)
29796+ENDPROC(csum_partial_copy_generic_to_user)
29797
29798 #undef ROUND
29799 #undef ROUND1
29800diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
29801index f2145cf..cea889d 100644
29802--- a/arch/x86/lib/clear_page_64.S
29803+++ b/arch/x86/lib/clear_page_64.S
29804@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
29805 movl $4096/8,%ecx
29806 xorl %eax,%eax
29807 rep stosq
29808+ pax_force_retaddr
29809 ret
29810 CFI_ENDPROC
29811 ENDPROC(clear_page_c)
29812@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
29813 movl $4096,%ecx
29814 xorl %eax,%eax
29815 rep stosb
29816+ pax_force_retaddr
29817 ret
29818 CFI_ENDPROC
29819 ENDPROC(clear_page_c_e)
29820@@ -43,6 +45,7 @@ ENTRY(clear_page)
29821 leaq 64(%rdi),%rdi
29822 jnz .Lloop
29823 nop
29824+ pax_force_retaddr
29825 ret
29826 CFI_ENDPROC
29827 .Lclear_page_end:
29828@@ -58,7 +61,7 @@ ENDPROC(clear_page)
29829
29830 #include <asm/cpufeature.h>
29831
29832- .section .altinstr_replacement,"ax"
29833+ .section .altinstr_replacement,"a"
29834 1: .byte 0xeb /* jmp <disp8> */
29835 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
29836 2: .byte 0xeb /* jmp <disp8> */
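[Note on the recurring "ax" -> "a" edit on .altinstr_replacement, first seen here in clear_page_64.S: it drops the executable flag from the replacement section. Those bytes are only ever copied over call sites by apply_alternatives() at patch time and never executed in place, so under KERNEXEC they need not, and should not, live in an executable mapping. A loose analogue using a named, allocatable-but-not-executable section (GCC attribute syntax; section name invented).]

    #include <stdio.h>
    #include <string.h>

    __attribute__((section(".patch_bytes"), aligned(8)))
    static const unsigned char replacement[4] = { 1, 2, 3, 4 };

    int main(void)
    {
        unsigned char live[4];
        memcpy(live, replacement, sizeof(live)); /* apply_alternatives() analogue */
        printf("%d\n", live[0] + live[3]);       /* 5 */
        return 0;
    }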
29837diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
29838index 1e572c5..2a162cd 100644
29839--- a/arch/x86/lib/cmpxchg16b_emu.S
29840+++ b/arch/x86/lib/cmpxchg16b_emu.S
29841@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
29842
29843 popf
29844 mov $1, %al
29845+ pax_force_retaddr
29846 ret
29847
29848 not_same:
29849 popf
29850 xor %al,%al
29851+ pax_force_retaddr
29852 ret
29853
29854 CFI_ENDPROC
29855diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
29856index 176cca6..e0d658e 100644
29857--- a/arch/x86/lib/copy_page_64.S
29858+++ b/arch/x86/lib/copy_page_64.S
29859@@ -9,6 +9,7 @@ copy_page_rep:
29860 CFI_STARTPROC
29861 movl $4096/8, %ecx
29862 rep movsq
29863+ pax_force_retaddr
29864 ret
29865 CFI_ENDPROC
29866 ENDPROC(copy_page_rep)
29867@@ -24,8 +25,8 @@ ENTRY(copy_page)
29868 CFI_ADJUST_CFA_OFFSET 2*8
29869 movq %rbx, (%rsp)
29870 CFI_REL_OFFSET rbx, 0
29871- movq %r12, 1*8(%rsp)
29872- CFI_REL_OFFSET r12, 1*8
29873+ movq %r13, 1*8(%rsp)
29874+ CFI_REL_OFFSET r13, 1*8
29875
29876 movl $(4096/64)-5, %ecx
29877 .p2align 4
29878@@ -38,7 +39,7 @@ ENTRY(copy_page)
29879 movq 0x8*4(%rsi), %r9
29880 movq 0x8*5(%rsi), %r10
29881 movq 0x8*6(%rsi), %r11
29882- movq 0x8*7(%rsi), %r12
29883+ movq 0x8*7(%rsi), %r13
29884
29885 prefetcht0 5*64(%rsi)
29886
29887@@ -49,7 +50,7 @@ ENTRY(copy_page)
29888 movq %r9, 0x8*4(%rdi)
29889 movq %r10, 0x8*5(%rdi)
29890 movq %r11, 0x8*6(%rdi)
29891- movq %r12, 0x8*7(%rdi)
29892+ movq %r13, 0x8*7(%rdi)
29893
29894 leaq 64 (%rsi), %rsi
29895 leaq 64 (%rdi), %rdi
29896@@ -68,7 +69,7 @@ ENTRY(copy_page)
29897 movq 0x8*4(%rsi), %r9
29898 movq 0x8*5(%rsi), %r10
29899 movq 0x8*6(%rsi), %r11
29900- movq 0x8*7(%rsi), %r12
29901+ movq 0x8*7(%rsi), %r13
29902
29903 movq %rax, 0x8*0(%rdi)
29904 movq %rbx, 0x8*1(%rdi)
29905@@ -77,7 +78,7 @@ ENTRY(copy_page)
29906 movq %r9, 0x8*4(%rdi)
29907 movq %r10, 0x8*5(%rdi)
29908 movq %r11, 0x8*6(%rdi)
29909- movq %r12, 0x8*7(%rdi)
29910+ movq %r13, 0x8*7(%rdi)
29911
29912 leaq 64(%rdi), %rdi
29913 leaq 64(%rsi), %rsi
29914@@ -85,10 +86,11 @@ ENTRY(copy_page)
29915
29916 movq (%rsp), %rbx
29917 CFI_RESTORE rbx
29918- movq 1*8(%rsp), %r12
29919- CFI_RESTORE r12
29920+ movq 1*8(%rsp), %r13
29921+ CFI_RESTORE r13
29922 addq $2*8, %rsp
29923 CFI_ADJUST_CFA_OFFSET -2*8
29924+ pax_force_retaddr
29925 ret
29926 .Lcopy_page_end:
29927 CFI_ENDPROC
29928@@ -99,7 +101,7 @@ ENDPROC(copy_page)
29929
29930 #include <asm/cpufeature.h>
29931
29932- .section .altinstr_replacement,"ax"
29933+ .section .altinstr_replacement,"a"
29934 1: .byte 0xeb /* jmp <disp8> */
29935 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
29936 2:
29937diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
29938index dee945d..a84067b 100644
29939--- a/arch/x86/lib/copy_user_64.S
29940+++ b/arch/x86/lib/copy_user_64.S
29941@@ -18,31 +18,7 @@
29942 #include <asm/alternative-asm.h>
29943 #include <asm/asm.h>
29944 #include <asm/smap.h>
29945-
29946-/*
29947- * By placing feature2 after feature1 in altinstructions section, we logically
29948- * implement:
29949- * If CPU has feature2, jmp to alt2 is used
29950- * else if CPU has feature1, jmp to alt1 is used
29951- * else jmp to orig is used.
29952- */
29953- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
29954-0:
29955- .byte 0xe9 /* 32bit jump */
29956- .long \orig-1f /* by default jump to orig */
29957-1:
29958- .section .altinstr_replacement,"ax"
29959-2: .byte 0xe9 /* near jump with 32bit immediate */
29960- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
29961-3: .byte 0xe9 /* near jump with 32bit immediate */
29962- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
29963- .previous
29964-
29965- .section .altinstructions,"a"
29966- altinstruction_entry 0b,2b,\feature1,5,5
29967- altinstruction_entry 0b,3b,\feature2,5,5
29968- .previous
29969- .endm
29970+#include <asm/pgtable.h>
29971
29972 .macro ALIGN_DESTINATION
29973 #ifdef FIX_ALIGNMENT
29974@@ -70,52 +46,6 @@
29975 #endif
29976 .endm
29977
29978-/* Standard copy_to_user with segment limit checking */
29979-ENTRY(_copy_to_user)
29980- CFI_STARTPROC
29981- GET_THREAD_INFO(%rax)
29982- movq %rdi,%rcx
29983- addq %rdx,%rcx
29984- jc bad_to_user
29985- cmpq TI_addr_limit(%rax),%rcx
29986- ja bad_to_user
29987- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29988- copy_user_generic_unrolled,copy_user_generic_string, \
29989- copy_user_enhanced_fast_string
29990- CFI_ENDPROC
29991-ENDPROC(_copy_to_user)
29992-
29993-/* Standard copy_from_user with segment limit checking */
29994-ENTRY(_copy_from_user)
29995- CFI_STARTPROC
29996- GET_THREAD_INFO(%rax)
29997- movq %rsi,%rcx
29998- addq %rdx,%rcx
29999- jc bad_from_user
30000- cmpq TI_addr_limit(%rax),%rcx
30001- ja bad_from_user
30002- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
30003- copy_user_generic_unrolled,copy_user_generic_string, \
30004- copy_user_enhanced_fast_string
30005- CFI_ENDPROC
30006-ENDPROC(_copy_from_user)
30007-
30008- .section .fixup,"ax"
30009- /* must zero dest */
30010-ENTRY(bad_from_user)
30011-bad_from_user:
30012- CFI_STARTPROC
30013- movl %edx,%ecx
30014- xorl %eax,%eax
30015- rep
30016- stosb
30017-bad_to_user:
30018- movl %edx,%eax
30019- ret
30020- CFI_ENDPROC
30021-ENDPROC(bad_from_user)
30022- .previous
30023-
30024 /*
30025 * copy_user_generic_unrolled - memory copy with exception handling.
30026 * This version is for CPUs like P4 that don't have efficient micro
30027@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
30028 */
30029 ENTRY(copy_user_generic_unrolled)
30030 CFI_STARTPROC
30031+ ASM_PAX_OPEN_USERLAND
30032 ASM_STAC
30033 cmpl $8,%edx
30034 jb 20f /* less then 8 bytes, go to byte copy loop */
30035@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
30036 jnz 21b
30037 23: xor %eax,%eax
30038 ASM_CLAC
30039+ ASM_PAX_CLOSE_USERLAND
30040+ pax_force_retaddr
30041 ret
30042
30043 .section .fixup,"ax"
30044@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
30045 */
30046 ENTRY(copy_user_generic_string)
30047 CFI_STARTPROC
30048+ ASM_PAX_OPEN_USERLAND
30049 ASM_STAC
30050 cmpl $8,%edx
30051 jb 2f /* less than 8 bytes, go to byte copy loop */
30052@@ -249,6 +183,8 @@ ENTRY(copy_user_generic_string)
30053 movsb
30054 xorl %eax,%eax
30055 ASM_CLAC
30056+ ASM_PAX_CLOSE_USERLAND
30057+ pax_force_retaddr
30058 ret
30059
30060 .section .fixup,"ax"
30061@@ -276,12 +212,15 @@ ENDPROC(copy_user_generic_string)
30062 */
30063 ENTRY(copy_user_enhanced_fast_string)
30064 CFI_STARTPROC
30065+ ASM_PAX_OPEN_USERLAND
30066 ASM_STAC
30067 movl %edx,%ecx
30068 1: rep
30069 movsb
30070 xorl %eax,%eax
30071 ASM_CLAC
30072+ ASM_PAX_CLOSE_USERLAND
30073+ pax_force_retaddr
30074 ret
30075
30076 .section .fixup,"ax"
30077diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
30078index 6a4f43c..c70fb52 100644
30079--- a/arch/x86/lib/copy_user_nocache_64.S
30080+++ b/arch/x86/lib/copy_user_nocache_64.S
30081@@ -8,6 +8,7 @@
30082
30083 #include <linux/linkage.h>
30084 #include <asm/dwarf2.h>
30085+#include <asm/alternative-asm.h>
30086
30087 #define FIX_ALIGNMENT 1
30088
30089@@ -16,6 +17,7 @@
30090 #include <asm/thread_info.h>
30091 #include <asm/asm.h>
30092 #include <asm/smap.h>
30093+#include <asm/pgtable.h>
30094
30095 .macro ALIGN_DESTINATION
30096 #ifdef FIX_ALIGNMENT
30097@@ -49,6 +51,16 @@
30098 */
30099 ENTRY(__copy_user_nocache)
30100 CFI_STARTPROC
30101+
30102+#ifdef CONFIG_PAX_MEMORY_UDEREF
30103+ mov pax_user_shadow_base,%rcx
30104+ cmp %rcx,%rsi
30105+ jae 1f
30106+ add %rcx,%rsi
30107+1:
30108+#endif
30109+
30110+ ASM_PAX_OPEN_USERLAND
30111 ASM_STAC
30112 cmpl $8,%edx
30113 jb 20f /* less then 8 bytes, go to byte copy loop */
30114@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
30115 jnz 21b
30116 23: xorl %eax,%eax
30117 ASM_CLAC
30118+ ASM_PAX_CLOSE_USERLAND
30119 sfence
30120+ pax_force_retaddr
30121 ret
30122
30123 .section .fixup,"ax"
30124diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
30125index 2419d5f..fe52d0e 100644
30126--- a/arch/x86/lib/csum-copy_64.S
30127+++ b/arch/x86/lib/csum-copy_64.S
30128@@ -9,6 +9,7 @@
30129 #include <asm/dwarf2.h>
30130 #include <asm/errno.h>
30131 #include <asm/asm.h>
30132+#include <asm/alternative-asm.h>
30133
30134 /*
30135 * Checksum copy with exception handling.
30136@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
30137 CFI_ADJUST_CFA_OFFSET 7*8
30138 movq %rbx, 2*8(%rsp)
30139 CFI_REL_OFFSET rbx, 2*8
30140- movq %r12, 3*8(%rsp)
30141- CFI_REL_OFFSET r12, 3*8
30142+ movq %r15, 3*8(%rsp)
30143+ CFI_REL_OFFSET r15, 3*8
30144 movq %r14, 4*8(%rsp)
30145 CFI_REL_OFFSET r14, 4*8
30146 movq %r13, 5*8(%rsp)
30147@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
30148 movl %edx, %ecx
30149
30150 xorl %r9d, %r9d
30151- movq %rcx, %r12
30152+ movq %rcx, %r15
30153
30154- shrq $6, %r12
30155+ shrq $6, %r15
30156 jz .Lhandle_tail /* < 64 */
30157
30158 clc
30159
30160 /* main loop. clear in 64 byte blocks */
30161 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
30162- /* r11: temp3, rdx: temp4, r12 loopcnt */
30163+ /* r11: temp3, rdx: temp4, r15 loopcnt */
30164 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
30165 .p2align 4
30166 .Lloop:
30167@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
30168 adcq %r14, %rax
30169 adcq %r13, %rax
30170
30171- decl %r12d
30172+ decl %r15d
30173
30174 dest
30175 movq %rbx, (%rsi)
30176@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
30177 .Lende:
30178 movq 2*8(%rsp), %rbx
30179 CFI_RESTORE rbx
30180- movq 3*8(%rsp), %r12
30181- CFI_RESTORE r12
30182+ movq 3*8(%rsp), %r15
30183+ CFI_RESTORE r15
30184 movq 4*8(%rsp), %r14
30185 CFI_RESTORE r14
30186 movq 5*8(%rsp), %r13
30187@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
30188 CFI_RESTORE rbp
30189 addq $7*8, %rsp
30190 CFI_ADJUST_CFA_OFFSET -7*8
30191+ pax_force_retaddr
30192 ret
30193 CFI_RESTORE_STATE
30194
30195diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
30196index 7609e0e..b449b98 100644
30197--- a/arch/x86/lib/csum-wrappers_64.c
30198+++ b/arch/x86/lib/csum-wrappers_64.c
30199@@ -53,10 +53,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
30200 len -= 2;
30201 }
30202 }
30203+ pax_open_userland();
30204 stac();
30205- isum = csum_partial_copy_generic((__force const void *)src,
30206+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
30207 dst, len, isum, errp, NULL);
30208 clac();
30209+ pax_close_userland();
30210 if (unlikely(*errp))
30211 goto out_err;
30212
30213@@ -110,10 +112,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
30214 }
30215
30216 *errp = 0;
30217+ pax_open_userland();
30218 stac();
30219- ret = csum_partial_copy_generic(src, (void __force *)dst,
30220+ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
30221 len, isum, NULL, errp);
30222 clac();
30223+ pax_close_userland();
30224 return ret;
30225 }
30226 EXPORT_SYMBOL(csum_partial_copy_to_user);
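[Note on the csum-wrappers_64.c hunk above: each user copy is now bracketed with pax_open_userland()/pax_close_userland() around stac()/clac(), and the pointer is remapped through ____m(). Under UDEREF, userland is only dereferenceable inside such a bracket, conceptually like SMAP. A toggle-flag model of the bracketing, with assert() playing the role of the fault; all names are invented.]

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    static int userland_open;

    static void open_userland(void)  { userland_open = 1; }
    static void close_userland(void) { userland_open = 0; }

    static void copy_from_user_demo(void *dst, const void *usrc, size_t n)
    {
        assert(userland_open);   /* UDEREF would fault here if not opened */
        memcpy(dst, usrc, n);
    }

    int main(void)
    {
        char u[4] = "xyz", k[4];
        open_userland();
        copy_from_user_demo(k, u, sizeof(k));
        close_userland();
        printf("%s\n", k);
        return 0;
    }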
30227diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
30228index a451235..1daa956 100644
30229--- a/arch/x86/lib/getuser.S
30230+++ b/arch/x86/lib/getuser.S
30231@@ -33,17 +33,40 @@
30232 #include <asm/thread_info.h>
30233 #include <asm/asm.h>
30234 #include <asm/smap.h>
30235+#include <asm/segment.h>
30236+#include <asm/pgtable.h>
30237+#include <asm/alternative-asm.h>
30238+
30239+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30240+#define __copyuser_seg gs;
30241+#else
30242+#define __copyuser_seg
30243+#endif
30244
30245 .text
30246 ENTRY(__get_user_1)
30247 CFI_STARTPROC
30248+
30249+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30250 GET_THREAD_INFO(%_ASM_DX)
30251 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30252 jae bad_get_user
30253 ASM_STAC
30254-1: movzbl (%_ASM_AX),%edx
30255+
30256+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30257+ mov pax_user_shadow_base,%_ASM_DX
30258+ cmp %_ASM_DX,%_ASM_AX
30259+ jae 1234f
30260+ add %_ASM_DX,%_ASM_AX
30261+1234:
30262+#endif
30263+
30264+#endif
30265+
30266+1: __copyuser_seg movzbl (%_ASM_AX),%edx
30267 xor %eax,%eax
30268 ASM_CLAC
30269+ pax_force_retaddr
30270 ret
30271 CFI_ENDPROC
30272 ENDPROC(__get_user_1)
30273@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
30274 ENTRY(__get_user_2)
30275 CFI_STARTPROC
30276 add $1,%_ASM_AX
30277+
30278+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30279 jc bad_get_user
30280 GET_THREAD_INFO(%_ASM_DX)
30281 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30282 jae bad_get_user
30283 ASM_STAC
30284-2: movzwl -1(%_ASM_AX),%edx
30285+
30286+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30287+ mov pax_user_shadow_base,%_ASM_DX
30288+ cmp %_ASM_DX,%_ASM_AX
30289+ jae 1234f
30290+ add %_ASM_DX,%_ASM_AX
30291+1234:
30292+#endif
30293+
30294+#endif
30295+
30296+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
30297 xor %eax,%eax
30298 ASM_CLAC
30299+ pax_force_retaddr
30300 ret
30301 CFI_ENDPROC
30302 ENDPROC(__get_user_2)
30303@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
30304 ENTRY(__get_user_4)
30305 CFI_STARTPROC
30306 add $3,%_ASM_AX
30307+
30308+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30309 jc bad_get_user
30310 GET_THREAD_INFO(%_ASM_DX)
30311 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30312 jae bad_get_user
30313 ASM_STAC
30314-3: movl -3(%_ASM_AX),%edx
30315+
30316+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30317+ mov pax_user_shadow_base,%_ASM_DX
30318+ cmp %_ASM_DX,%_ASM_AX
30319+ jae 1234f
30320+ add %_ASM_DX,%_ASM_AX
30321+1234:
30322+#endif
30323+
30324+#endif
30325+
30326+3: __copyuser_seg movl -3(%_ASM_AX),%edx
30327 xor %eax,%eax
30328 ASM_CLAC
30329+ pax_force_retaddr
30330 ret
30331 CFI_ENDPROC
30332 ENDPROC(__get_user_4)
30333@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
30334 GET_THREAD_INFO(%_ASM_DX)
30335 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30336 jae bad_get_user
30337+
30338+#ifdef CONFIG_PAX_MEMORY_UDEREF
30339+ mov pax_user_shadow_base,%_ASM_DX
30340+ cmp %_ASM_DX,%_ASM_AX
30341+ jae 1234f
30342+ add %_ASM_DX,%_ASM_AX
30343+1234:
30344+#endif
30345+
30346 ASM_STAC
30347 4: movq -7(%_ASM_AX),%rdx
30348 xor %eax,%eax
30349 ASM_CLAC
30350+ pax_force_retaddr
30351 ret
30352 #else
30353 add $7,%_ASM_AX
30354@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
30355 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30356 jae bad_get_user_8
30357 ASM_STAC
30358-4: movl -7(%_ASM_AX),%edx
30359-5: movl -3(%_ASM_AX),%ecx
30360+4: __copyuser_seg movl -7(%_ASM_AX),%edx
30361+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
30362 xor %eax,%eax
30363 ASM_CLAC
30364+ pax_force_retaddr
30365 ret
30366 #endif
30367 CFI_ENDPROC
30368@@ -113,6 +175,7 @@ bad_get_user:
30369 xor %edx,%edx
30370 mov $(-EFAULT),%_ASM_AX
30371 ASM_CLAC
30372+ pax_force_retaddr
30373 ret
30374 CFI_ENDPROC
30375 END(bad_get_user)
30376@@ -124,6 +187,7 @@ bad_get_user_8:
30377 xor %ecx,%ecx
30378 mov $(-EFAULT),%_ASM_AX
30379 ASM_CLAC
30380+ pax_force_retaddr
30381 ret
30382 CFI_ENDPROC
30383 END(bad_get_user_8)
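[Note on the getuser.S hunk above: on amd64 with PAX_MEMORY_UDEREF the __get_user_N stubs relocate the incoming pointer, adding pax_user_shadow_base when the address is below it so the access goes through the shadowed userland mapping; on i386 the same effect comes from the %gs-based __copyuser_seg override instead. Minimal model of the 64-bit adjustment; the base constant is invented.]

    #include <stdint.h>
    #include <stdio.h>

    #define SHADOW_BASE_DEMO 0x100000UL   /* illustrative, not the real value */

    static uintptr_t uaccess_translate(uintptr_t addr)
    {
        if (addr < SHADOW_BASE_DEMO)
            addr += SHADOW_BASE_DEMO;     /* mirrors the add %_ASM_DX,%_ASM_AX */
        return addr;
    }

    int main(void)
    {
        printf("%#lx\n", (unsigned long)uaccess_translate(0x1000));
        return 0;
    }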
30384diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
30385index 54fcffe..7be149e 100644
30386--- a/arch/x86/lib/insn.c
30387+++ b/arch/x86/lib/insn.c
30388@@ -20,8 +20,10 @@
30389
30390 #ifdef __KERNEL__
30391 #include <linux/string.h>
30392+#include <asm/pgtable_types.h>
30393 #else
30394 #include <string.h>
30395+#define ktla_ktva(addr) addr
30396 #endif
30397 #include <asm/inat.h>
30398 #include <asm/insn.h>
30399@@ -53,8 +55,8 @@
30400 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
30401 {
30402 memset(insn, 0, sizeof(*insn));
30403- insn->kaddr = kaddr;
30404- insn->next_byte = kaddr;
30405+ insn->kaddr = ktla_ktva(kaddr);
30406+ insn->next_byte = ktla_ktva(kaddr);
30407 insn->x86_64 = x86_64 ? 1 : 0;
30408 insn->opnd_bytes = 4;
30409 if (x86_64)
30410diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
30411index 05a95e7..326f2fa 100644
30412--- a/arch/x86/lib/iomap_copy_64.S
30413+++ b/arch/x86/lib/iomap_copy_64.S
30414@@ -17,6 +17,7 @@
30415
30416 #include <linux/linkage.h>
30417 #include <asm/dwarf2.h>
30418+#include <asm/alternative-asm.h>
30419
30420 /*
30421 * override generic version in lib/iomap_copy.c
30422@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
30423 CFI_STARTPROC
30424 movl %edx,%ecx
30425 rep movsd
30426+ pax_force_retaddr
30427 ret
30428 CFI_ENDPROC
30429 ENDPROC(__iowrite32_copy)
30430diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
30431index 56313a3..0db417e 100644
30432--- a/arch/x86/lib/memcpy_64.S
30433+++ b/arch/x86/lib/memcpy_64.S
30434@@ -24,7 +24,7 @@
30435 * This gets patched over the unrolled variant (below) via the
30436 * alternative instructions framework:
30437 */
30438- .section .altinstr_replacement, "ax", @progbits
30439+ .section .altinstr_replacement, "a", @progbits
30440 .Lmemcpy_c:
30441 movq %rdi, %rax
30442 movq %rdx, %rcx
30443@@ -33,6 +33,7 @@
30444 rep movsq
30445 movl %edx, %ecx
30446 rep movsb
30447+ pax_force_retaddr
30448 ret
30449 .Lmemcpy_e:
30450 .previous
30451@@ -44,11 +45,12 @@
30452 * This gets patched over the unrolled variant (below) via the
30453 * alternative instructions framework:
30454 */
30455- .section .altinstr_replacement, "ax", @progbits
30456+ .section .altinstr_replacement, "a", @progbits
30457 .Lmemcpy_c_e:
30458 movq %rdi, %rax
30459 movq %rdx, %rcx
30460 rep movsb
30461+ pax_force_retaddr
30462 ret
30463 .Lmemcpy_e_e:
30464 .previous
30465@@ -136,6 +138,7 @@ ENTRY(memcpy)
30466 movq %r9, 1*8(%rdi)
30467 movq %r10, -2*8(%rdi, %rdx)
30468 movq %r11, -1*8(%rdi, %rdx)
30469+ pax_force_retaddr
30470 retq
30471 .p2align 4
30472 .Lless_16bytes:
30473@@ -148,6 +151,7 @@ ENTRY(memcpy)
30474 movq -1*8(%rsi, %rdx), %r9
30475 movq %r8, 0*8(%rdi)
30476 movq %r9, -1*8(%rdi, %rdx)
30477+ pax_force_retaddr
30478 retq
30479 .p2align 4
30480 .Lless_8bytes:
30481@@ -161,6 +165,7 @@ ENTRY(memcpy)
30482 movl -4(%rsi, %rdx), %r8d
30483 movl %ecx, (%rdi)
30484 movl %r8d, -4(%rdi, %rdx)
30485+ pax_force_retaddr
30486 retq
30487 .p2align 4
30488 .Lless_3bytes:
30489@@ -179,6 +184,7 @@ ENTRY(memcpy)
30490 movb %cl, (%rdi)
30491
30492 .Lend:
30493+ pax_force_retaddr
30494 retq
30495 CFI_ENDPROC
30496 ENDPROC(memcpy)
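
Alongside the pax_force_retaddr additions, memcpy_64.S introduces the other recurring tweak: .altinstr_replacement drops from "ax" (alloc, exec) to "a" (alloc only). The replacement templates in that section are never executed in place; the alternatives patcher copies their bytes over the live code, so under KERNEXEC the section does not need an executable mapping. A toy model of that copy step, with plain memcpy standing in for the kernel's early text-patching helper:

	#include <string.h>

	/* toy model: replacement instructions are pure data until the
	 * alternatives patcher copies them over the original site */
	static void apply_alternative_sketch(unsigned char *site,
					     const unsigned char *replacement,
					     unsigned int len)
	{
		memcpy(site, replacement, len);
	}
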
30497diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
30498index 65268a6..dd1de11 100644
30499--- a/arch/x86/lib/memmove_64.S
30500+++ b/arch/x86/lib/memmove_64.S
30501@@ -202,14 +202,16 @@ ENTRY(memmove)
30502 movb (%rsi), %r11b
30503 movb %r11b, (%rdi)
30504 13:
30505+ pax_force_retaddr
30506 retq
30507 CFI_ENDPROC
30508
30509- .section .altinstr_replacement,"ax"
30510+ .section .altinstr_replacement,"a"
30511 .Lmemmove_begin_forward_efs:
30512 /* Forward moving data. */
30513 movq %rdx, %rcx
30514 rep movsb
30515+ pax_force_retaddr
30516 retq
30517 .Lmemmove_end_forward_efs:
30518 .previous
30519diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
30520index 2dcb380..2eb79fe 100644
30521--- a/arch/x86/lib/memset_64.S
30522+++ b/arch/x86/lib/memset_64.S
30523@@ -16,7 +16,7 @@
30524 *
30525 * rax original destination
30526 */
30527- .section .altinstr_replacement, "ax", @progbits
30528+ .section .altinstr_replacement, "a", @progbits
30529 .Lmemset_c:
30530 movq %rdi,%r9
30531 movq %rdx,%rcx
30532@@ -30,6 +30,7 @@
30533 movl %edx,%ecx
30534 rep stosb
30535 movq %r9,%rax
30536+ pax_force_retaddr
30537 ret
30538 .Lmemset_e:
30539 .previous
30540@@ -45,13 +46,14 @@
30541 *
30542 * rax original destination
30543 */
30544- .section .altinstr_replacement, "ax", @progbits
30545+ .section .altinstr_replacement, "a", @progbits
30546 .Lmemset_c_e:
30547 movq %rdi,%r9
30548 movb %sil,%al
30549 movq %rdx,%rcx
30550 rep stosb
30551 movq %r9,%rax
30552+ pax_force_retaddr
30553 ret
30554 .Lmemset_e_e:
30555 .previous
30556@@ -118,6 +120,7 @@ ENTRY(__memset)
30557
30558 .Lende:
30559 movq %r10,%rax
30560+ pax_force_retaddr
30561 ret
30562
30563 CFI_RESTORE_STATE
30564diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
30565index c9f2d9b..e7fd2c0 100644
30566--- a/arch/x86/lib/mmx_32.c
30567+++ b/arch/x86/lib/mmx_32.c
30568@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30569 {
30570 void *p;
30571 int i;
30572+ unsigned long cr0;
30573
30574 if (unlikely(in_interrupt()))
30575 return __memcpy(to, from, len);
30576@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30577 kernel_fpu_begin();
30578
30579 __asm__ __volatile__ (
30580- "1: prefetch (%0)\n" /* This set is 28 bytes */
30581- " prefetch 64(%0)\n"
30582- " prefetch 128(%0)\n"
30583- " prefetch 192(%0)\n"
30584- " prefetch 256(%0)\n"
30585+ "1: prefetch (%1)\n" /* This set is 28 bytes */
30586+ " prefetch 64(%1)\n"
30587+ " prefetch 128(%1)\n"
30588+ " prefetch 192(%1)\n"
30589+ " prefetch 256(%1)\n"
30590 "2: \n"
30591 ".section .fixup, \"ax\"\n"
30592- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30593+ "3: \n"
30594+
30595+#ifdef CONFIG_PAX_KERNEXEC
30596+ " movl %%cr0, %0\n"
30597+ " movl %0, %%eax\n"
30598+ " andl $0xFFFEFFFF, %%eax\n"
30599+ " movl %%eax, %%cr0\n"
30600+#endif
30601+
30602+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30603+
30604+#ifdef CONFIG_PAX_KERNEXEC
30605+ " movl %0, %%cr0\n"
30606+#endif
30607+
30608 " jmp 2b\n"
30609 ".previous\n"
30610 _ASM_EXTABLE(1b, 3b)
30611- : : "r" (from));
30612+ : "=&r" (cr0) : "r" (from) : "ax");
30613
30614 for ( ; i > 5; i--) {
30615 __asm__ __volatile__ (
30616- "1: prefetch 320(%0)\n"
30617- "2: movq (%0), %%mm0\n"
30618- " movq 8(%0), %%mm1\n"
30619- " movq 16(%0), %%mm2\n"
30620- " movq 24(%0), %%mm3\n"
30621- " movq %%mm0, (%1)\n"
30622- " movq %%mm1, 8(%1)\n"
30623- " movq %%mm2, 16(%1)\n"
30624- " movq %%mm3, 24(%1)\n"
30625- " movq 32(%0), %%mm0\n"
30626- " movq 40(%0), %%mm1\n"
30627- " movq 48(%0), %%mm2\n"
30628- " movq 56(%0), %%mm3\n"
30629- " movq %%mm0, 32(%1)\n"
30630- " movq %%mm1, 40(%1)\n"
30631- " movq %%mm2, 48(%1)\n"
30632- " movq %%mm3, 56(%1)\n"
30633+ "1: prefetch 320(%1)\n"
30634+ "2: movq (%1), %%mm0\n"
30635+ " movq 8(%1), %%mm1\n"
30636+ " movq 16(%1), %%mm2\n"
30637+ " movq 24(%1), %%mm3\n"
30638+ " movq %%mm0, (%2)\n"
30639+ " movq %%mm1, 8(%2)\n"
30640+ " movq %%mm2, 16(%2)\n"
30641+ " movq %%mm3, 24(%2)\n"
30642+ " movq 32(%1), %%mm0\n"
30643+ " movq 40(%1), %%mm1\n"
30644+ " movq 48(%1), %%mm2\n"
30645+ " movq 56(%1), %%mm3\n"
30646+ " movq %%mm0, 32(%2)\n"
30647+ " movq %%mm1, 40(%2)\n"
30648+ " movq %%mm2, 48(%2)\n"
30649+ " movq %%mm3, 56(%2)\n"
30650 ".section .fixup, \"ax\"\n"
30651- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30652+ "3:\n"
30653+
30654+#ifdef CONFIG_PAX_KERNEXEC
30655+ " movl %%cr0, %0\n"
30656+ " movl %0, %%eax\n"
30657+ " andl $0xFFFEFFFF, %%eax\n"
30658+ " movl %%eax, %%cr0\n"
30659+#endif
30660+
30661+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30662+
30663+#ifdef CONFIG_PAX_KERNEXEC
30664+ " movl %0, %%cr0\n"
30665+#endif
30666+
30667 " jmp 2b\n"
30668 ".previous\n"
30669 _ASM_EXTABLE(1b, 3b)
30670- : : "r" (from), "r" (to) : "memory");
30671+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30672
30673 from += 64;
30674 to += 64;
30675@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
30676 static void fast_copy_page(void *to, void *from)
30677 {
30678 int i;
30679+ unsigned long cr0;
30680
30681 kernel_fpu_begin();
30682
30683@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
30684 * but that is for later. -AV
30685 */
30686 __asm__ __volatile__(
30687- "1: prefetch (%0)\n"
30688- " prefetch 64(%0)\n"
30689- " prefetch 128(%0)\n"
30690- " prefetch 192(%0)\n"
30691- " prefetch 256(%0)\n"
30692+ "1: prefetch (%1)\n"
30693+ " prefetch 64(%1)\n"
30694+ " prefetch 128(%1)\n"
30695+ " prefetch 192(%1)\n"
30696+ " prefetch 256(%1)\n"
30697 "2: \n"
30698 ".section .fixup, \"ax\"\n"
30699- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30700+ "3: \n"
30701+
30702+#ifdef CONFIG_PAX_KERNEXEC
30703+ " movl %%cr0, %0\n"
30704+ " movl %0, %%eax\n"
30705+ " andl $0xFFFEFFFF, %%eax\n"
30706+ " movl %%eax, %%cr0\n"
30707+#endif
30708+
30709+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30710+
30711+#ifdef CONFIG_PAX_KERNEXEC
30712+ " movl %0, %%cr0\n"
30713+#endif
30714+
30715 " jmp 2b\n"
30716 ".previous\n"
30717- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30718+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30719
30720 for (i = 0; i < (4096-320)/64; i++) {
30721 __asm__ __volatile__ (
30722- "1: prefetch 320(%0)\n"
30723- "2: movq (%0), %%mm0\n"
30724- " movntq %%mm0, (%1)\n"
30725- " movq 8(%0), %%mm1\n"
30726- " movntq %%mm1, 8(%1)\n"
30727- " movq 16(%0), %%mm2\n"
30728- " movntq %%mm2, 16(%1)\n"
30729- " movq 24(%0), %%mm3\n"
30730- " movntq %%mm3, 24(%1)\n"
30731- " movq 32(%0), %%mm4\n"
30732- " movntq %%mm4, 32(%1)\n"
30733- " movq 40(%0), %%mm5\n"
30734- " movntq %%mm5, 40(%1)\n"
30735- " movq 48(%0), %%mm6\n"
30736- " movntq %%mm6, 48(%1)\n"
30737- " movq 56(%0), %%mm7\n"
30738- " movntq %%mm7, 56(%1)\n"
30739+ "1: prefetch 320(%1)\n"
30740+ "2: movq (%1), %%mm0\n"
30741+ " movntq %%mm0, (%2)\n"
30742+ " movq 8(%1), %%mm1\n"
30743+ " movntq %%mm1, 8(%2)\n"
30744+ " movq 16(%1), %%mm2\n"
30745+ " movntq %%mm2, 16(%2)\n"
30746+ " movq 24(%1), %%mm3\n"
30747+ " movntq %%mm3, 24(%2)\n"
30748+ " movq 32(%1), %%mm4\n"
30749+ " movntq %%mm4, 32(%2)\n"
30750+ " movq 40(%1), %%mm5\n"
30751+ " movntq %%mm5, 40(%2)\n"
30752+ " movq 48(%1), %%mm6\n"
30753+ " movntq %%mm6, 48(%2)\n"
30754+ " movq 56(%1), %%mm7\n"
30755+ " movntq %%mm7, 56(%2)\n"
30756 ".section .fixup, \"ax\"\n"
30757- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30758+ "3:\n"
30759+
30760+#ifdef CONFIG_PAX_KERNEXEC
30761+ " movl %%cr0, %0\n"
30762+ " movl %0, %%eax\n"
30763+ " andl $0xFFFEFFFF, %%eax\n"
30764+ " movl %%eax, %%cr0\n"
30765+#endif
30766+
30767+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30768+
30769+#ifdef CONFIG_PAX_KERNEXEC
30770+ " movl %0, %%cr0\n"
30771+#endif
30772+
30773 " jmp 2b\n"
30774 ".previous\n"
30775- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
30776+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30777
30778 from += 64;
30779 to += 64;
30780@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
30781 static void fast_copy_page(void *to, void *from)
30782 {
30783 int i;
30784+ unsigned long cr0;
30785
30786 kernel_fpu_begin();
30787
30788 __asm__ __volatile__ (
30789- "1: prefetch (%0)\n"
30790- " prefetch 64(%0)\n"
30791- " prefetch 128(%0)\n"
30792- " prefetch 192(%0)\n"
30793- " prefetch 256(%0)\n"
30794+ "1: prefetch (%1)\n"
30795+ " prefetch 64(%1)\n"
30796+ " prefetch 128(%1)\n"
30797+ " prefetch 192(%1)\n"
30798+ " prefetch 256(%1)\n"
30799 "2: \n"
30800 ".section .fixup, \"ax\"\n"
30801- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30802+ "3: \n"
30803+
30804+#ifdef CONFIG_PAX_KERNEXEC
30805+ " movl %%cr0, %0\n"
30806+ " movl %0, %%eax\n"
30807+ " andl $0xFFFEFFFF, %%eax\n"
30808+ " movl %%eax, %%cr0\n"
30809+#endif
30810+
30811+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30812+
30813+#ifdef CONFIG_PAX_KERNEXEC
30814+ " movl %0, %%cr0\n"
30815+#endif
30816+
30817 " jmp 2b\n"
30818 ".previous\n"
30819- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30820+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30821
30822 for (i = 0; i < 4096/64; i++) {
30823 __asm__ __volatile__ (
30824- "1: prefetch 320(%0)\n"
30825- "2: movq (%0), %%mm0\n"
30826- " movq 8(%0), %%mm1\n"
30827- " movq 16(%0), %%mm2\n"
30828- " movq 24(%0), %%mm3\n"
30829- " movq %%mm0, (%1)\n"
30830- " movq %%mm1, 8(%1)\n"
30831- " movq %%mm2, 16(%1)\n"
30832- " movq %%mm3, 24(%1)\n"
30833- " movq 32(%0), %%mm0\n"
30834- " movq 40(%0), %%mm1\n"
30835- " movq 48(%0), %%mm2\n"
30836- " movq 56(%0), %%mm3\n"
30837- " movq %%mm0, 32(%1)\n"
30838- " movq %%mm1, 40(%1)\n"
30839- " movq %%mm2, 48(%1)\n"
30840- " movq %%mm3, 56(%1)\n"
30841+ "1: prefetch 320(%1)\n"
30842+ "2: movq (%1), %%mm0\n"
30843+ " movq 8(%1), %%mm1\n"
30844+ " movq 16(%1), %%mm2\n"
30845+ " movq 24(%1), %%mm3\n"
30846+ " movq %%mm0, (%2)\n"
30847+ " movq %%mm1, 8(%2)\n"
30848+ " movq %%mm2, 16(%2)\n"
30849+ " movq %%mm3, 24(%2)\n"
30850+ " movq 32(%1), %%mm0\n"
30851+ " movq 40(%1), %%mm1\n"
30852+ " movq 48(%1), %%mm2\n"
30853+ " movq 56(%1), %%mm3\n"
30854+ " movq %%mm0, 32(%2)\n"
30855+ " movq %%mm1, 40(%2)\n"
30856+ " movq %%mm2, 48(%2)\n"
30857+ " movq %%mm3, 56(%2)\n"
30858 ".section .fixup, \"ax\"\n"
30859- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30860+ "3:\n"
30861+
30862+#ifdef CONFIG_PAX_KERNEXEC
30863+ " movl %%cr0, %0\n"
30864+ " movl %0, %%eax\n"
30865+ " andl $0xFFFEFFFF, %%eax\n"
30866+ " movl %%eax, %%cr0\n"
30867+#endif
30868+
30869+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30870+
30871+#ifdef CONFIG_PAX_KERNEXEC
30872+ " movl %0, %%cr0\n"
30873+#endif
30874+
30875 " jmp 2b\n"
30876 ".previous\n"
30877 _ASM_EXTABLE(1b, 3b)
30878- : : "r" (from), "r" (to) : "memory");
30879+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30880
30881 from += 64;
30882 to += 64;
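
The mmx_32.c fixup paths are self-modifying code: on a faulting prefetch they overwrite the instruction at label 1 with a two-byte short jump (opcode 0xEB plus a 0x1A or 0x05 displacement, stored little-endian as the 0x1AEB and 0x05EB immediates). KERNEXEC makes that text read-only, so the reworked fixups clear and restore CR0.WP around the write, which is what the new cr0 output operand and the 0xFFFEFFFF mask (everything except bit 16) implement. The same dance as a C sketch:

	/* sketch of the CR0.WP toggle performed by the fixups above;
	 * bit 16 is CR0.WP, which gates ring-0 writes to read-only pages */
	static inline unsigned long kernexec_open_sketch(void)
	{
		unsigned long cr0;

		asm volatile("mov %%cr0, %0" : "=r" (cr0));
		asm volatile("mov %0, %%cr0" : : "r" (cr0 & ~(1UL << 16)));
		return cr0;
	}

	static inline void kernexec_close_sketch(unsigned long cr0)
	{
		asm volatile("mov %0, %%cr0" : : "r" (cr0));
	}
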
30883diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
30884index f6d13ee..d789440 100644
30885--- a/arch/x86/lib/msr-reg.S
30886+++ b/arch/x86/lib/msr-reg.S
30887@@ -3,6 +3,7 @@
30888 #include <asm/dwarf2.h>
30889 #include <asm/asm.h>
30890 #include <asm/msr.h>
30891+#include <asm/alternative-asm.h>
30892
30893 #ifdef CONFIG_X86_64
30894 /*
30895@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
30896 movl %edi, 28(%r10)
30897 popq_cfi %rbp
30898 popq_cfi %rbx
30899+ pax_force_retaddr
30900 ret
30901 3:
30902 CFI_RESTORE_STATE
30903diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
30904index fc6ba17..d4d989d 100644
30905--- a/arch/x86/lib/putuser.S
30906+++ b/arch/x86/lib/putuser.S
30907@@ -16,7 +16,9 @@
30908 #include <asm/errno.h>
30909 #include <asm/asm.h>
30910 #include <asm/smap.h>
30911-
30912+#include <asm/segment.h>
30913+#include <asm/pgtable.h>
30914+#include <asm/alternative-asm.h>
30915
30916 /*
30917 * __put_user_X
30918@@ -30,57 +32,125 @@
30919 * as they get called from within inline assembly.
30920 */
30921
30922-#define ENTER CFI_STARTPROC ; \
30923- GET_THREAD_INFO(%_ASM_BX)
30924-#define EXIT ASM_CLAC ; \
30925- ret ; \
30926+#define ENTER CFI_STARTPROC
30927+#define EXIT ASM_CLAC ; \
30928+ pax_force_retaddr ; \
30929+ ret ; \
30930 CFI_ENDPROC
30931
30932+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30933+#define _DEST %_ASM_CX,%_ASM_BX
30934+#else
30935+#define _DEST %_ASM_CX
30936+#endif
30937+
30938+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30939+#define __copyuser_seg gs;
30940+#else
30941+#define __copyuser_seg
30942+#endif
30943+
30944 .text
30945 ENTRY(__put_user_1)
30946 ENTER
30947+
30948+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30949+ GET_THREAD_INFO(%_ASM_BX)
30950 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
30951 jae bad_put_user
30952 ASM_STAC
30953-1: movb %al,(%_ASM_CX)
30954+
30955+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30956+ mov pax_user_shadow_base,%_ASM_BX
30957+ cmp %_ASM_BX,%_ASM_CX
30958+ jb 1234f
30959+ xor %ebx,%ebx
30960+1234:
30961+#endif
30962+
30963+#endif
30964+
30965+1: __copyuser_seg movb %al,(_DEST)
30966 xor %eax,%eax
30967 EXIT
30968 ENDPROC(__put_user_1)
30969
30970 ENTRY(__put_user_2)
30971 ENTER
30972+
30973+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30974+ GET_THREAD_INFO(%_ASM_BX)
30975 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30976 sub $1,%_ASM_BX
30977 cmp %_ASM_BX,%_ASM_CX
30978 jae bad_put_user
30979 ASM_STAC
30980-2: movw %ax,(%_ASM_CX)
30981+
30982+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30983+ mov pax_user_shadow_base,%_ASM_BX
30984+ cmp %_ASM_BX,%_ASM_CX
30985+ jb 1234f
30986+ xor %ebx,%ebx
30987+1234:
30988+#endif
30989+
30990+#endif
30991+
30992+2: __copyuser_seg movw %ax,(_DEST)
30993 xor %eax,%eax
30994 EXIT
30995 ENDPROC(__put_user_2)
30996
30997 ENTRY(__put_user_4)
30998 ENTER
30999+
31000+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31001+ GET_THREAD_INFO(%_ASM_BX)
31002 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
31003 sub $3,%_ASM_BX
31004 cmp %_ASM_BX,%_ASM_CX
31005 jae bad_put_user
31006 ASM_STAC
31007-3: movl %eax,(%_ASM_CX)
31008+
31009+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31010+ mov pax_user_shadow_base,%_ASM_BX
31011+ cmp %_ASM_BX,%_ASM_CX
31012+ jb 1234f
31013+ xor %ebx,%ebx
31014+1234:
31015+#endif
31016+
31017+#endif
31018+
31019+3: __copyuser_seg movl %eax,(_DEST)
31020 xor %eax,%eax
31021 EXIT
31022 ENDPROC(__put_user_4)
31023
31024 ENTRY(__put_user_8)
31025 ENTER
31026+
31027+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31028+ GET_THREAD_INFO(%_ASM_BX)
31029 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
31030 sub $7,%_ASM_BX
31031 cmp %_ASM_BX,%_ASM_CX
31032 jae bad_put_user
31033 ASM_STAC
31034-4: mov %_ASM_AX,(%_ASM_CX)
31035+
31036+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31037+ mov pax_user_shadow_base,%_ASM_BX
31038+ cmp %_ASM_BX,%_ASM_CX
31039+ jb 1234f
31040+ xor %ebx,%ebx
31041+1234:
31042+#endif
31043+
31044+#endif
31045+
31046+4: __copyuser_seg mov %_ASM_AX,(_DEST)
31047 #ifdef CONFIG_X86_32
31048-5: movl %edx,4(%_ASM_CX)
31049+5: __copyuser_seg movl %edx,4(_DEST)
31050 #endif
31051 xor %eax,%eax
31052 EXIT
31053diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
31054index 1cad221..de671ee 100644
31055--- a/arch/x86/lib/rwlock.S
31056+++ b/arch/x86/lib/rwlock.S
31057@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
31058 FRAME
31059 0: LOCK_PREFIX
31060 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
31061+
31062+#ifdef CONFIG_PAX_REFCOUNT
31063+ jno 1234f
31064+ LOCK_PREFIX
31065+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
31066+ int $4
31067+1234:
31068+ _ASM_EXTABLE(1234b, 1234b)
31069+#endif
31070+
31071 1: rep; nop
31072 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
31073 jne 1b
31074 LOCK_PREFIX
31075 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
31076+
31077+#ifdef CONFIG_PAX_REFCOUNT
31078+ jno 1234f
31079+ LOCK_PREFIX
31080+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
31081+ int $4
31082+1234:
31083+ _ASM_EXTABLE(1234b, 1234b)
31084+#endif
31085+
31086 jnz 0b
31087 ENDFRAME
31088+ pax_force_retaddr
31089 ret
31090 CFI_ENDPROC
31091 END(__write_lock_failed)
31092@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
31093 FRAME
31094 0: LOCK_PREFIX
31095 READ_LOCK_SIZE(inc) (%__lock_ptr)
31096+
31097+#ifdef CONFIG_PAX_REFCOUNT
31098+ jno 1234f
31099+ LOCK_PREFIX
31100+ READ_LOCK_SIZE(dec) (%__lock_ptr)
31101+ int $4
31102+1234:
31103+ _ASM_EXTABLE(1234b, 1234b)
31104+#endif
31105+
31106 1: rep; nop
31107 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
31108 js 1b
31109 LOCK_PREFIX
31110 READ_LOCK_SIZE(dec) (%__lock_ptr)
31111+
31112+#ifdef CONFIG_PAX_REFCOUNT
31113+ jno 1234f
31114+ LOCK_PREFIX
31115+ READ_LOCK_SIZE(inc) (%__lock_ptr)
31116+ int $4
31117+1234:
31118+ _ASM_EXTABLE(1234b, 1234b)
31119+#endif
31120+
31121 js 0b
31122 ENDFRAME
31123+ pax_force_retaddr
31124 ret
31125 CFI_ENDPROC
31126 END(__read_lock_failed)
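
The rwlock hunks graft PAX_REFCOUNT's overflow discipline onto the lock slow paths: after each locked update, jno falls through when no signed overflow occurred; otherwise the update is undone and int $4 raises the overflow exception, with the _ASM_EXTABLE entry letting execution resume in place once the handler has dealt with it. The canonical form of the pattern on a plain counter, sketched as kernel-style inline asm:

	#include <asm/alternative.h>	/* LOCK_PREFIX */
	#include <asm/asm.h>		/* _ASM_EXTABLE */

	/* sketch of the PAX_REFCOUNT pattern: update, and on signed
	 * overflow undo the update and trap via #OF (int $4) */
	static inline void refcount_inc_checked_sketch(int *counter)
	{
		asm volatile(LOCK_PREFIX "incl %0\n"
			     "jno 0f\n"
			     LOCK_PREFIX "decl %0\n"
			     "int $4\n"
			     "0:\n"
			     _ASM_EXTABLE(0b, 0b)
			     : "+m" (*counter) : : "cc");
	}
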
31127diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
31128index 5dff5f0..cadebf4 100644
31129--- a/arch/x86/lib/rwsem.S
31130+++ b/arch/x86/lib/rwsem.S
31131@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
31132 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
31133 CFI_RESTORE __ASM_REG(dx)
31134 restore_common_regs
31135+ pax_force_retaddr
31136 ret
31137 CFI_ENDPROC
31138 ENDPROC(call_rwsem_down_read_failed)
31139@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
31140 movq %rax,%rdi
31141 call rwsem_down_write_failed
31142 restore_common_regs
31143+ pax_force_retaddr
31144 ret
31145 CFI_ENDPROC
31146 ENDPROC(call_rwsem_down_write_failed)
31147@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
31148 movq %rax,%rdi
31149 call rwsem_wake
31150 restore_common_regs
31151-1: ret
31152+1: pax_force_retaddr
31153+ ret
31154 CFI_ENDPROC
31155 ENDPROC(call_rwsem_wake)
31156
31157@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
31158 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
31159 CFI_RESTORE __ASM_REG(dx)
31160 restore_common_regs
31161+ pax_force_retaddr
31162 ret
31163 CFI_ENDPROC
31164 ENDPROC(call_rwsem_downgrade_wake)
31165diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
31166index 92d9fea..b2762c8 100644
31167--- a/arch/x86/lib/thunk_64.S
31168+++ b/arch/x86/lib/thunk_64.S
31169@@ -9,6 +9,7 @@
31170 #include <asm/dwarf2.h>
31171 #include <asm/calling.h>
31172 #include <asm/asm.h>
31173+#include <asm/alternative-asm.h>
31174
31175 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
31176 .macro THUNK name, func, put_ret_addr_in_rdi=0
31177@@ -16,11 +17,11 @@
31178 \name:
31179 CFI_STARTPROC
31180
31181- /* this one pushes 9 elems, the next one would be %rIP */
31182- SAVE_ARGS
31183+ /* this one pushes 15+1 elems, the next one would be %rIP */
31184+ SAVE_ARGS 8
31185
31186 .if \put_ret_addr_in_rdi
31187- movq_cfi_restore 9*8, rdi
31188+ movq_cfi_restore RIP, rdi
31189 .endif
31190
31191 call \func
31192@@ -40,9 +41,10 @@
31193
31194 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
31195 CFI_STARTPROC
31196- SAVE_ARGS
31197+ SAVE_ARGS 8
31198 restore:
31199- RESTORE_ARGS
31200+ RESTORE_ARGS 1,8
31201+ pax_force_retaddr
31202 ret
31203 CFI_ENDPROC
31204 _ASM_NOKPROBE(restore)
31205diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
31206index e2f5e21..4b22130 100644
31207--- a/arch/x86/lib/usercopy_32.c
31208+++ b/arch/x86/lib/usercopy_32.c
31209@@ -42,11 +42,13 @@ do { \
31210 int __d0; \
31211 might_fault(); \
31212 __asm__ __volatile__( \
31213+ __COPYUSER_SET_ES \
31214 ASM_STAC "\n" \
31215 "0: rep; stosl\n" \
31216 " movl %2,%0\n" \
31217 "1: rep; stosb\n" \
31218 "2: " ASM_CLAC "\n" \
31219+ __COPYUSER_RESTORE_ES \
31220 ".section .fixup,\"ax\"\n" \
31221 "3: lea 0(%2,%0,4),%0\n" \
31222 " jmp 2b\n" \
31223@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
31224
31225 #ifdef CONFIG_X86_INTEL_USERCOPY
31226 static unsigned long
31227-__copy_user_intel(void __user *to, const void *from, unsigned long size)
31228+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
31229 {
31230 int d0, d1;
31231 __asm__ __volatile__(
31232@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31233 " .align 2,0x90\n"
31234 "3: movl 0(%4), %%eax\n"
31235 "4: movl 4(%4), %%edx\n"
31236- "5: movl %%eax, 0(%3)\n"
31237- "6: movl %%edx, 4(%3)\n"
31238+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
31239+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
31240 "7: movl 8(%4), %%eax\n"
31241 "8: movl 12(%4),%%edx\n"
31242- "9: movl %%eax, 8(%3)\n"
31243- "10: movl %%edx, 12(%3)\n"
31244+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
31245+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
31246 "11: movl 16(%4), %%eax\n"
31247 "12: movl 20(%4), %%edx\n"
31248- "13: movl %%eax, 16(%3)\n"
31249- "14: movl %%edx, 20(%3)\n"
31250+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
31251+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
31252 "15: movl 24(%4), %%eax\n"
31253 "16: movl 28(%4), %%edx\n"
31254- "17: movl %%eax, 24(%3)\n"
31255- "18: movl %%edx, 28(%3)\n"
31256+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
31257+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
31258 "19: movl 32(%4), %%eax\n"
31259 "20: movl 36(%4), %%edx\n"
31260- "21: movl %%eax, 32(%3)\n"
31261- "22: movl %%edx, 36(%3)\n"
31262+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
31263+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
31264 "23: movl 40(%4), %%eax\n"
31265 "24: movl 44(%4), %%edx\n"
31266- "25: movl %%eax, 40(%3)\n"
31267- "26: movl %%edx, 44(%3)\n"
31268+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
31269+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
31270 "27: movl 48(%4), %%eax\n"
31271 "28: movl 52(%4), %%edx\n"
31272- "29: movl %%eax, 48(%3)\n"
31273- "30: movl %%edx, 52(%3)\n"
31274+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
31275+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
31276 "31: movl 56(%4), %%eax\n"
31277 "32: movl 60(%4), %%edx\n"
31278- "33: movl %%eax, 56(%3)\n"
31279- "34: movl %%edx, 60(%3)\n"
31280+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
31281+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
31282 " addl $-64, %0\n"
31283 " addl $64, %4\n"
31284 " addl $64, %3\n"
31285@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31286 " shrl $2, %0\n"
31287 " andl $3, %%eax\n"
31288 " cld\n"
31289+ __COPYUSER_SET_ES
31290 "99: rep; movsl\n"
31291 "36: movl %%eax, %0\n"
31292 "37: rep; movsb\n"
31293 "100:\n"
31294+ __COPYUSER_RESTORE_ES
31295+ ".section .fixup,\"ax\"\n"
31296+ "101: lea 0(%%eax,%0,4),%0\n"
31297+ " jmp 100b\n"
31298+ ".previous\n"
31299+ _ASM_EXTABLE(1b,100b)
31300+ _ASM_EXTABLE(2b,100b)
31301+ _ASM_EXTABLE(3b,100b)
31302+ _ASM_EXTABLE(4b,100b)
31303+ _ASM_EXTABLE(5b,100b)
31304+ _ASM_EXTABLE(6b,100b)
31305+ _ASM_EXTABLE(7b,100b)
31306+ _ASM_EXTABLE(8b,100b)
31307+ _ASM_EXTABLE(9b,100b)
31308+ _ASM_EXTABLE(10b,100b)
31309+ _ASM_EXTABLE(11b,100b)
31310+ _ASM_EXTABLE(12b,100b)
31311+ _ASM_EXTABLE(13b,100b)
31312+ _ASM_EXTABLE(14b,100b)
31313+ _ASM_EXTABLE(15b,100b)
31314+ _ASM_EXTABLE(16b,100b)
31315+ _ASM_EXTABLE(17b,100b)
31316+ _ASM_EXTABLE(18b,100b)
31317+ _ASM_EXTABLE(19b,100b)
31318+ _ASM_EXTABLE(20b,100b)
31319+ _ASM_EXTABLE(21b,100b)
31320+ _ASM_EXTABLE(22b,100b)
31321+ _ASM_EXTABLE(23b,100b)
31322+ _ASM_EXTABLE(24b,100b)
31323+ _ASM_EXTABLE(25b,100b)
31324+ _ASM_EXTABLE(26b,100b)
31325+ _ASM_EXTABLE(27b,100b)
31326+ _ASM_EXTABLE(28b,100b)
31327+ _ASM_EXTABLE(29b,100b)
31328+ _ASM_EXTABLE(30b,100b)
31329+ _ASM_EXTABLE(31b,100b)
31330+ _ASM_EXTABLE(32b,100b)
31331+ _ASM_EXTABLE(33b,100b)
31332+ _ASM_EXTABLE(34b,100b)
31333+ _ASM_EXTABLE(35b,100b)
31334+ _ASM_EXTABLE(36b,100b)
31335+ _ASM_EXTABLE(37b,100b)
31336+ _ASM_EXTABLE(99b,101b)
31337+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
31338+ : "1"(to), "2"(from), "0"(size)
31339+ : "eax", "edx", "memory");
31340+ return size;
31341+}
31342+
31343+static unsigned long
31344+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
31345+{
31346+ int d0, d1;
31347+ __asm__ __volatile__(
31348+ " .align 2,0x90\n"
31349+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
31350+ " cmpl $67, %0\n"
31351+ " jbe 3f\n"
31352+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
31353+ " .align 2,0x90\n"
31354+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
31355+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
31356+ "5: movl %%eax, 0(%3)\n"
31357+ "6: movl %%edx, 4(%3)\n"
31358+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
31359+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
31360+ "9: movl %%eax, 8(%3)\n"
31361+ "10: movl %%edx, 12(%3)\n"
31362+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
31363+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
31364+ "13: movl %%eax, 16(%3)\n"
31365+ "14: movl %%edx, 20(%3)\n"
31366+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
31367+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
31368+ "17: movl %%eax, 24(%3)\n"
31369+ "18: movl %%edx, 28(%3)\n"
31370+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
31371+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
31372+ "21: movl %%eax, 32(%3)\n"
31373+ "22: movl %%edx, 36(%3)\n"
31374+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
31375+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
31376+ "25: movl %%eax, 40(%3)\n"
31377+ "26: movl %%edx, 44(%3)\n"
31378+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
31379+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
31380+ "29: movl %%eax, 48(%3)\n"
31381+ "30: movl %%edx, 52(%3)\n"
31382+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
31383+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
31384+ "33: movl %%eax, 56(%3)\n"
31385+ "34: movl %%edx, 60(%3)\n"
31386+ " addl $-64, %0\n"
31387+ " addl $64, %4\n"
31388+ " addl $64, %3\n"
31389+ " cmpl $63, %0\n"
31390+ " ja 1b\n"
31391+ "35: movl %0, %%eax\n"
31392+ " shrl $2, %0\n"
31393+ " andl $3, %%eax\n"
31394+ " cld\n"
31395+ "99: rep; "__copyuser_seg" movsl\n"
31396+ "36: movl %%eax, %0\n"
31397+ "37: rep; "__copyuser_seg" movsb\n"
31398+ "100:\n"
31399 ".section .fixup,\"ax\"\n"
31400 "101: lea 0(%%eax,%0,4),%0\n"
31401 " jmp 100b\n"
31402@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31403 int d0, d1;
31404 __asm__ __volatile__(
31405 " .align 2,0x90\n"
31406- "0: movl 32(%4), %%eax\n"
31407+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31408 " cmpl $67, %0\n"
31409 " jbe 2f\n"
31410- "1: movl 64(%4), %%eax\n"
31411+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31412 " .align 2,0x90\n"
31413- "2: movl 0(%4), %%eax\n"
31414- "21: movl 4(%4), %%edx\n"
31415+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31416+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31417 " movl %%eax, 0(%3)\n"
31418 " movl %%edx, 4(%3)\n"
31419- "3: movl 8(%4), %%eax\n"
31420- "31: movl 12(%4),%%edx\n"
31421+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31422+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31423 " movl %%eax, 8(%3)\n"
31424 " movl %%edx, 12(%3)\n"
31425- "4: movl 16(%4), %%eax\n"
31426- "41: movl 20(%4), %%edx\n"
31427+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31428+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31429 " movl %%eax, 16(%3)\n"
31430 " movl %%edx, 20(%3)\n"
31431- "10: movl 24(%4), %%eax\n"
31432- "51: movl 28(%4), %%edx\n"
31433+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31434+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31435 " movl %%eax, 24(%3)\n"
31436 " movl %%edx, 28(%3)\n"
31437- "11: movl 32(%4), %%eax\n"
31438- "61: movl 36(%4), %%edx\n"
31439+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31440+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31441 " movl %%eax, 32(%3)\n"
31442 " movl %%edx, 36(%3)\n"
31443- "12: movl 40(%4), %%eax\n"
31444- "71: movl 44(%4), %%edx\n"
31445+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31446+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31447 " movl %%eax, 40(%3)\n"
31448 " movl %%edx, 44(%3)\n"
31449- "13: movl 48(%4), %%eax\n"
31450- "81: movl 52(%4), %%edx\n"
31451+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31452+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31453 " movl %%eax, 48(%3)\n"
31454 " movl %%edx, 52(%3)\n"
31455- "14: movl 56(%4), %%eax\n"
31456- "91: movl 60(%4), %%edx\n"
31457+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31458+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31459 " movl %%eax, 56(%3)\n"
31460 " movl %%edx, 60(%3)\n"
31461 " addl $-64, %0\n"
31462@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31463 " shrl $2, %0\n"
31464 " andl $3, %%eax\n"
31465 " cld\n"
31466- "6: rep; movsl\n"
31467+ "6: rep; "__copyuser_seg" movsl\n"
31468 " movl %%eax,%0\n"
31469- "7: rep; movsb\n"
31470+ "7: rep; "__copyuser_seg" movsb\n"
31471 "8:\n"
31472 ".section .fixup,\"ax\"\n"
31473 "9: lea 0(%%eax,%0,4),%0\n"
31474@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31475
31476 __asm__ __volatile__(
31477 " .align 2,0x90\n"
31478- "0: movl 32(%4), %%eax\n"
31479+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31480 " cmpl $67, %0\n"
31481 " jbe 2f\n"
31482- "1: movl 64(%4), %%eax\n"
31483+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31484 " .align 2,0x90\n"
31485- "2: movl 0(%4), %%eax\n"
31486- "21: movl 4(%4), %%edx\n"
31487+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31488+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31489 " movnti %%eax, 0(%3)\n"
31490 " movnti %%edx, 4(%3)\n"
31491- "3: movl 8(%4), %%eax\n"
31492- "31: movl 12(%4),%%edx\n"
31493+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31494+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31495 " movnti %%eax, 8(%3)\n"
31496 " movnti %%edx, 12(%3)\n"
31497- "4: movl 16(%4), %%eax\n"
31498- "41: movl 20(%4), %%edx\n"
31499+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31500+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31501 " movnti %%eax, 16(%3)\n"
31502 " movnti %%edx, 20(%3)\n"
31503- "10: movl 24(%4), %%eax\n"
31504- "51: movl 28(%4), %%edx\n"
31505+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31506+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31507 " movnti %%eax, 24(%3)\n"
31508 " movnti %%edx, 28(%3)\n"
31509- "11: movl 32(%4), %%eax\n"
31510- "61: movl 36(%4), %%edx\n"
31511+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31512+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31513 " movnti %%eax, 32(%3)\n"
31514 " movnti %%edx, 36(%3)\n"
31515- "12: movl 40(%4), %%eax\n"
31516- "71: movl 44(%4), %%edx\n"
31517+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31518+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31519 " movnti %%eax, 40(%3)\n"
31520 " movnti %%edx, 44(%3)\n"
31521- "13: movl 48(%4), %%eax\n"
31522- "81: movl 52(%4), %%edx\n"
31523+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31524+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31525 " movnti %%eax, 48(%3)\n"
31526 " movnti %%edx, 52(%3)\n"
31527- "14: movl 56(%4), %%eax\n"
31528- "91: movl 60(%4), %%edx\n"
31529+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31530+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31531 " movnti %%eax, 56(%3)\n"
31532 " movnti %%edx, 60(%3)\n"
31533 " addl $-64, %0\n"
31534@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31535 " shrl $2, %0\n"
31536 " andl $3, %%eax\n"
31537 " cld\n"
31538- "6: rep; movsl\n"
31539+ "6: rep; "__copyuser_seg" movsl\n"
31540 " movl %%eax,%0\n"
31541- "7: rep; movsb\n"
31542+ "7: rep; "__copyuser_seg" movsb\n"
31543 "8:\n"
31544 ".section .fixup,\"ax\"\n"
31545 "9: lea 0(%%eax,%0,4),%0\n"
31546@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
31547
31548 __asm__ __volatile__(
31549 " .align 2,0x90\n"
31550- "0: movl 32(%4), %%eax\n"
31551+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31552 " cmpl $67, %0\n"
31553 " jbe 2f\n"
31554- "1: movl 64(%4), %%eax\n"
31555+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31556 " .align 2,0x90\n"
31557- "2: movl 0(%4), %%eax\n"
31558- "21: movl 4(%4), %%edx\n"
31559+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31560+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31561 " movnti %%eax, 0(%3)\n"
31562 " movnti %%edx, 4(%3)\n"
31563- "3: movl 8(%4), %%eax\n"
31564- "31: movl 12(%4),%%edx\n"
31565+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31566+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31567 " movnti %%eax, 8(%3)\n"
31568 " movnti %%edx, 12(%3)\n"
31569- "4: movl 16(%4), %%eax\n"
31570- "41: movl 20(%4), %%edx\n"
31571+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31572+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31573 " movnti %%eax, 16(%3)\n"
31574 " movnti %%edx, 20(%3)\n"
31575- "10: movl 24(%4), %%eax\n"
31576- "51: movl 28(%4), %%edx\n"
31577+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31578+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31579 " movnti %%eax, 24(%3)\n"
31580 " movnti %%edx, 28(%3)\n"
31581- "11: movl 32(%4), %%eax\n"
31582- "61: movl 36(%4), %%edx\n"
31583+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31584+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31585 " movnti %%eax, 32(%3)\n"
31586 " movnti %%edx, 36(%3)\n"
31587- "12: movl 40(%4), %%eax\n"
31588- "71: movl 44(%4), %%edx\n"
31589+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31590+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31591 " movnti %%eax, 40(%3)\n"
31592 " movnti %%edx, 44(%3)\n"
31593- "13: movl 48(%4), %%eax\n"
31594- "81: movl 52(%4), %%edx\n"
31595+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31596+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31597 " movnti %%eax, 48(%3)\n"
31598 " movnti %%edx, 52(%3)\n"
31599- "14: movl 56(%4), %%eax\n"
31600- "91: movl 60(%4), %%edx\n"
31601+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31602+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31603 " movnti %%eax, 56(%3)\n"
31604 " movnti %%edx, 60(%3)\n"
31605 " addl $-64, %0\n"
31606@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
31607 " shrl $2, %0\n"
31608 " andl $3, %%eax\n"
31609 " cld\n"
31610- "6: rep; movsl\n"
31611+ "6: rep; "__copyuser_seg" movsl\n"
31612 " movl %%eax,%0\n"
31613- "7: rep; movsb\n"
31614+ "7: rep; "__copyuser_seg" movsb\n"
31615 "8:\n"
31616 ".section .fixup,\"ax\"\n"
31617 "9: lea 0(%%eax,%0,4),%0\n"
31618@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
31619 */
31620 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
31621 unsigned long size);
31622-unsigned long __copy_user_intel(void __user *to, const void *from,
31623+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
31624+ unsigned long size);
31625+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
31626 unsigned long size);
31627 unsigned long __copy_user_zeroing_intel_nocache(void *to,
31628 const void __user *from, unsigned long size);
31629 #endif /* CONFIG_X86_INTEL_USERCOPY */
31630
31631 /* Generic arbitrary sized copy. */
31632-#define __copy_user(to, from, size) \
31633+#define __copy_user(to, from, size, prefix, set, restore) \
31634 do { \
31635 int __d0, __d1, __d2; \
31636 __asm__ __volatile__( \
31637+ set \
31638 " cmp $7,%0\n" \
31639 " jbe 1f\n" \
31640 " movl %1,%0\n" \
31641 " negl %0\n" \
31642 " andl $7,%0\n" \
31643 " subl %0,%3\n" \
31644- "4: rep; movsb\n" \
31645+ "4: rep; "prefix"movsb\n" \
31646 " movl %3,%0\n" \
31647 " shrl $2,%0\n" \
31648 " andl $3,%3\n" \
31649 " .align 2,0x90\n" \
31650- "0: rep; movsl\n" \
31651+ "0: rep; "prefix"movsl\n" \
31652 " movl %3,%0\n" \
31653- "1: rep; movsb\n" \
31654+ "1: rep; "prefix"movsb\n" \
31655 "2:\n" \
31656+ restore \
31657 ".section .fixup,\"ax\"\n" \
31658 "5: addl %3,%0\n" \
31659 " jmp 2b\n" \
31660@@ -538,14 +650,14 @@ do { \
31661 " negl %0\n" \
31662 " andl $7,%0\n" \
31663 " subl %0,%3\n" \
31664- "4: rep; movsb\n" \
31665+ "4: rep; "__copyuser_seg"movsb\n" \
31666 " movl %3,%0\n" \
31667 " shrl $2,%0\n" \
31668 " andl $3,%3\n" \
31669 " .align 2,0x90\n" \
31670- "0: rep; movsl\n" \
31671+ "0: rep; "__copyuser_seg"movsl\n" \
31672 " movl %3,%0\n" \
31673- "1: rep; movsb\n" \
31674+ "1: rep; "__copyuser_seg"movsb\n" \
31675 "2:\n" \
31676 ".section .fixup,\"ax\"\n" \
31677 "5: addl %3,%0\n" \
31678@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
31679 {
31680 stac();
31681 if (movsl_is_ok(to, from, n))
31682- __copy_user(to, from, n);
31683+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
31684 else
31685- n = __copy_user_intel(to, from, n);
31686+ n = __generic_copy_to_user_intel(to, from, n);
31687 clac();
31688 return n;
31689 }
31690@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
31691 {
31692 stac();
31693 if (movsl_is_ok(to, from, n))
31694- __copy_user(to, from, n);
31695+ __copy_user(to, from, n, __copyuser_seg, "", "");
31696 else
31697- n = __copy_user_intel((void __user *)to,
31698- (const void *)from, n);
31699+ n = __generic_copy_from_user_intel(to, from, n);
31700 clac();
31701 return n;
31702 }
31703@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
31704 if (n > 64 && cpu_has_xmm2)
31705 n = __copy_user_intel_nocache(to, from, n);
31706 else
31707- __copy_user(to, from, n);
31708+ __copy_user(to, from, n, __copyuser_seg, "", "");
31709 #else
31710- __copy_user(to, from, n);
31711+ __copy_user(to, from, n, __copyuser_seg, "", "");
31712 #endif
31713 clac();
31714 return n;
31715 }
31716 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
31717
31718-/**
31719- * copy_to_user: - Copy a block of data into user space.
31720- * @to: Destination address, in user space.
31721- * @from: Source address, in kernel space.
31722- * @n: Number of bytes to copy.
31723- *
31724- * Context: User context only. This function may sleep.
31725- *
31726- * Copy data from kernel space to user space.
31727- *
31728- * Returns number of bytes that could not be copied.
31729- * On success, this will be zero.
31730- */
31731-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
31732+#ifdef CONFIG_PAX_MEMORY_UDEREF
31733+void __set_fs(mm_segment_t x)
31734 {
31735- if (access_ok(VERIFY_WRITE, to, n))
31736- n = __copy_to_user(to, from, n);
31737- return n;
31738+ switch (x.seg) {
31739+ case 0:
31740+ loadsegment(gs, 0);
31741+ break;
31742+ case TASK_SIZE_MAX:
31743+ loadsegment(gs, __USER_DS);
31744+ break;
31745+ case -1UL:
31746+ loadsegment(gs, __KERNEL_DS);
31747+ break;
31748+ default:
31749+ BUG();
31750+ }
31751 }
31752-EXPORT_SYMBOL(_copy_to_user);
31753+EXPORT_SYMBOL(__set_fs);
31754
31755-/**
31756- * copy_from_user: - Copy a block of data from user space.
31757- * @to: Destination address, in kernel space.
31758- * @from: Source address, in user space.
31759- * @n: Number of bytes to copy.
31760- *
31761- * Context: User context only. This function may sleep.
31762- *
31763- * Copy data from user space to kernel space.
31764- *
31765- * Returns number of bytes that could not be copied.
31766- * On success, this will be zero.
31767- *
31768- * If some data could not be copied, this function will pad the copied
31769- * data to the requested size using zero bytes.
31770- */
31771-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
31772+void set_fs(mm_segment_t x)
31773 {
31774- if (access_ok(VERIFY_READ, from, n))
31775- n = __copy_from_user(to, from, n);
31776- else
31777- memset(to, 0, n);
31778- return n;
31779+ current_thread_info()->addr_limit = x;
31780+ __set_fs(x);
31781 }
31782-EXPORT_SYMBOL(_copy_from_user);
31783+EXPORT_SYMBOL(set_fs);
31784+#endif
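
The tail of usercopy_32.c swaps the generic _copy_{to,from}_user definitions (which move elsewhere in the patch) for the 32-bit UDEREF segment plumbing: set_fs() now also loads %gs with a segment matching the new limit (0, __USER_DS or __KERNEL_DS), and the user-facing accesses earlier in the file gained a "gs;" override through __copyuser_seg. An accidental, unprefixed dereference of a user pointer from kernel code then faults instead of quietly reaching userland. A sketch of a prefixed access under those assumptions:

	/* sketch: a byte read that goes through the %gs segment
	 * installed by __set_fs() above (32-bit UDEREF) */
	static inline unsigned char gs_read_byte_sketch(const unsigned char *uaddr)
	{
		unsigned char c;

		asm("gs; movb %1, %0" : "=q" (c) : "m" (*uaddr));
		return c;
	}
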
31785diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
31786index c905e89..01ab928 100644
31787--- a/arch/x86/lib/usercopy_64.c
31788+++ b/arch/x86/lib/usercopy_64.c
31789@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31790 might_fault();
31791 /* no memory constraint because it doesn't change any memory gcc knows
31792 about */
31793+ pax_open_userland();
31794 stac();
31795 asm volatile(
31796 " testq %[size8],%[size8]\n"
31797@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31798 _ASM_EXTABLE(0b,3b)
31799 _ASM_EXTABLE(1b,2b)
31800 : [size8] "=&c"(size), [dst] "=&D" (__d0)
31801- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
31802+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
31803 [zero] "r" (0UL), [eight] "r" (8UL));
31804 clac();
31805+ pax_close_userland();
31806 return size;
31807 }
31808 EXPORT_SYMBOL(__clear_user);
31809@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
31810 }
31811 EXPORT_SYMBOL(clear_user);
31812
31813-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
31814+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
31815 {
31816- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
31817- return copy_user_generic((__force void *)to, (__force void *)from, len);
31818- }
31819- return len;
31820+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
31821+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
31822+ return len;
31823 }
31824 EXPORT_SYMBOL(copy_in_user);
31825
31826@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
31827 * it is not necessary to optimize tail handling.
31828 */
31829 __visible unsigned long
31830-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31831+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
31832 {
31833 char c;
31834 unsigned zero_len;
31835
31836+ clac();
31837+ pax_close_userland();
31838 for (; len; --len, to++) {
31839 if (__get_user_nocheck(c, from++, sizeof(char)))
31840 break;
31841@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31842 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
31843 if (__put_user_nocheck(c, to++, sizeof(char)))
31844 break;
31845- clac();
31846 return len;
31847 }
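
On the 64-bit side the helpers converge on one bracketing pattern: open the UDEREF userland window, lift SMAP with stac(), perform the access through the ____m() remapping, then clac() and close the window again. copy_user_handle_tail() gets the closing half at its entry because it runs after a faulting copy that left the window open. A sketch of the pattern, where raw_user_op() is a hypothetical placeholder for the actual access:

	/* sketch of the UDEREF/SMAP bracketing used above;
	 * raw_user_op() is hypothetical */
	static unsigned long uderef_user_op_sketch(void __user *addr,
						   unsigned long size)
	{
		unsigned long ret;

		pax_open_userland();	/* make userland reachable */
		stac();			/* lift SMAP for the access */
		ret = raw_user_op(____m(addr), size);
		clac();
		pax_close_userland();
		return ret;
	}
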
31848diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
31849index 6a19ad9..1c48f9a 100644
31850--- a/arch/x86/mm/Makefile
31851+++ b/arch/x86/mm/Makefile
31852@@ -30,3 +30,7 @@ obj-$(CONFIG_ACPI_NUMA) += srat.o
31853 obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
31854
31855 obj-$(CONFIG_MEMTEST) += memtest.o
31856+
31857+quote:="
31858+obj-$(CONFIG_X86_64) += uderef_64.o
31859+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
31860diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
31861index 903ec1e..c4166b2 100644
31862--- a/arch/x86/mm/extable.c
31863+++ b/arch/x86/mm/extable.c
31864@@ -6,12 +6,24 @@
31865 static inline unsigned long
31866 ex_insn_addr(const struct exception_table_entry *x)
31867 {
31868- return (unsigned long)&x->insn + x->insn;
31869+ unsigned long reloc = 0;
31870+
31871+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31872+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31873+#endif
31874+
31875+ return (unsigned long)&x->insn + x->insn + reloc;
31876 }
31877 static inline unsigned long
31878 ex_fixup_addr(const struct exception_table_entry *x)
31879 {
31880- return (unsigned long)&x->fixup + x->fixup;
31881+ unsigned long reloc = 0;
31882+
31883+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31884+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31885+#endif
31886+
31887+ return (unsigned long)&x->fixup + x->fixup + reloc;
31888 }
31889
31890 int fixup_exception(struct pt_regs *regs)
31891@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
31892 unsigned long new_ip;
31893
31894 #ifdef CONFIG_PNPBIOS
31895- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
31896+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
31897 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
31898 extern u32 pnp_bios_is_utter_crap;
31899 pnp_bios_is_utter_crap = 1;
31900@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
31901 i += 4;
31902 p->fixup -= i;
31903 i += 4;
31904+
31905+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31906+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
31907+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31908+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31909+#endif
31910+
31911 }
31912 }
31913
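
The extable changes account for a KERNEXEC-relocated i386 kernel: x86 exception-table entries are position-independent, storing each target as a 32-bit offset relative to the entry field itself, so when the image is shifted by ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR the reloc term added above folds that delta back into the computed addresses (while sort_extable() subtracts it back out, as shown in the same hunk). A self-contained model of the relative encoding:

	/* model of x86's relative exception-table entries: each field
	 * stores its target as an offset from the field's own address */
	struct rel_extable_entry {
		int insn;	/* target == (long)&entry->insn + entry->insn */
		int fixup;	/* target == (long)&entry->fixup + entry->fixup */
	};

	static unsigned long rel_insn_addr(const struct rel_extable_entry *e,
					   long reloc)	/* 0 unless relocated */
	{
		return (unsigned long)&e->insn + e->insn + reloc;
	}
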
31914diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
31915index 3664279..c6a7830 100644
31916--- a/arch/x86/mm/fault.c
31917+++ b/arch/x86/mm/fault.c
31918@@ -14,12 +14,19 @@
31919 #include <linux/hugetlb.h> /* hstate_index_to_shift */
31920 #include <linux/prefetch.h> /* prefetchw */
31921 #include <linux/context_tracking.h> /* exception_enter(), ... */
31922+#include <linux/unistd.h>
31923+#include <linux/compiler.h>
31924
31925 #include <asm/traps.h> /* dotraplinkage, ... */
31926 #include <asm/pgalloc.h> /* pgd_*(), ... */
31927 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
31928 #include <asm/fixmap.h> /* VSYSCALL_ADDR */
31929 #include <asm/vsyscall.h> /* emulate_vsyscall */
31930+#include <asm/tlbflush.h>
31931+
31932+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31933+#include <asm/stacktrace.h>
31934+#endif
31935
31936 #define CREATE_TRACE_POINTS
31937 #include <asm/trace/exceptions.h>
31938@@ -60,7 +67,7 @@ static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
31939 int ret = 0;
31940
31941 /* kprobe_running() needs smp_processor_id() */
31942- if (kprobes_built_in() && !user_mode_vm(regs)) {
31943+ if (kprobes_built_in() && !user_mode(regs)) {
31944 preempt_disable();
31945 if (kprobe_running() && kprobe_fault_handler(regs, 14))
31946 ret = 1;
31947@@ -121,7 +128,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
31948 return !instr_lo || (instr_lo>>1) == 1;
31949 case 0x00:
31950 /* Prefetch instruction is 0x0F0D or 0x0F18 */
31951- if (probe_kernel_address(instr, opcode))
31952+ if (user_mode(regs)) {
31953+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31954+ return 0;
31955+ } else if (probe_kernel_address(instr, opcode))
31956 return 0;
31957
31958 *prefetch = (instr_lo == 0xF) &&
31959@@ -155,7 +165,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
31960 while (instr < max_instr) {
31961 unsigned char opcode;
31962
31963- if (probe_kernel_address(instr, opcode))
31964+ if (user_mode(regs)) {
31965+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31966+ break;
31967+ } else if (probe_kernel_address(instr, opcode))
31968 break;
31969
31970 instr++;
31971@@ -186,6 +199,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
31972 force_sig_info(si_signo, &info, tsk);
31973 }
31974
31975+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31976+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
31977+#endif
31978+
31979+#ifdef CONFIG_PAX_EMUTRAMP
31980+static int pax_handle_fetch_fault(struct pt_regs *regs);
31981+#endif
31982+
31983+#ifdef CONFIG_PAX_PAGEEXEC
31984+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
31985+{
31986+ pgd_t *pgd;
31987+ pud_t *pud;
31988+ pmd_t *pmd;
31989+
31990+ pgd = pgd_offset(mm, address);
31991+ if (!pgd_present(*pgd))
31992+ return NULL;
31993+ pud = pud_offset(pgd, address);
31994+ if (!pud_present(*pud))
31995+ return NULL;
31996+ pmd = pmd_offset(pud, address);
31997+ if (!pmd_present(*pmd))
31998+ return NULL;
31999+ return pmd;
32000+}
32001+#endif
32002+
32003 DEFINE_SPINLOCK(pgd_lock);
32004 LIST_HEAD(pgd_list);
32005
32006@@ -236,10 +277,27 @@ void vmalloc_sync_all(void)
32007 for (address = VMALLOC_START & PMD_MASK;
32008 address >= TASK_SIZE && address < FIXADDR_TOP;
32009 address += PMD_SIZE) {
32010+
32011+#ifdef CONFIG_PAX_PER_CPU_PGD
32012+ unsigned long cpu;
32013+#else
32014 struct page *page;
32015+#endif
32016
32017 spin_lock(&pgd_lock);
32018+
32019+#ifdef CONFIG_PAX_PER_CPU_PGD
32020+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
32021+ pgd_t *pgd = get_cpu_pgd(cpu, user);
32022+ pmd_t *ret;
32023+
32024+ ret = vmalloc_sync_one(pgd, address);
32025+ if (!ret)
32026+ break;
32027+ pgd = get_cpu_pgd(cpu, kernel);
32028+#else
32029 list_for_each_entry(page, &pgd_list, lru) {
32030+ pgd_t *pgd;
32031 spinlock_t *pgt_lock;
32032 pmd_t *ret;
32033
32034@@ -247,8 +305,14 @@ void vmalloc_sync_all(void)
32035 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
32036
32037 spin_lock(pgt_lock);
32038- ret = vmalloc_sync_one(page_address(page), address);
32039+ pgd = page_address(page);
32040+#endif
32041+
32042+ ret = vmalloc_sync_one(pgd, address);
32043+
32044+#ifndef CONFIG_PAX_PER_CPU_PGD
32045 spin_unlock(pgt_lock);
32046+#endif
32047
32048 if (!ret)
32049 break;
32050@@ -282,6 +346,12 @@ static noinline int vmalloc_fault(unsigned long address)
32051 * an interrupt in the middle of a task switch..
32052 */
32053 pgd_paddr = read_cr3();
32054+
32055+#ifdef CONFIG_PAX_PER_CPU_PGD
32056+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
32057+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
32058+#endif
32059+
32060 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
32061 if (!pmd_k)
32062 return -1;
32063@@ -378,11 +448,25 @@ static noinline int vmalloc_fault(unsigned long address)
32064 * happen within a race in page table update. In the later
32065 * case just flush:
32066 */
32067- pgd = pgd_offset(current->active_mm, address);
32068+
32069 pgd_ref = pgd_offset_k(address);
32070 if (pgd_none(*pgd_ref))
32071 return -1;
32072
32073+#ifdef CONFIG_PAX_PER_CPU_PGD
32074+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
32075+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
32076+ if (pgd_none(*pgd)) {
32077+ set_pgd(pgd, *pgd_ref);
32078+ arch_flush_lazy_mmu_mode();
32079+ } else {
32080+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
32081+ }
32082+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
32083+#else
32084+ pgd = pgd_offset(current->active_mm, address);
32085+#endif
32086+
32087 if (pgd_none(*pgd)) {
32088 set_pgd(pgd, *pgd_ref);
32089 arch_flush_lazy_mmu_mode();
32090@@ -549,7 +633,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
32091 static int is_errata100(struct pt_regs *regs, unsigned long address)
32092 {
32093 #ifdef CONFIG_X86_64
32094- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
32095+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
32096 return 1;
32097 #endif
32098 return 0;
32099@@ -576,7 +660,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
32100 }
32101
32102 static const char nx_warning[] = KERN_CRIT
32103-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
32104+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
32105
32106 static void
32107 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32108@@ -585,7 +669,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32109 if (!oops_may_print())
32110 return;
32111
32112- if (error_code & PF_INSTR) {
32113+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
32114 unsigned int level;
32115 pgd_t *pgd;
32116 pte_t *pte;
32117@@ -596,9 +680,21 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32118 pte = lookup_address_in_pgd(pgd, address, &level);
32119
32120 if (pte && pte_present(*pte) && !pte_exec(*pte))
32121- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
32122+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
32123 }
32124
32125+#ifdef CONFIG_PAX_KERNEXEC
32126+ if (init_mm.start_code <= address && address < init_mm.end_code) {
32127+ if (current->signal->curr_ip)
32128+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
32129+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
32130+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
32131+ else
32132+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
32133+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
32134+ }
32135+#endif
32136+
32137 printk(KERN_ALERT "BUG: unable to handle kernel ");
32138 if (address < PAGE_SIZE)
32139 printk(KERN_CONT "NULL pointer dereference");
32140@@ -779,6 +875,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
32141 return;
32142 }
32143 #endif
32144+
32145+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32146+ if (pax_is_fetch_fault(regs, error_code, address)) {
32147+
32148+#ifdef CONFIG_PAX_EMUTRAMP
32149+ switch (pax_handle_fetch_fault(regs)) {
32150+ case 2:
32151+ return;
32152+ }
32153+#endif
32154+
32155+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32156+ do_group_exit(SIGKILL);
32157+ }
32158+#endif
32159+
32160 /* Kernel addresses are always protection faults: */
32161 if (address >= TASK_SIZE)
32162 error_code |= PF_PROT;
32163@@ -864,7 +976,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
32164 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
32165 printk(KERN_ERR
32166 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
32167- tsk->comm, tsk->pid, address);
32168+ tsk->comm, task_pid_nr(tsk), address);
32169 code = BUS_MCEERR_AR;
32170 }
32171 #endif
32172@@ -918,6 +1030,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
32173 return 1;
32174 }
32175
32176+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32177+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
32178+{
32179+ pte_t *pte;
32180+ pmd_t *pmd;
32181+ spinlock_t *ptl;
32182+ unsigned char pte_mask;
32183+
32184+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
32185+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
32186+ return 0;
32187+
32188+ /* PaX: it's our fault, let's handle it if we can */
32189+
32190+ /* PaX: take a look at read faults before acquiring any locks */
32191+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
32192+ /* instruction fetch attempt from a protected page in user mode */
32193+ up_read(&mm->mmap_sem);
32194+
32195+#ifdef CONFIG_PAX_EMUTRAMP
32196+ switch (pax_handle_fetch_fault(regs)) {
32197+ case 2:
32198+ return 1;
32199+ }
32200+#endif
32201+
32202+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32203+ do_group_exit(SIGKILL);
32204+ }
32205+
32206+ pmd = pax_get_pmd(mm, address);
32207+ if (unlikely(!pmd))
32208+ return 0;
32209+
32210+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
32211+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
32212+ pte_unmap_unlock(pte, ptl);
32213+ return 0;
32214+ }
32215+
32216+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
32217+ /* write attempt to a protected page in user mode */
32218+ pte_unmap_unlock(pte, ptl);
32219+ return 0;
32220+ }
32221+
32222+#ifdef CONFIG_SMP
32223+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
32224+#else
32225+ if (likely(address > get_limit(regs->cs)))
32226+#endif
32227+ {
32228+ set_pte(pte, pte_mkread(*pte));
32229+ __flush_tlb_one(address);
32230+ pte_unmap_unlock(pte, ptl);
32231+ up_read(&mm->mmap_sem);
32232+ return 1;
32233+ }
32234+
32235+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
32236+
32237+ /*
32238+ * PaX: fill DTLB with user rights and retry
32239+ */
32240+ __asm__ __volatile__ (
32241+ "orb %2,(%1)\n"
32242+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
32243+/*
32244+ * PaX: let this uncommented 'invlpg' remind us of the behaviour of Intel's
32245+ * (and AMD's) TLBs. Namely, they do not cache PTEs that would raise *any*
32246+ * page fault when examined during a TLB load attempt. This is true not only
32247+ * for PTEs holding a non-present entry but also for present entries that will
32248+ * raise a page fault (such as those set up by PaX, or the copy-on-write
32249+ * mechanism). In effect it means that we do *not* need to flush the TLBs
32250+ * for our target pages since their PTEs are simply not in the TLBs at all.
32251+ *
32252+ * The best thing about omitting it is that we gain around 15-20% speed in the
32253+ * fast path of the page fault handler and can get rid of tracing since we
32254+ * can no longer flush unintended entries.
32255+ */
32256+ "invlpg (%0)\n"
32257+#endif
32258+ __copyuser_seg"testb $0,(%0)\n"
32259+ "xorb %3,(%1)\n"
32260+ :
32261+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
32262+ : "memory", "cc");
32263+ pte_unmap_unlock(pte, ptl);
32264+ up_read(&mm->mmap_sem);
32265+ return 1;
32266+}
32267+#endif
32268+
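
The pte_mask computed just above folds the fault's write bit into the PTE dirty bit without a branch: PF_WRITE is bit 1, and shifting it left by _PAGE_BIT_DIRTY-1 lands it exactly on the dirty bit. A standalone sketch that checks the arithmetic, with the bit positions mirrored from arch/x86/include/asm/pgtable_types.h for illustration only:

#include <assert.h>
#include <stdio.h>

#define _PAGE_BIT_USER     2
#define _PAGE_BIT_ACCESSED 5
#define _PAGE_BIT_DIRTY    6
#define _PAGE_USER     (1UL << _PAGE_BIT_USER)
#define _PAGE_ACCESSED (1UL << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY    (1UL << _PAGE_BIT_DIRTY)
#define PF_WRITE       (1UL << 1)

int main(void)
{
	unsigned long read_mask  = _PAGE_ACCESSED | _PAGE_USER |
				   ((0UL & PF_WRITE) << (_PAGE_BIT_DIRTY - 1));
	unsigned long write_mask = _PAGE_ACCESSED | _PAGE_USER |
				   ((PF_WRITE) << (_PAGE_BIT_DIRTY - 1));

	/* PF_WRITE is bit 1; shifted by _PAGE_BIT_DIRTY-1 it becomes the
	 * dirty bit, so write faults pre-dirty the PTE without a branch. */
	assert(!(read_mask & _PAGE_DIRTY));
	assert(write_mask & _PAGE_DIRTY);
	printf("read mask: %#lx  write mask: %#lx\n", read_mask, write_mask);
	return 0;
}
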
32269 /*
32270 * Handle a spurious fault caused by a stale TLB entry.
32271 *
32272@@ -985,6 +1190,9 @@ int show_unhandled_signals = 1;
32273 static inline int
32274 access_error(unsigned long error_code, struct vm_area_struct *vma)
32275 {
32276+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
32277+ return 1;
32278+
32279 if (error_code & PF_WRITE) {
32280 /* write, present and write, not present: */
32281 if (unlikely(!(vma->vm_flags & VM_WRITE)))
32282@@ -1019,7 +1227,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
32283 if (error_code & PF_USER)
32284 return false;
32285
32286- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
32287+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
32288 return false;
32289
32290 return true;
32291@@ -1047,6 +1255,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32292 tsk = current;
32293 mm = tsk->mm;
32294
32295+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32296+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
32297+ if (!search_exception_tables(regs->ip)) {
32298+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32299+ bad_area_nosemaphore(regs, error_code, address);
32300+ return;
32301+ }
32302+ if (address < pax_user_shadow_base) {
32303+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32304+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
32305+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
32306+ } else
32307+ address -= pax_user_shadow_base;
32308+ }
32309+#endif
32310+
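
The UDEREF block above assumes kernel accessors reach userland through a shadow mapping at pax_user_shadow_base, so a legitimate kernel-mode fault lands in [base, 2*base) and is folded back by subtracting the base, while anything below the base with no exception-table fixup is reported. A toy standalone sketch of the fold (the base and address values here are made up):

#include <stdio.h>

int main(void)
{
	unsigned long long base = 1ULL << 46;     /* hypothetical pax_user_shadow_base */
	unsigned long long user_addr = 0x400000;  /* original userland address */
	unsigned long long fault = base + user_addr;

	/* A fault inside [base, 2*base) is a shadowed userland access:
	 * fold it back before the normal VMA lookup proceeds. */
	if (fault >= base && fault < 2 * base)
		printf("folded back to %#llx\n", fault - base);
	return 0;
}
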
32311 /*
32312 * Detect and handle instructions that would cause a page fault for
32313 * both a tracked kernel page and a userspace page.
32314@@ -1124,7 +1348,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32315 * User-mode registers count as a user access even for any
32316 * potential system fault or CPU buglet:
32317 */
32318- if (user_mode_vm(regs)) {
32319+ if (user_mode(regs)) {
32320 local_irq_enable();
32321 error_code |= PF_USER;
32322 flags |= FAULT_FLAG_USER;
32323@@ -1171,6 +1395,11 @@ retry:
32324 might_sleep();
32325 }
32326
32327+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32328+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
32329+ return;
32330+#endif
32331+
32332 vma = find_vma(mm, address);
32333 if (unlikely(!vma)) {
32334 bad_area(regs, error_code, address);
32335@@ -1182,18 +1411,24 @@ retry:
32336 bad_area(regs, error_code, address);
32337 return;
32338 }
32339- if (error_code & PF_USER) {
32340- /*
32341- * Accessing the stack below %sp is always a bug.
32342- * The large cushion allows instructions like enter
32343- * and pusha to work. ("enter $65535, $31" pushes
32344- * 32 pointers and then decrements %sp by 65535.)
32345- */
32346- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
32347- bad_area(regs, error_code, address);
32348- return;
32349- }
32350+ /*
32351+ * Accessing the stack below %sp is always a bug.
32352+ * The large cushion allows instructions like enter
32353+ * and pusha to work. ("enter $65535, $31" pushes
32354+ * 32 pointers and then decrements %sp by 65535.)
32355+ */
32356+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
32357+ bad_area(regs, error_code, address);
32358+ return;
32359 }
32360+
32361+#ifdef CONFIG_PAX_SEGMEXEC
32362+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
32363+ bad_area(regs, error_code, address);
32364+ return;
32365+ }
32366+#endif
32367+
32368 if (unlikely(expand_stack(vma, address))) {
32369 bad_area(regs, error_code, address);
32370 return;
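
The relocated check above compares against the saved userland stack pointer and tolerates accesses up to 65536 + 32*sizeof(long) bytes below it, sized for the worst case of 'enter $65535, $31'. A quick standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long cushion = 65536 + 32 * sizeof(unsigned long);
	/* enter $65535, $31: pushes %bp plus 31 nested frame pointers
	 * (32 pointer-sized stores), then subtracts 65535 from %sp. */
	unsigned long enter_worst = 32 * sizeof(unsigned long) + 65535;

	printf("cushion=%lu, worst-case enter=%lu -> %s\n",
	       cushion, enter_worst,
	       enter_worst <= cushion ? "allowed" : "would fault");
	return 0;
}
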
32371@@ -1309,3 +1544,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
32372 }
32373 NOKPROBE_SYMBOL(trace_do_page_fault);
32374 #endif /* CONFIG_TRACING */
32375+
32376+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32377+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
32378+{
32379+ struct mm_struct *mm = current->mm;
32380+ unsigned long ip = regs->ip;
32381+
32382+ if (v8086_mode(regs))
32383+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
32384+
32385+#ifdef CONFIG_PAX_PAGEEXEC
32386+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
32387+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
32388+ return true;
32389+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
32390+ return true;
32391+ return false;
32392+ }
32393+#endif
32394+
32395+#ifdef CONFIG_PAX_SEGMEXEC
32396+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
32397+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
32398+ return true;
32399+ return false;
32400+ }
32401+#endif
32402+
32403+ return false;
32404+}
32405+#endif
32406+
32407+#ifdef CONFIG_PAX_EMUTRAMP
32408+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
32409+{
32410+ int err;
32411+
32412+ do { /* PaX: libffi trampoline emulation */
32413+ unsigned char mov, jmp;
32414+ unsigned int addr1, addr2;
32415+
32416+#ifdef CONFIG_X86_64
32417+ if ((regs->ip + 9) >> 32)
32418+ break;
32419+#endif
32420+
32421+ err = get_user(mov, (unsigned char __user *)regs->ip);
32422+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32423+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32424+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32425+
32426+ if (err)
32427+ break;
32428+
32429+ if (mov == 0xB8 && jmp == 0xE9) {
32430+ regs->ax = addr1;
32431+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32432+ return 2;
32433+ }
32434+ } while (0);
32435+
32436+ do { /* PaX: gcc trampoline emulation #1 */
32437+ unsigned char mov1, mov2;
32438+ unsigned short jmp;
32439+ unsigned int addr1, addr2;
32440+
32441+#ifdef CONFIG_X86_64
32442+ if ((regs->ip + 11) >> 32)
32443+ break;
32444+#endif
32445+
32446+ err = get_user(mov1, (unsigned char __user *)regs->ip);
32447+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32448+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
32449+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32450+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
32451+
32452+ if (err)
32453+ break;
32454+
32455+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
32456+ regs->cx = addr1;
32457+ regs->ax = addr2;
32458+ regs->ip = addr2;
32459+ return 2;
32460+ }
32461+ } while (0);
32462+
32463+ do { /* PaX: gcc trampoline emulation #2 */
32464+ unsigned char mov, jmp;
32465+ unsigned int addr1, addr2;
32466+
32467+#ifdef CONFIG_X86_64
32468+ if ((regs->ip + 9) >> 32)
32469+ break;
32470+#endif
32471+
32472+ err = get_user(mov, (unsigned char __user *)regs->ip);
32473+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32474+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32475+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32476+
32477+ if (err)
32478+ break;
32479+
32480+ if (mov == 0xB9 && jmp == 0xE9) {
32481+ regs->cx = addr1;
32482+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32483+ return 2;
32484+ }
32485+ } while (0);
32486+
32487+ return 1; /* PaX in action */
32488+}
32489+
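
The first pattern matched above is the 32-bit libffi trampoline: 'mov $addr1, %eax' (opcode 0xB8 plus imm32) followed by 'jmp rel32' (0xE9 plus rel32), where the displacement is relative to the end of the 10-byte sequence. A standalone sketch of the same decode over a fabricated buffer; the addresses are made up, and the kernel code performs these reads with get_user() on the faulting ip:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	uint32_t ip = 0x08048000;          /* pretend faulting address */
	uint32_t addr1 = 0xdeadbeef, rel = 0x100;
	unsigned char insn[10];

	insn[0] = 0xB8;                    /* mov $imm32, %eax */
	memcpy(insn + 1, &addr1, 4);
	insn[5] = 0xE9;                    /* jmp rel32 */
	memcpy(insn + 6, &rel, 4);

	if (insn[0] == 0xB8 && insn[5] == 0xE9) {
		uint32_t eax, target;
		memcpy(&eax, insn + 1, 4);
		memcpy(&rel, insn + 6, 4);
		target = ip + rel + 10;    /* rel32 is relative to the insn end */
		printf("emulated: eax=%#x, new ip=%#x\n",
		       (unsigned)eax, (unsigned)target);
	}
	return 0;
}
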
32490+#ifdef CONFIG_X86_64
32491+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
32492+{
32493+ int err;
32494+
32495+ do { /* PaX: libffi trampoline emulation */
32496+ unsigned short mov1, mov2, jmp1;
32497+ unsigned char stcclc, jmp2;
32498+ unsigned long addr1, addr2;
32499+
32500+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32501+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32502+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32503+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32504+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
32505+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
32506+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
32507+
32508+ if (err)
32509+ break;
32510+
32511+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32512+ regs->r11 = addr1;
32513+ regs->r10 = addr2;
32514+ if (stcclc == 0xF8)
32515+ regs->flags &= ~X86_EFLAGS_CF;
32516+ else
32517+ regs->flags |= X86_EFLAGS_CF;
32518+ regs->ip = addr1;
32519+ return 2;
32520+ }
32521+ } while (0);
32522+
32523+ do { /* PaX: gcc trampoline emulation #1 */
32524+ unsigned short mov1, mov2, jmp1;
32525+ unsigned char jmp2;
32526+ unsigned int addr1;
32527+ unsigned long addr2;
32528+
32529+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32530+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
32531+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
32532+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
32533+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
32534+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
32535+
32536+ if (err)
32537+ break;
32538+
32539+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32540+ regs->r11 = addr1;
32541+ regs->r10 = addr2;
32542+ regs->ip = addr1;
32543+ return 2;
32544+ }
32545+ } while (0);
32546+
32547+ do { /* PaX: gcc trampoline emulation #2 */
32548+ unsigned short mov1, mov2, jmp1;
32549+ unsigned char jmp2;
32550+ unsigned long addr1, addr2;
32551+
32552+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32553+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32554+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32555+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32556+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
32557+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
32558+
32559+ if (err)
32560+ break;
32561+
32562+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32563+ regs->r11 = addr1;
32564+ regs->r10 = addr2;
32565+ regs->ip = addr1;
32566+ return 2;
32567+ }
32568+ } while (0);
32569+
32570+ return 1; /* PaX in action */
32571+}
32572+#endif
32573+
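
The 64-bit libffi pattern above additionally passes a flag in CF via stc/clc between two movabs loads and an indirect jump. A standalone matcher over a fabricated 24-byte buffer; the short-compare constants in the kernel code (0xBB49 and friends) are just these byte pairs read little-endian:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	/* 49 BB imm64: movabs $addr1, %r11;  49 BA imm64: movabs $addr2, %r10;
	 * F8/F9: clc/stc;  49 FF E3: jmp *%r11 */
	unsigned char t[24] = { 0x49, 0xBB, [10] = 0x49, 0xBA,
				[20] = 0xF9, 0x49, 0xFF, 0xE3 };
	uint64_t a1 = 0x1000, a2 = 0x2000;  /* made-up trampoline operands */

	memcpy(t + 2, &a1, 8);
	memcpy(t + 12, &a2, 8);

	if (t[0] == 0x49 && t[1] == 0xBB && t[10] == 0x49 && t[11] == 0xBA &&
	    (t[20] == 0xF8 || t[20] == 0xF9) &&
	    t[21] == 0x49 && t[22] == 0xFF && t[23] == 0xE3) {
		uint64_t r11, r10;
		memcpy(&r11, t + 2, 8);
		memcpy(&r10, t + 12, 8);
		printf("r11=%#llx r10=%#llx CF=%d -> jump to r11\n",
		       (unsigned long long)r11, (unsigned long long)r10,
		       t[20] == 0xF9);
	}
	return 0;
}
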
32574+/*
32575+ * PaX: decide what to do with offenders (regs->ip = fault address)
32576+ *
32577+ * returns 1 when the task should be killed
32578+ *         2 when a gcc trampoline was detected
32579+ */
32580+static int pax_handle_fetch_fault(struct pt_regs *regs)
32581+{
32582+ if (v8086_mode(regs))
32583+ return 1;
32584+
32585+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
32586+ return 1;
32587+
32588+#ifdef CONFIG_X86_32
32589+ return pax_handle_fetch_fault_32(regs);
32590+#else
32591+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
32592+ return pax_handle_fetch_fault_32(regs);
32593+ else
32594+ return pax_handle_fetch_fault_64(regs);
32595+#endif
32596+}
32597+#endif
32598+
32599+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32600+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
32601+{
32602+ long i;
32603+
32604+ printk(KERN_ERR "PAX: bytes at PC: ");
32605+ for (i = 0; i < 20; i++) {
32606+ unsigned char c;
32607+ if (get_user(c, (unsigned char __force_user *)pc+i))
32608+ printk(KERN_CONT "?? ");
32609+ else
32610+ printk(KERN_CONT "%02x ", c);
32611+ }
32612+ printk("\n");
32613+
32614+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
32615+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
32616+ unsigned long c;
32617+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
32618+#ifdef CONFIG_X86_32
32619+ printk(KERN_CONT "???????? ");
32620+#else
32621+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
32622+ printk(KERN_CONT "???????? ???????? ");
32623+ else
32624+ printk(KERN_CONT "???????????????? ");
32625+#endif
32626+ } else {
32627+#ifdef CONFIG_X86_64
32628+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
32629+ printk(KERN_CONT "%08x ", (unsigned int)c);
32630+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
32631+ } else
32632+#endif
32633+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
32634+ }
32635+ }
32636+ printk("\n");
32637+}
32638+#endif
32639+
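
pax_report_insns() above dumps 20 bytes at the faulting PC and a window around SP, substituting '?? ' for bytes that cannot be read. A standalone sketch of the PC dump format, reading our own code instead of a faulting task's; dereferencing a function pointer as data is not strictly portable C, but works on typical Linux/x86 where .text is readable:

#include <stdio.h>

int main(void)
{
	const unsigned char *pc = (const unsigned char *)main;
	int i;

	printf("PAX: bytes at PC: ");
	for (i = 0; i < 20; i++)
		printf("%02x ", pc[i]);  /* same "%02x " format as the report */
	printf("\n");
	return 0;
}
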
32640+/**
32641+ * probe_kernel_write(): safely attempt to write to a location
32642+ * @dst: address to write to
32643+ * @src: pointer to the data that shall be written
32644+ * @size: size of the data chunk
32645+ *
32646+ * Safely write to address @dst from the buffer at @src. If a kernel fault
32647+ * happens, handle that and return -EFAULT.
32648+ */
32649+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
32650+{
32651+ long ret;
32652+ mm_segment_t old_fs = get_fs();
32653+
32654+ set_fs(KERNEL_DS);
32655+ pagefault_disable();
32656+ pax_open_kernel();
32657+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
32658+ pax_close_kernel();
32659+ pagefault_enable();
32660+ set_fs(old_fs);
32661+
32662+ return ret ? -EFAULT : 0;
32663+}
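
probe_kernel_write() above turns a faulting kernel write into -EFAULT by disabling page faults and relying on the exception-table fixup inside __copy_to_user_inatomic(). A rough user-space analogue of the same contract, using SIGSEGV plus sigsetjmp in place of the kernel machinery; this is fine as a demo but not a pattern for production code:

#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <setjmp.h>
#include <errno.h>

static sigjmp_buf fault_jmp;

static void on_segv(int sig)
{
	(void)sig;
	siglongjmp(fault_jmp, 1);  /* unwind out of the faulting copy */
}

static long probe_write(void *dst, const void *src, size_t size)
{
	if (sigsetjmp(fault_jmp, 1))
		return -EFAULT;    /* faulted during the copy */
	memcpy(dst, src, size);
	return 0;
}

int main(void)
{
	char buf[16];

	signal(SIGSEGV, on_segv);
	printf("good: %ld\n", probe_write(buf, "hi", 3));
	printf("bad:  %ld\n", probe_write((void *)1, "hi", 3));
	return 0;
}
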
32664diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
32665index 207d9aef..69030980 100644
32666--- a/arch/x86/mm/gup.c
32667+++ b/arch/x86/mm/gup.c
32668@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
32669 addr = start;
32670 len = (unsigned long) nr_pages << PAGE_SHIFT;
32671 end = start + len;
32672- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
32673+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32674 (void __user *)start, len)))
32675 return 0;
32676
32677@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
32678 goto slow_irqon;
32679 #endif
32680
32681+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32682+ (void __user *)start, len)))
32683+ return 0;
32684+
32685 /*
32686 * XXX: batch / limit 'nr', to avoid large irq off latency
32687 * needs some instrumenting to determine the common sizes used by
32688diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
32689index 4500142..53a363c 100644
32690--- a/arch/x86/mm/highmem_32.c
32691+++ b/arch/x86/mm/highmem_32.c
32692@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
32693 idx = type + KM_TYPE_NR*smp_processor_id();
32694 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
32695 BUG_ON(!pte_none(*(kmap_pte-idx)));
32696+
32697+ pax_open_kernel();
32698 set_pte(kmap_pte-idx, mk_pte(page, prot));
32699+ pax_close_kernel();
32700+
32701 arch_flush_lazy_mmu_mode();
32702
32703 return (void *)vaddr;
32704diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
32705index 8b977eb..4732c33 100644
32706--- a/arch/x86/mm/hugetlbpage.c
32707+++ b/arch/x86/mm/hugetlbpage.c
32708@@ -80,23 +80,24 @@ int pud_huge(pud_t pud)
32709 #ifdef CONFIG_HUGETLB_PAGE
32710 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
32711 unsigned long addr, unsigned long len,
32712- unsigned long pgoff, unsigned long flags)
32713+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32714 {
32715 struct hstate *h = hstate_file(file);
32716 struct vm_unmapped_area_info info;
32717-
32718+
32719 info.flags = 0;
32720 info.length = len;
32721 info.low_limit = current->mm->mmap_legacy_base;
32722 info.high_limit = TASK_SIZE;
32723 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32724 info.align_offset = 0;
32725+ info.threadstack_offset = offset;
32726 return vm_unmapped_area(&info);
32727 }
32728
32729 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32730 unsigned long addr0, unsigned long len,
32731- unsigned long pgoff, unsigned long flags)
32732+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32733 {
32734 struct hstate *h = hstate_file(file);
32735 struct vm_unmapped_area_info info;
32736@@ -108,6 +109,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32737 info.high_limit = current->mm->mmap_base;
32738 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32739 info.align_offset = 0;
32740+ info.threadstack_offset = offset;
32741 addr = vm_unmapped_area(&info);
32742
32743 /*
32744@@ -120,6 +122,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32745 VM_BUG_ON(addr != -ENOMEM);
32746 info.flags = 0;
32747 info.low_limit = TASK_UNMAPPED_BASE;
32748+
32749+#ifdef CONFIG_PAX_RANDMMAP
32750+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
32751+ info.low_limit += current->mm->delta_mmap;
32752+#endif
32753+
32754 info.high_limit = TASK_SIZE;
32755 addr = vm_unmapped_area(&info);
32756 }
32757@@ -134,10 +142,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32758 struct hstate *h = hstate_file(file);
32759 struct mm_struct *mm = current->mm;
32760 struct vm_area_struct *vma;
32761+ unsigned long pax_task_size = TASK_SIZE;
32762+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
32763
32764 if (len & ~huge_page_mask(h))
32765 return -EINVAL;
32766- if (len > TASK_SIZE)
32767+
32768+#ifdef CONFIG_PAX_SEGMEXEC
32769+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
32770+ pax_task_size = SEGMEXEC_TASK_SIZE;
32771+#endif
32772+
32773+ pax_task_size -= PAGE_SIZE;
32774+
32775+ if (len > pax_task_size)
32776 return -ENOMEM;
32777
32778 if (flags & MAP_FIXED) {
32779@@ -146,19 +164,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32780 return addr;
32781 }
32782
32783+#ifdef CONFIG_PAX_RANDMMAP
32784+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
32785+#endif
32786+
32787 if (addr) {
32788 addr = ALIGN(addr, huge_page_size(h));
32789 vma = find_vma(mm, addr);
32790- if (TASK_SIZE - len >= addr &&
32791- (!vma || addr + len <= vma->vm_start))
32792+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
32793 return addr;
32794 }
32795 if (mm->get_unmapped_area == arch_get_unmapped_area)
32796 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
32797- pgoff, flags);
32798+ pgoff, flags, offset);
32799 else
32800 return hugetlb_get_unmapped_area_topdown(file, addr, len,
32801- pgoff, flags);
32802+ pgoff, flags, offset);
32803 }
32804 #endif /* CONFIG_HUGETLB_PAGE */
32805
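
The hunk above caps hugetlb mappings at a PaX task size: under SEGMEXEC the usable userland is halved (code is mirrored into the upper half) and one guard page is shaved off the top. A standalone sketch of the size check; the 3GB TASK_SIZE and the halving assume the common i386 configuration and are illustrative only:

#include <stdio.h>

int main(void)
{
	unsigned long task_size = 0xc0000000UL;           /* i386 TASK_SIZE */
	unsigned long segmexec_task_size = task_size / 2; /* data half under SEGMEXEC */
	unsigned long pax_task_size = segmexec_task_size - 4096; /* minus guard page */
	unsigned long len = 16UL * 1024 * 1024;           /* a 16MB hugetlb request */

	printf("limit=%#lx, 16MB request %s\n", pax_task_size,
	       len > pax_task_size ? "rejected" : "considered");
	return 0;
}
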
32806diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
32807index f971306..e83e0f6 100644
32808--- a/arch/x86/mm/init.c
32809+++ b/arch/x86/mm/init.c
32810@@ -4,6 +4,7 @@
32811 #include <linux/swap.h>
32812 #include <linux/memblock.h>
32813 #include <linux/bootmem.h> /* for max_low_pfn */
32814+#include <linux/tboot.h>
32815
32816 #include <asm/cacheflush.h>
32817 #include <asm/e820.h>
32818@@ -17,6 +18,8 @@
32819 #include <asm/proto.h>
32820 #include <asm/dma.h> /* for MAX_DMA_PFN */
32821 #include <asm/microcode.h>
32822+#include <asm/desc.h>
32823+#include <asm/bios_ebda.h>
32824
32825 #include "mm_internal.h"
32826
32827@@ -563,7 +566,18 @@ void __init init_mem_mapping(void)
32828 early_ioremap_page_table_range_init();
32829 #endif
32830
32831+#ifdef CONFIG_PAX_PER_CPU_PGD
32832+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
32833+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32834+ KERNEL_PGD_PTRS);
32835+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
32836+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32837+ KERNEL_PGD_PTRS);
32838+ load_cr3(get_cpu_pgd(0, kernel));
32839+#else
32840 load_cr3(swapper_pg_dir);
32841+#endif
32842+
32843 __flush_tlb_all();
32844
32845 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
32846@@ -579,10 +593,40 @@ void __init init_mem_mapping(void)
32847 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
32848 * mmio resources as well as potential bios/acpi data regions.
32849 */
32850+
32851+#ifdef CONFIG_GRKERNSEC_KMEM
32852+static unsigned int ebda_start __read_only;
32853+static unsigned int ebda_end __read_only;
32854+#endif
32855+
32856 int devmem_is_allowed(unsigned long pagenr)
32857 {
32858- if (pagenr < 256)
32859+#ifdef CONFIG_GRKERNSEC_KMEM
32860+ /* allow BDA */
32861+ if (!pagenr)
32862 return 1;
32863+ /* allow EBDA */
32864+ if (pagenr >= ebda_start && pagenr < ebda_end)
32865+ return 1;
32866+ /* if tboot is in use, allow access to its hardcoded serial log range */
32867+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
32868+ return 1;
32869+#else
32870+ if (!pagenr)
32871+ return 1;
32872+#ifdef CONFIG_VM86
32873+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
32874+ return 1;
32875+#endif
32876+#endif
32877+
32878+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
32879+ return 1;
32880+#ifdef CONFIG_GRKERNSEC_KMEM
32881+ /* throw out everything else below 1MB */
32882+ if (pagenr <= 256)
32883+ return 0;
32884+#endif
32885 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
32886 return 0;
32887 if (!page_is_ram(pagenr))
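
Under GRKERNSEC_KMEM the rewritten devmem_is_allowed() above whitelists the BDA page, the detected EBDA range, optionally the tboot log window, and the legacy ISA hole at 0xa0000-0x100000, and throws out the rest of low memory. A simplified standalone sketch of the page-number tests; tboot and the iomem/page_is_ram checks are omitted, and the EBDA values are the patch's fallbacks:

#include <stdio.h>

#define PAGE_SHIFT 12
#define ISA_START_ADDRESS 0xa0000
#define ISA_END_ADDRESS   0x100000

static int devmem_is_allowed_demo(unsigned long pagenr,
				  unsigned long ebda_start,
				  unsigned long ebda_end)
{
	if (!pagenr)                                    /* BDA */
		return 1;
	if (pagenr >= ebda_start && pagenr < ebda_end)  /* EBDA */
		return 1;
	if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr &&
	    pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))   /* legacy ISA hole */
		return 1;
	return 0;                                       /* rest of low memory */
}

int main(void)
{
	unsigned long ebda_start = 0x9f000 >> PAGE_SHIFT;  /* fallback values */
	unsigned long ebda_end   = 0xa0000 >> PAGE_SHIFT;
	unsigned long probes[] = { 0, 0x9f, 0xa0, 0x100, 0x42 };
	unsigned int i;

	for (i = 0; i < sizeof(probes) / sizeof(probes[0]); i++)
		printf("pfn %#lx -> %s\n", probes[i],
		       devmem_is_allowed_demo(probes[i], ebda_start, ebda_end)
		       ? "allowed" : "denied");
	return 0;
}
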
32888@@ -628,8 +672,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
32889 #endif
32890 }
32891
32892+#ifdef CONFIG_GRKERNSEC_KMEM
32893+static inline void gr_init_ebda(void)
32894+{
32895+ unsigned int ebda_addr;
32896+ unsigned int ebda_size = 0;
32897+
32898+ ebda_addr = get_bios_ebda();
32899+ if (ebda_addr) {
32900+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
32901+ ebda_size <<= 10;
32902+ }
32903+ if (ebda_addr && ebda_size) {
32904+ ebda_start = ebda_addr >> PAGE_SHIFT;
32905+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
32906+ } else {
32907+ ebda_start = 0x9f000 >> PAGE_SHIFT;
32908+ ebda_end = 0xa0000 >> PAGE_SHIFT;
32909+ }
32910+}
32911+#else
32912+static inline void gr_init_ebda(void) { }
32913+#endif
32914+
32915 void free_initmem(void)
32916 {
32917+#ifdef CONFIG_PAX_KERNEXEC
32918+#ifdef CONFIG_X86_32
32919+ /* PaX: limit KERNEL_CS to actual size */
32920+ unsigned long addr, limit;
32921+ struct desc_struct d;
32922+ int cpu;
32923+#else
32924+ pgd_t *pgd;
32925+ pud_t *pud;
32926+ pmd_t *pmd;
32927+ unsigned long addr, end;
32928+#endif
32929+#endif
32930+
32931+ gr_init_ebda();
32932+
32933+#ifdef CONFIG_PAX_KERNEXEC
32934+#ifdef CONFIG_X86_32
32935+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
32936+ limit = (limit - 1UL) >> PAGE_SHIFT;
32937+
32938+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
32939+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
32940+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
32941+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
32942+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
32943+ }
32944+
32945+ /* PaX: make KERNEL_CS read-only */
32946+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
32947+ if (!paravirt_enabled())
32948+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
32949+/*
32950+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
32951+ pgd = pgd_offset_k(addr);
32952+ pud = pud_offset(pgd, addr);
32953+ pmd = pmd_offset(pud, addr);
32954+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32955+ }
32956+*/
32957+#ifdef CONFIG_X86_PAE
32958+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
32959+/*
32960+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
32961+ pgd = pgd_offset_k(addr);
32962+ pud = pud_offset(pgd, addr);
32963+ pmd = pmd_offset(pud, addr);
32964+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32965+ }
32966+*/
32967+#endif
32968+
32969+#ifdef CONFIG_MODULES
32970+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
32971+#endif
32972+
32973+#else
32974+ /* PaX: make kernel code/rodata read-only, rest non-executable */
32975+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
32976+ pgd = pgd_offset_k(addr);
32977+ pud = pud_offset(pgd, addr);
32978+ pmd = pmd_offset(pud, addr);
32979+ if (!pmd_present(*pmd))
32980+ continue;
32981+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
32982+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32983+ else
32984+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32985+ }
32986+
32987+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
32988+ end = addr + KERNEL_IMAGE_SIZE;
32989+ for (; addr < end; addr += PMD_SIZE) {
32990+ pgd = pgd_offset_k(addr);
32991+ pud = pud_offset(pgd, addr);
32992+ pmd = pmd_offset(pud, addr);
32993+ if (!pmd_present(*pmd))
32994+ continue;
32995+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
32996+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32997+ }
32998+#endif
32999+
33000+ flush_tlb_all();
33001+#endif
33002+
33003 free_init_pages("unused kernel",
33004 (unsigned long)(&__init_begin),
33005 (unsigned long)(&__init_end));
33006diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
33007index e395048..cd38278 100644
33008--- a/arch/x86/mm/init_32.c
33009+++ b/arch/x86/mm/init_32.c
33010@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
33011 bool __read_mostly __vmalloc_start_set = false;
33012
33013 /*
33014- * Creates a middle page table and puts a pointer to it in the
33015- * given global directory entry. This only returns the gd entry
33016- * in non-PAE compilation mode, since the middle layer is folded.
33017- */
33018-static pmd_t * __init one_md_table_init(pgd_t *pgd)
33019-{
33020- pud_t *pud;
33021- pmd_t *pmd_table;
33022-
33023-#ifdef CONFIG_X86_PAE
33024- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
33025- pmd_table = (pmd_t *)alloc_low_page();
33026- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
33027- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
33028- pud = pud_offset(pgd, 0);
33029- BUG_ON(pmd_table != pmd_offset(pud, 0));
33030-
33031- return pmd_table;
33032- }
33033-#endif
33034- pud = pud_offset(pgd, 0);
33035- pmd_table = pmd_offset(pud, 0);
33036-
33037- return pmd_table;
33038-}
33039-
33040-/*
33041 * Create a page table and place a pointer to it in a middle page
33042 * directory entry:
33043 */
33044@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
33045 pte_t *page_table = (pte_t *)alloc_low_page();
33046
33047 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
33048+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
33049+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
33050+#else
33051 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
33052+#endif
33053 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
33054 }
33055
33056 return pte_offset_kernel(pmd, 0);
33057 }
33058
33059+static pmd_t * __init one_md_table_init(pgd_t *pgd)
33060+{
33061+ pud_t *pud;
33062+ pmd_t *pmd_table;
33063+
33064+ pud = pud_offset(pgd, 0);
33065+ pmd_table = pmd_offset(pud, 0);
33066+
33067+ return pmd_table;
33068+}
33069+
33070 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
33071 {
33072 int pgd_idx = pgd_index(vaddr);
33073@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33074 int pgd_idx, pmd_idx;
33075 unsigned long vaddr;
33076 pgd_t *pgd;
33077+ pud_t *pud;
33078 pmd_t *pmd;
33079 pte_t *pte = NULL;
33080 unsigned long count = page_table_range_init_count(start, end);
33081@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33082 pgd = pgd_base + pgd_idx;
33083
33084 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
33085- pmd = one_md_table_init(pgd);
33086- pmd = pmd + pmd_index(vaddr);
33087+ pud = pud_offset(pgd, vaddr);
33088+ pmd = pmd_offset(pud, vaddr);
33089+
33090+#ifdef CONFIG_X86_PAE
33091+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
33092+#endif
33093+
33094 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
33095 pmd++, pmd_idx++) {
33096 pte = page_table_kmap_check(one_page_table_init(pmd),
33097@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33098 }
33099 }
33100
33101-static inline int is_kernel_text(unsigned long addr)
33102+static inline int is_kernel_text(unsigned long start, unsigned long end)
33103 {
33104- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
33105- return 1;
33106- return 0;
33107+ if ((start >= ktla_ktva((unsigned long)_etext) ||
33108+ end <= ktla_ktva((unsigned long)_stext)) &&
33109+ (start >= ktla_ktva((unsigned long)_einittext) ||
33110+ end <= ktla_ktva((unsigned long)_sinittext)) &&
33111+
33112+#ifdef CONFIG_ACPI_SLEEP
33113+ (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
33114+#endif
33115+
33116+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
33117+ return 0;
33118+ return 1;
33119 }
33120
33121 /*
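
The new is_kernel_text() above takes a [start, end) range and answers whether it overlaps any protected region (.text, .init.text, the ACPI wakeup trampoline, low BIOS memory), written as a conjunction of per-range non-overlap tests. A standalone sketch of the underlying half-open-interval predicate with illustrative addresses:

#include <assert.h>
#include <stdio.h>

/* Half-open intervals [a0,a1) and [b0,b1) overlap iff a0 < b1 && b0 < a1;
 * the patch writes the negation for each range: start >= b1 || end <= b0. */
static int overlaps(unsigned long a0, unsigned long a1,
		    unsigned long b0, unsigned long b1)
{
	return a0 < b1 && b0 < a1;
}

int main(void)
{
	unsigned long stext = 0xc1000000, etext = 0xc1400000; /* fake text range */

	assert( overlaps(0xc13ff000, 0xc1400000, stext, etext));
	assert(!overlaps(0xc1400000, 0xc1600000, stext, etext)); /* abuts only */
	printf("overlap tests passed\n");
	return 0;
}
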
33122@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
33123 unsigned long last_map_addr = end;
33124 unsigned long start_pfn, end_pfn;
33125 pgd_t *pgd_base = swapper_pg_dir;
33126- int pgd_idx, pmd_idx, pte_ofs;
33127+ unsigned int pgd_idx, pmd_idx, pte_ofs;
33128 unsigned long pfn;
33129 pgd_t *pgd;
33130+ pud_t *pud;
33131 pmd_t *pmd;
33132 pte_t *pte;
33133 unsigned pages_2m, pages_4k;
33134@@ -291,8 +295,13 @@ repeat:
33135 pfn = start_pfn;
33136 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33137 pgd = pgd_base + pgd_idx;
33138- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
33139- pmd = one_md_table_init(pgd);
33140+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
33141+ pud = pud_offset(pgd, 0);
33142+ pmd = pmd_offset(pud, 0);
33143+
33144+#ifdef CONFIG_X86_PAE
33145+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
33146+#endif
33147
33148 if (pfn >= end_pfn)
33149 continue;
33150@@ -304,14 +313,13 @@ repeat:
33151 #endif
33152 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
33153 pmd++, pmd_idx++) {
33154- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
33155+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
33156
33157 /*
33158 * Map with big pages if possible, otherwise
33159 * create normal page tables:
33160 */
33161 if (use_pse) {
33162- unsigned int addr2;
33163 pgprot_t prot = PAGE_KERNEL_LARGE;
33164 /*
33165 * first pass will use the same initial
33166@@ -322,11 +330,7 @@ repeat:
33167 _PAGE_PSE);
33168
33169 pfn &= PMD_MASK >> PAGE_SHIFT;
33170- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
33171- PAGE_OFFSET + PAGE_SIZE-1;
33172-
33173- if (is_kernel_text(addr) ||
33174- is_kernel_text(addr2))
33175+ if (is_kernel_text(address, address + PMD_SIZE))
33176 prot = PAGE_KERNEL_LARGE_EXEC;
33177
33178 pages_2m++;
33179@@ -343,7 +347,7 @@ repeat:
33180 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33181 pte += pte_ofs;
33182 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
33183- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
33184+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
33185 pgprot_t prot = PAGE_KERNEL;
33186 /*
33187 * first pass will use the same initial
33188@@ -351,7 +355,7 @@ repeat:
33189 */
33190 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
33191
33192- if (is_kernel_text(addr))
33193+ if (is_kernel_text(address, address + PAGE_SIZE))
33194 prot = PAGE_KERNEL_EXEC;
33195
33196 pages_4k++;
33197@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
33198
33199 pud = pud_offset(pgd, va);
33200 pmd = pmd_offset(pud, va);
33201- if (!pmd_present(*pmd))
33202+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
33203 break;
33204
33205 /* should not be large page here */
33206@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
33207
33208 static void __init pagetable_init(void)
33209 {
33210- pgd_t *pgd_base = swapper_pg_dir;
33211-
33212- permanent_kmaps_init(pgd_base);
33213+ permanent_kmaps_init(swapper_pg_dir);
33214 }
33215
33216-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
33217+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
33218 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33219
33220 /* user-defined highmem size */
33221@@ -787,10 +789,10 @@ void __init mem_init(void)
33222 ((unsigned long)&__init_end -
33223 (unsigned long)&__init_begin) >> 10,
33224
33225- (unsigned long)&_etext, (unsigned long)&_edata,
33226- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
33227+ (unsigned long)&_sdata, (unsigned long)&_edata,
33228+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
33229
33230- (unsigned long)&_text, (unsigned long)&_etext,
33231+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
33232 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
33233
33234 /*
33235@@ -883,6 +885,7 @@ void set_kernel_text_rw(void)
33236 if (!kernel_set_to_readonly)
33237 return;
33238
33239+ start = ktla_ktva(start);
33240 pr_debug("Set kernel text: %lx - %lx for read write\n",
33241 start, start+size);
33242
33243@@ -897,6 +900,7 @@ void set_kernel_text_ro(void)
33244 if (!kernel_set_to_readonly)
33245 return;
33246
33247+ start = ktla_ktva(start);
33248 pr_debug("Set kernel text: %lx - %lx for read only\n",
33249 start, start+size);
33250
33251@@ -925,6 +929,7 @@ void mark_rodata_ro(void)
33252 unsigned long start = PFN_ALIGN(_text);
33253 unsigned long size = PFN_ALIGN(_etext) - start;
33254
33255+ start = ktla_ktva(start);
33256 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
33257 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
33258 size >> 10);
33259diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
33260index df1a992..94c272c 100644
33261--- a/arch/x86/mm/init_64.c
33262+++ b/arch/x86/mm/init_64.c
33263@@ -151,7 +151,7 @@ early_param("gbpages", parse_direct_gbpages_on);
33264 * around without checking the pgd every time.
33265 */
33266
33267-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
33268+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
33269 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33270
33271 int force_personality32;
33272@@ -184,12 +184,29 @@ void sync_global_pgds(unsigned long start, unsigned long end)
33273
33274 for (address = start; address <= end; address += PGDIR_SIZE) {
33275 const pgd_t *pgd_ref = pgd_offset_k(address);
33276+
33277+#ifdef CONFIG_PAX_PER_CPU_PGD
33278+ unsigned long cpu;
33279+#else
33280 struct page *page;
33281+#endif
33282
33283 if (pgd_none(*pgd_ref))
33284 continue;
33285
33286 spin_lock(&pgd_lock);
33287+
33288+#ifdef CONFIG_PAX_PER_CPU_PGD
33289+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33290+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
33291+
33292+ if (pgd_none(*pgd))
33293+ set_pgd(pgd, *pgd_ref);
33294+ else
33295+ BUG_ON(pgd_page_vaddr(*pgd)
33296+ != pgd_page_vaddr(*pgd_ref));
33297+ pgd = pgd_offset_cpu(cpu, kernel, address);
33298+#else
33299 list_for_each_entry(page, &pgd_list, lru) {
33300 pgd_t *pgd;
33301 spinlock_t *pgt_lock;
33302@@ -198,6 +215,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
33303 /* the pgt_lock only for Xen */
33304 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
33305 spin_lock(pgt_lock);
33306+#endif
33307
33308 if (pgd_none(*pgd))
33309 set_pgd(pgd, *pgd_ref);
33310@@ -205,7 +223,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
33311 BUG_ON(pgd_page_vaddr(*pgd)
33312 != pgd_page_vaddr(*pgd_ref));
33313
33314+#ifndef CONFIG_PAX_PER_CPU_PGD
33315 spin_unlock(pgt_lock);
33316+#endif
33317+
33318 }
33319 spin_unlock(&pgd_lock);
33320 }
33321@@ -238,7 +259,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
33322 {
33323 if (pgd_none(*pgd)) {
33324 pud_t *pud = (pud_t *)spp_getpage();
33325- pgd_populate(&init_mm, pgd, pud);
33326+ pgd_populate_kernel(&init_mm, pgd, pud);
33327 if (pud != pud_offset(pgd, 0))
33328 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
33329 pud, pud_offset(pgd, 0));
33330@@ -250,7 +271,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
33331 {
33332 if (pud_none(*pud)) {
33333 pmd_t *pmd = (pmd_t *) spp_getpage();
33334- pud_populate(&init_mm, pud, pmd);
33335+ pud_populate_kernel(&init_mm, pud, pmd);
33336 if (pmd != pmd_offset(pud, 0))
33337 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
33338 pmd, pmd_offset(pud, 0));
33339@@ -279,7 +300,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
33340 pmd = fill_pmd(pud, vaddr);
33341 pte = fill_pte(pmd, vaddr);
33342
33343+ pax_open_kernel();
33344 set_pte(pte, new_pte);
33345+ pax_close_kernel();
33346
33347 /*
33348 * It's enough to flush this one mapping.
33349@@ -338,14 +361,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
33350 pgd = pgd_offset_k((unsigned long)__va(phys));
33351 if (pgd_none(*pgd)) {
33352 pud = (pud_t *) spp_getpage();
33353- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
33354- _PAGE_USER));
33355+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
33356 }
33357 pud = pud_offset(pgd, (unsigned long)__va(phys));
33358 if (pud_none(*pud)) {
33359 pmd = (pmd_t *) spp_getpage();
33360- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
33361- _PAGE_USER));
33362+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
33363 }
33364 pmd = pmd_offset(pud, phys);
33365 BUG_ON(!pmd_none(*pmd));
33366@@ -586,7 +607,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
33367 prot);
33368
33369 spin_lock(&init_mm.page_table_lock);
33370- pud_populate(&init_mm, pud, pmd);
33371+ pud_populate_kernel(&init_mm, pud, pmd);
33372 spin_unlock(&init_mm.page_table_lock);
33373 }
33374 __flush_tlb_all();
33375@@ -627,7 +648,7 @@ kernel_physical_mapping_init(unsigned long start,
33376 page_size_mask);
33377
33378 spin_lock(&init_mm.page_table_lock);
33379- pgd_populate(&init_mm, pgd, pud);
33380+ pgd_populate_kernel(&init_mm, pgd, pud);
33381 spin_unlock(&init_mm.page_table_lock);
33382 pgd_changed = true;
33383 }
33384@@ -1195,8 +1216,8 @@ static struct vm_operations_struct gate_vma_ops = {
33385 static struct vm_area_struct gate_vma = {
33386 .vm_start = VSYSCALL_ADDR,
33387 .vm_end = VSYSCALL_ADDR + PAGE_SIZE,
33388- .vm_page_prot = PAGE_READONLY_EXEC,
33389- .vm_flags = VM_READ | VM_EXEC,
33390+ .vm_page_prot = PAGE_READONLY,
33391+ .vm_flags = VM_READ,
33392 .vm_ops = &gate_vma_ops,
33393 };
33394
33395diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
33396index 7b179b4..6bd17777 100644
33397--- a/arch/x86/mm/iomap_32.c
33398+++ b/arch/x86/mm/iomap_32.c
33399@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
33400 type = kmap_atomic_idx_push();
33401 idx = type + KM_TYPE_NR * smp_processor_id();
33402 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
33403+
33404+ pax_open_kernel();
33405 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
33406+ pax_close_kernel();
33407+
33408 arch_flush_lazy_mmu_mode();
33409
33410 return (void *)vaddr;
33411diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
33412index baff1da..2816ef4 100644
33413--- a/arch/x86/mm/ioremap.c
33414+++ b/arch/x86/mm/ioremap.c
33415@@ -56,8 +56,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
33416 unsigned long i;
33417
33418 for (i = 0; i < nr_pages; ++i)
33419- if (pfn_valid(start_pfn + i) &&
33420- !PageReserved(pfn_to_page(start_pfn + i)))
33421+ if (pfn_valid(start_pfn + i) && (start_pfn + i >= 0x100 ||
33422+ !PageReserved(pfn_to_page(start_pfn + i))))
33423 return 1;
33424
33425 WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
33426@@ -268,7 +268,7 @@ EXPORT_SYMBOL(ioremap_prot);
33427 *
33428 * Caller must ensure there is only one unmapping for the same pointer.
33429 */
33430-void iounmap(volatile void __iomem *addr)
33431+void iounmap(const volatile void __iomem *addr)
33432 {
33433 struct vm_struct *p, *o;
33434
33435@@ -322,6 +322,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
33436
33437 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
33438 if (page_is_ram(start >> PAGE_SHIFT))
33439+#ifdef CONFIG_HIGHMEM
33440+ if ((start >> PAGE_SHIFT) < max_low_pfn)
33441+#endif
33442 return __va(phys);
33443
33444 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
33445@@ -334,13 +337,16 @@ void *xlate_dev_mem_ptr(unsigned long phys)
33446 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
33447 {
33448 if (page_is_ram(phys >> PAGE_SHIFT))
33449+#ifdef CONFIG_HIGHMEM
33450+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33451+#endif
33452 return;
33453
33454 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
33455 return;
33456 }
33457
33458-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
33459+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
33460
33461 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
33462 {
33463@@ -376,8 +382,7 @@ void __init early_ioremap_init(void)
33464 early_ioremap_setup();
33465
33466 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
33467- memset(bm_pte, 0, sizeof(bm_pte));
33468- pmd_populate_kernel(&init_mm, pmd, bm_pte);
33469+ pmd_populate_user(&init_mm, pmd, bm_pte);
33470
33471 /*
33472 * The boot-ioremap range spans multiple pmds, for which
33473diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
33474index dd89a13..d77bdcc 100644
33475--- a/arch/x86/mm/kmemcheck/kmemcheck.c
33476+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
33477@@ -628,9 +628,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
33478 * memory (e.g. tracked pages)? For now, we need this to avoid
33479 * invoking kmemcheck for PnP BIOS calls.
33480 */
33481- if (regs->flags & X86_VM_MASK)
33482+ if (v8086_mode(regs))
33483 return false;
33484- if (regs->cs != __KERNEL_CS)
33485+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
33486 return false;
33487
33488 pte = kmemcheck_pte_lookup(address);
33489diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
33490index 25e7e13..1964579 100644
33491--- a/arch/x86/mm/mmap.c
33492+++ b/arch/x86/mm/mmap.c
33493@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
33494 * Leave at least a ~128 MB hole, allowing for stack randomization.
33495 */
33496 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
33497-#define MAX_GAP (TASK_SIZE/6*5)
33498+#define MAX_GAP (pax_task_size/6*5)
33499
33500 static int mmap_is_legacy(void)
33501 {
33502@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
33503 return rnd << PAGE_SHIFT;
33504 }
33505
33506-static unsigned long mmap_base(void)
33507+static unsigned long mmap_base(struct mm_struct *mm)
33508 {
33509 unsigned long gap = rlimit(RLIMIT_STACK);
33510+ unsigned long pax_task_size = TASK_SIZE;
33511+
33512+#ifdef CONFIG_PAX_SEGMEXEC
33513+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33514+ pax_task_size = SEGMEXEC_TASK_SIZE;
33515+#endif
33516
33517 if (gap < MIN_GAP)
33518 gap = MIN_GAP;
33519 else if (gap > MAX_GAP)
33520 gap = MAX_GAP;
33521
33522- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
33523+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
33524 }
33525
33526 /*
33527 * Bottom-up (legacy) layout on X86_32 did not support randomization; X86_64
33528 * does, but not when emulating X86_32
33529 */
33530-static unsigned long mmap_legacy_base(void)
33531+static unsigned long mmap_legacy_base(struct mm_struct *mm)
33532 {
33533- if (mmap_is_ia32())
33534+ if (mmap_is_ia32()) {
33535+
33536+#ifdef CONFIG_PAX_SEGMEXEC
33537+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33538+ return SEGMEXEC_TASK_UNMAPPED_BASE;
33539+ else
33540+#endif
33541+
33542 return TASK_UNMAPPED_BASE;
33543- else
33544+ } else
33545 return TASK_UNMAPPED_BASE + mmap_rnd();
33546 }
33547
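
mmap_base() above clamps the stack rlimit between MIN_GAP and MAX_GAP and places the mmap base that far below the (possibly SEGMEXEC-reduced) task size, minus a random offset. A standalone sketch of the clamp with illustrative numbers; randomization is omitted, and PAGE_ALIGN mirrors the kernel's round-up macro:

#include <stdio.h>

#define PAGE_ALIGN(x) (((x) + 4095UL) & ~4095UL)  /* round up, as the kernel does */

int main(void)
{
	unsigned long task_size = 0xc0000000UL;      /* e.g. 3GB i386 TASK_SIZE */
	unsigned long min_gap = 128UL * 1024 * 1024; /* MIN_GAP sans the random part */
	unsigned long max_gap = task_size / 6 * 5;   /* MAX_GAP */
	unsigned long gap = 8UL * 1024 * 1024;       /* RLIMIT_STACK of 8MB */

	if (gap < min_gap)
		gap = min_gap;
	else if (gap > max_gap)
		gap = max_gap;

	/* the real code also subtracts mmap_rnd() before aligning */
	printf("mmap_base = %#lx\n", PAGE_ALIGN(task_size - gap));
	return 0;
}
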
33548@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
33549 */
33550 void arch_pick_mmap_layout(struct mm_struct *mm)
33551 {
33552- mm->mmap_legacy_base = mmap_legacy_base();
33553- mm->mmap_base = mmap_base();
33554+ mm->mmap_legacy_base = mmap_legacy_base(mm);
33555+ mm->mmap_base = mmap_base(mm);
33556+
33557+#ifdef CONFIG_PAX_RANDMMAP
33558+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
33559+ mm->mmap_legacy_base += mm->delta_mmap;
33560+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
33561+ }
33562+#endif
33563
33564 if (mmap_is_legacy()) {
33565 mm->mmap_base = mm->mmap_legacy_base;
33566diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
33567index 0057a7a..95c7edd 100644
33568--- a/arch/x86/mm/mmio-mod.c
33569+++ b/arch/x86/mm/mmio-mod.c
33570@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
33571 break;
33572 default:
33573 {
33574- unsigned char *ip = (unsigned char *)instptr;
33575+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
33576 my_trace->opcode = MMIO_UNKNOWN_OP;
33577 my_trace->width = 0;
33578 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
33579@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
33580 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33581 void __iomem *addr)
33582 {
33583- static atomic_t next_id;
33584+ static atomic_unchecked_t next_id;
33585 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
33586 /* These are page-unaligned. */
33587 struct mmiotrace_map map = {
33588@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33589 .private = trace
33590 },
33591 .phys = offset,
33592- .id = atomic_inc_return(&next_id)
33593+ .id = atomic_inc_return_unchecked(&next_id)
33594 };
33595 map.map_id = trace->id;
33596
33597@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
33598 ioremap_trace_core(offset, size, addr);
33599 }
33600
33601-static void iounmap_trace_core(volatile void __iomem *addr)
33602+static void iounmap_trace_core(const volatile void __iomem *addr)
33603 {
33604 struct mmiotrace_map map = {
33605 .phys = 0,
33606@@ -328,7 +328,7 @@ not_enabled:
33607 }
33608 }
33609
33610-void mmiotrace_iounmap(volatile void __iomem *addr)
33611+void mmiotrace_iounmap(const volatile void __iomem *addr)
33612 {
33613 might_sleep();
33614 if (is_enabled()) /* recheck and proper locking in *_core() */
33615diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
33616index a32b706..efb308b 100644
33617--- a/arch/x86/mm/numa.c
33618+++ b/arch/x86/mm/numa.c
33619@@ -478,7 +478,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
33620 return true;
33621 }
33622
33623-static int __init numa_register_memblks(struct numa_meminfo *mi)
33624+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
33625 {
33626 unsigned long uninitialized_var(pfn_align);
33627 int i, nid;
33628diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
33629index ae242a7..1c7998f 100644
33630--- a/arch/x86/mm/pageattr.c
33631+++ b/arch/x86/mm/pageattr.c
33632@@ -262,7 +262,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33633 */
33634 #ifdef CONFIG_PCI_BIOS
33635 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
33636- pgprot_val(forbidden) |= _PAGE_NX;
33637+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33638 #endif
33639
33640 /*
33641@@ -270,9 +270,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33642 * Does not cover __inittext since that is gone later on. On
33643 * 64bit we do not enforce !NX on the low mapping
33644 */
33645- if (within(address, (unsigned long)_text, (unsigned long)_etext))
33646- pgprot_val(forbidden) |= _PAGE_NX;
33647+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
33648+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33649
33650+#ifdef CONFIG_DEBUG_RODATA
33651 /*
33652 * The .rodata section needs to be read-only. Using the pfn
33653 * catches all aliases.
33654@@ -280,6 +281,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33655 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
33656 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
33657 pgprot_val(forbidden) |= _PAGE_RW;
33658+#endif
33659
33660 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
33661 /*
33662@@ -318,6 +320,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33663 }
33664 #endif
33665
33666+#ifdef CONFIG_PAX_KERNEXEC
33667+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
33668+ pgprot_val(forbidden) |= _PAGE_RW;
33669+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33670+ }
33671+#endif
33672+
33673 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
33674
33675 return prot;
33676@@ -420,23 +429,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
33677 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
33678 {
33679 /* change init_mm */
33680+ pax_open_kernel();
33681 set_pte_atomic(kpte, pte);
33682+
33683 #ifdef CONFIG_X86_32
33684 if (!SHARED_KERNEL_PMD) {
33685+
33686+#ifdef CONFIG_PAX_PER_CPU_PGD
33687+ unsigned long cpu;
33688+#else
33689 struct page *page;
33690+#endif
33691
33692+#ifdef CONFIG_PAX_PER_CPU_PGD
33693+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33694+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
33695+#else
33696 list_for_each_entry(page, &pgd_list, lru) {
33697- pgd_t *pgd;
33698+ pgd_t *pgd = (pgd_t *)page_address(page);
33699+#endif
33700+
33701 pud_t *pud;
33702 pmd_t *pmd;
33703
33704- pgd = (pgd_t *)page_address(page) + pgd_index(address);
33705+ pgd += pgd_index(address);
33706 pud = pud_offset(pgd, address);
33707 pmd = pmd_offset(pud, address);
33708 set_pte_atomic((pte_t *)pmd, pte);
33709 }
33710 }
33711 #endif
33712+ pax_close_kernel();
33713 }
33714
33715 static int
33716diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
33717index 6574388..87e9bef 100644
33718--- a/arch/x86/mm/pat.c
33719+++ b/arch/x86/mm/pat.c
33720@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
33721
33722 if (!entry) {
33723 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
33724- current->comm, current->pid, start, end - 1);
33725+ current->comm, task_pid_nr(current), start, end - 1);
33726 return -EINVAL;
33727 }
33728
33729@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33730
33731 while (cursor < to) {
33732 if (!devmem_is_allowed(pfn)) {
33733- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
33734- current->comm, from, to - 1);
33735+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
33736+ current->comm, from, to - 1, cursor);
33737 return 0;
33738 }
33739 cursor += PAGE_SIZE;
33740@@ -577,7 +577,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
33741 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
33742 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
33743 "for [mem %#010Lx-%#010Lx]\n",
33744- current->comm, current->pid,
33745+ current->comm, task_pid_nr(current),
33746 cattr_name(flags),
33747 base, (unsigned long long)(base + size-1));
33748 return -EINVAL;
33749@@ -612,7 +612,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33750 flags = lookup_memtype(paddr);
33751 if (want_flags != flags) {
33752 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
33753- current->comm, current->pid,
33754+ current->comm, task_pid_nr(current),
33755 cattr_name(want_flags),
33756 (unsigned long long)paddr,
33757 (unsigned long long)(paddr + size - 1),
33758@@ -634,7 +634,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33759 free_memtype(paddr, paddr + size);
33760 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
33761 " for [mem %#010Lx-%#010Lx], got %s\n",
33762- current->comm, current->pid,
33763+ current->comm, task_pid_nr(current),
33764 cattr_name(want_flags),
33765 (unsigned long long)paddr,
33766 (unsigned long long)(paddr + size - 1),
33767diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
33768index 415f6c4..d319983 100644
33769--- a/arch/x86/mm/pat_rbtree.c
33770+++ b/arch/x86/mm/pat_rbtree.c
33771@@ -160,7 +160,7 @@ success:
33772
33773 failure:
33774 printk(KERN_INFO "%s:%d conflicting memory types "
33775- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
33776+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
33777 end, cattr_name(found_type), cattr_name(match->type));
33778 return -EBUSY;
33779 }
33780diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
33781index 9f0614d..92ae64a 100644
33782--- a/arch/x86/mm/pf_in.c
33783+++ b/arch/x86/mm/pf_in.c
33784@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
33785 int i;
33786 enum reason_type rv = OTHERS;
33787
33788- p = (unsigned char *)ins_addr;
33789+ p = (unsigned char *)ktla_ktva(ins_addr);
33790 p += skip_prefix(p, &prf);
33791 p += get_opcode(p, &opcode);
33792
33793@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
33794 struct prefix_bits prf;
33795 int i;
33796
33797- p = (unsigned char *)ins_addr;
33798+ p = (unsigned char *)ktla_ktva(ins_addr);
33799 p += skip_prefix(p, &prf);
33800 p += get_opcode(p, &opcode);
33801
33802@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
33803 struct prefix_bits prf;
33804 int i;
33805
33806- p = (unsigned char *)ins_addr;
33807+ p = (unsigned char *)ktla_ktva(ins_addr);
33808 p += skip_prefix(p, &prf);
33809 p += get_opcode(p, &opcode);
33810
33811@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
33812 struct prefix_bits prf;
33813 int i;
33814
33815- p = (unsigned char *)ins_addr;
33816+ p = (unsigned char *)ktla_ktva(ins_addr);
33817 p += skip_prefix(p, &prf);
33818 p += get_opcode(p, &opcode);
33819 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
33820@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
33821 struct prefix_bits prf;
33822 int i;
33823
33824- p = (unsigned char *)ins_addr;
33825+ p = (unsigned char *)ktla_ktva(ins_addr);
33826 p += skip_prefix(p, &prf);
33827 p += get_opcode(p, &opcode);
33828 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
33829diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
33830index 6fb6927..4fc13c0 100644
33831--- a/arch/x86/mm/pgtable.c
33832+++ b/arch/x86/mm/pgtable.c
33833@@ -97,10 +97,71 @@ static inline void pgd_list_del(pgd_t *pgd)
33834 list_del(&page->lru);
33835 }
33836
33837-#define UNSHARED_PTRS_PER_PGD \
33838- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33839+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33840+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
33841
33842+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
33843+{
33844+ unsigned int count = USER_PGD_PTRS;
33845
33846+ if (!pax_user_shadow_base)
33847+ return;
33848+
33849+ while (count--)
33850+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
33851+}
33852+#endif
33853+
33854+#ifdef CONFIG_PAX_PER_CPU_PGD
33855+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
33856+{
33857+ unsigned int count = USER_PGD_PTRS;
33858+
33859+ while (count--) {
33860+ pgd_t pgd;
33861+
33862+#ifdef CONFIG_X86_64
33863+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
33864+#else
33865+ pgd = *src++;
33866+#endif
33867+
33868+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33869+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
33870+#endif
33871+
33872+ *dst++ = pgd;
33873+ }
33874+
33875+}
33876+#endif
33877+
33878+#ifdef CONFIG_X86_64
33879+#define pxd_t pud_t
33880+#define pyd_t pgd_t
33881+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
33882+#define pgtable_pxd_page_ctor(page) true
33883+#define pgtable_pxd_page_dtor(page)
33884+#define pxd_free(mm, pud) pud_free((mm), (pud))
33885+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
33886+#define pyd_offset(mm, address) pgd_offset((mm), (address))
33887+#define PYD_SIZE PGDIR_SIZE
33888+#else
33889+#define pxd_t pmd_t
33890+#define pyd_t pud_t
33891+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
33892+#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page)
33893+#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page)
33894+#define pxd_free(mm, pud) pmd_free((mm), (pud))
33895+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
33896+#define pyd_offset(mm, address) pud_offset((mm), (address))
33897+#define PYD_SIZE PUD_SIZE
33898+#endif
33899+
33900+#ifdef CONFIG_PAX_PER_CPU_PGD
33901+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
33902+static inline void pgd_dtor(pgd_t *pgd) {}
33903+#else
33904 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
33905 {
33906 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
33907@@ -141,6 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
33908 pgd_list_del(pgd);
33909 spin_unlock(&pgd_lock);
33910 }
33911+#endif
33912
33913 /*
33914 * List of all pgd's needed for non-PAE so it can invalidate entries
33915@@ -153,7 +215,7 @@ static void pgd_dtor(pgd_t *pgd)
33916 * -- nyc
33917 */
33918
33919-#ifdef CONFIG_X86_PAE
33920+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
33921 /*
33922 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
33923 * updating the top-level pagetable entries to guarantee the
33924@@ -165,7 +227,7 @@ static void pgd_dtor(pgd_t *pgd)
33925 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
33926 * and initialize the kernel pmds here.
33927 */
33928-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
33929+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33930
33931 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33932 {
33933@@ -183,43 +245,45 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33934 */
33935 flush_tlb_mm(mm);
33936 }
33937+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
33938+#define PREALLOCATED_PXDS USER_PGD_PTRS
33939 #else /* !CONFIG_X86_PAE */
33940
33941 /* No need to prepopulate any pagetable entries in non-PAE modes. */
33942-#define PREALLOCATED_PMDS 0
33943+#define PREALLOCATED_PXDS 0
33944
33945 #endif /* CONFIG_X86_PAE */
33946
33947-static void free_pmds(pmd_t *pmds[])
33948+static void free_pxds(pxd_t *pxds[])
33949 {
33950 int i;
33951
33952- for(i = 0; i < PREALLOCATED_PMDS; i++)
33953- if (pmds[i]) {
33954- pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
33955- free_page((unsigned long)pmds[i]);
33956+ for(i = 0; i < PREALLOCATED_PXDS; i++)
33957+ if (pxds[i]) {
33958+ pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
33959+ free_page((unsigned long)pxds[i]);
33960 }
33961 }
33962
33963-static int preallocate_pmds(pmd_t *pmds[])
33964+static int preallocate_pxds(pxd_t *pxds[])
33965 {
33966 int i;
33967 bool failed = false;
33968
33969- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33970- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
33971- if (!pmd)
33972+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33973+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
33974+ if (!pxd)
33975 failed = true;
33976- if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
33977- free_page((unsigned long)pmd);
33978- pmd = NULL;
33979+ if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
33980+ free_page((unsigned long)pxd);
33981+ pxd = NULL;
33982 failed = true;
33983 }
33984- pmds[i] = pmd;
33985+ pxds[i] = pxd;
33986 }
33987
33988 if (failed) {
33989- free_pmds(pmds);
33990+ free_pxds(pxds);
33991 return -ENOMEM;
33992 }
33993
33994@@ -232,49 +296,52 @@ static int preallocate_pmds(pmd_t *pmds[])
33995 * preallocate which never got a corresponding vma will need to be
33996 * freed manually.
33997 */
33998-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
33999+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
34000 {
34001 int i;
34002
34003- for(i = 0; i < PREALLOCATED_PMDS; i++) {
34004+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
34005 pgd_t pgd = pgdp[i];
34006
34007 if (pgd_val(pgd) != 0) {
34008- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
34009+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
34010
34011- pgdp[i] = native_make_pgd(0);
34012+ set_pgd(pgdp + i, native_make_pgd(0));
34013
34014- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
34015- pmd_free(mm, pmd);
34016+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
34017+ pxd_free(mm, pxd);
34018 }
34019 }
34020 }
34021
34022-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
34023+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
34024 {
34025- pud_t *pud;
34026+ pyd_t *pyd;
34027 int i;
34028
34029- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
34030+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
34031 return;
34032
34033- pud = pud_offset(pgd, 0);
34034-
34035- for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
34036- pmd_t *pmd = pmds[i];
34037+#ifdef CONFIG_X86_64
34038+ pyd = pyd_offset(mm, 0L);
34039+#else
34040+ pyd = pyd_offset(pgd, 0L);
34041+#endif
34042
34043+ for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
34044+ pxd_t *pxd = pxds[i];
34045 if (i >= KERNEL_PGD_BOUNDARY)
34046- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
34047- sizeof(pmd_t) * PTRS_PER_PMD);
34048+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
34049+ sizeof(pxd_t) * PTRS_PER_PMD);
34050
34051- pud_populate(mm, pud, pmd);
34052+ pyd_populate(mm, pyd, pxd);
34053 }
34054 }
34055
34056 pgd_t *pgd_alloc(struct mm_struct *mm)
34057 {
34058 pgd_t *pgd;
34059- pmd_t *pmds[PREALLOCATED_PMDS];
34060+ pxd_t *pxds[PREALLOCATED_PXDS];
34061
34062 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
34063
34064@@ -283,11 +350,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
34065
34066 mm->pgd = pgd;
34067
34068- if (preallocate_pmds(pmds) != 0)
34069+ if (preallocate_pxds(pxds) != 0)
34070 goto out_free_pgd;
34071
34072 if (paravirt_pgd_alloc(mm) != 0)
34073- goto out_free_pmds;
34074+ goto out_free_pxds;
34075
34076 /*
34077 * Make sure that pre-populating the pmds is atomic with
34078@@ -297,14 +364,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
34079 spin_lock(&pgd_lock);
34080
34081 pgd_ctor(mm, pgd);
34082- pgd_prepopulate_pmd(mm, pgd, pmds);
34083+ pgd_prepopulate_pxd(mm, pgd, pxds);
34084
34085 spin_unlock(&pgd_lock);
34086
34087 return pgd;
34088
34089-out_free_pmds:
34090- free_pmds(pmds);
34091+out_free_pxds:
34092+ free_pxds(pxds);
34093 out_free_pgd:
34094 free_page((unsigned long)pgd);
34095 out:
34096@@ -313,7 +380,7 @@ out:
34097
34098 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
34099 {
34100- pgd_mop_up_pmds(mm, pgd);
34101+ pgd_mop_up_pxds(mm, pgd);
34102 pgd_dtor(pgd);
34103 paravirt_pgd_free(mm, pgd);
34104 free_page((unsigned long)pgd);
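The pgtable.c rework above folds the 32-bit PAE pmd-preallocation path and the 64-bit per-CPU-PGD pud-preallocation path into one body: the pxd/pyd macro block maps pxd_t/pyd_t to pmd_t/pud_t on 32-bit and to pud_t/pgd_t on x86_64, so free_pxds(), preallocate_pxds() and the rest compile against whichever level the configuration needs. A reduced, standalone illustration of that aliasing trick, with toy types rather than kernel ones:

#include <stdio.h>

typedef struct { unsigned long val; } toy_pmd;
typedef struct { unsigned long val; } toy_pud;

#ifdef LAYOUT_64
typedef toy_pud toy_pxd;	/* 64-bit: preallocate puds under the pgd */
# define LEVEL_NAME "pud"
#else
typedef toy_pmd toy_pxd;	/* 32-bit PAE: preallocate pmds under the pud */
# define LEVEL_NAME "pmd"
#endif

/* One function body, retargeted at compile time. */
static void describe_prealloc(toy_pxd *entries, int n)
{
	printf("preallocating %d %s entries of %zu bytes\n",
	       n, LEVEL_NAME, sizeof(*entries));
	(void)entries;
}

int main(void)
{
	toy_pxd arr[4];
	describe_prealloc(arr, 4);
	return 0;
}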
34105diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
34106index 4dd8cf6..f9d143e 100644
34107--- a/arch/x86/mm/pgtable_32.c
34108+++ b/arch/x86/mm/pgtable_32.c
34109@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
34110 return;
34111 }
34112 pte = pte_offset_kernel(pmd, vaddr);
34113+
34114+ pax_open_kernel();
34115 if (pte_val(pteval))
34116 set_pte_at(&init_mm, vaddr, pte, pteval);
34117 else
34118 pte_clear(&init_mm, vaddr, pte);
34119+ pax_close_kernel();
34120
34121 /*
34122 * It's enough to flush this one mapping.
34123diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
34124index e666cbb..61788c45 100644
34125--- a/arch/x86/mm/physaddr.c
34126+++ b/arch/x86/mm/physaddr.c
34127@@ -10,7 +10,7 @@
34128 #ifdef CONFIG_X86_64
34129
34130 #ifdef CONFIG_DEBUG_VIRTUAL
34131-unsigned long __phys_addr(unsigned long x)
34132+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34133 {
34134 unsigned long y = x - __START_KERNEL_map;
34135
34136@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
34137 #else
34138
34139 #ifdef CONFIG_DEBUG_VIRTUAL
34140-unsigned long __phys_addr(unsigned long x)
34141+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34142 {
34143 unsigned long phys_addr = x - PAGE_OFFSET;
34144 /* VMALLOC_* aren't constants */
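__phys_addr() intentionally relies on unsigned wraparound when its argument lies below the mapping base, and the added __intentional_overflow(-1) annotation tells the size_overflow gcc plugin not to instrument the function. A sketch of how such an annotation can degrade to a no-op outside plugin builds; the SIZE_OVERFLOW_PLUGIN guard is an assumed stand-in for however the real build flags it:

#ifdef SIZE_OVERFLOW_PLUGIN	/* assumed guard, for illustration */
#define __intentional_overflow(...)	__attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)
#endif

/* Hypothetical example: the subtraction may wrap by design, and the
 * annotation keeps the overflow checker from flagging it.
 */
unsigned long __intentional_overflow(-1) to_phys_sketch(unsigned long x)
{
	return x - 0xffffffff80000000UL;
}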
34145diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
34146index 90555bf..f5f1828 100644
34147--- a/arch/x86/mm/setup_nx.c
34148+++ b/arch/x86/mm/setup_nx.c
34149@@ -5,8 +5,10 @@
34150 #include <asm/pgtable.h>
34151 #include <asm/proto.h>
34152
34153+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34154 static int disable_nx;
34155
34156+#ifndef CONFIG_PAX_PAGEEXEC
34157 /*
34158 * noexec = on|off
34159 *
34160@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
34161 return 0;
34162 }
34163 early_param("noexec", noexec_setup);
34164+#endif
34165+
34166+#endif
34167
34168 void x86_configure_nx(void)
34169 {
34170+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34171 if (cpu_has_nx && !disable_nx)
34172 __supported_pte_mask |= _PAGE_NX;
34173 else
34174+#endif
34175 __supported_pte_mask &= ~_PAGE_NX;
34176 }
34177
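The setup_nx.c change compiles the noexec= parameter and the NX probe only when the build can express NX at all (x86_64 or 32-bit PAE), and drops the noexec= override entirely under PAX_PAGEEXEC; in all other configurations x86_configure_nx() unconditionally clears _PAGE_NX. The resulting mask logic, reduced to a standalone sketch:

#include <stdio.h>
#include <stdint.h>

#define NX_BIT (1ULL << 63)	/* _PAGE_NX lives in bit 63 on x86 */

static uint64_t configure_nx_sketch(int build_has_nx, int cpu_has_nx,
				    int disable_nx, uint64_t supported_mask)
{
	if (build_has_nx && cpu_has_nx && !disable_nx)
		return supported_mask | NX_BIT;
	return supported_mask & ~NX_BIT;
}

int main(void)
{
	printf("%llx\n", (unsigned long long)
	       configure_nx_sketch(1, 1, 0, 0));
	return 0;
}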
34178diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
34179index dd8dda1..9e9b0f6 100644
34180--- a/arch/x86/mm/tlb.c
34181+++ b/arch/x86/mm/tlb.c
34182@@ -48,7 +48,11 @@ void leave_mm(int cpu)
34183 BUG();
34184 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
34185 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
34186+
34187+#ifndef CONFIG_PAX_PER_CPU_PGD
34188 load_cr3(swapper_pg_dir);
34189+#endif
34190+
34191 }
34192 }
34193 EXPORT_SYMBOL_GPL(leave_mm);
34194diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
34195new file mode 100644
34196index 0000000..dace51c
34197--- /dev/null
34198+++ b/arch/x86/mm/uderef_64.c
34199@@ -0,0 +1,37 @@
34200+#include <linux/mm.h>
34201+#include <asm/pgtable.h>
34202+#include <asm/uaccess.h>
34203+
34204+#ifdef CONFIG_PAX_MEMORY_UDEREF
34205+/* PaX: due to the special call convention these functions must
34206+ * - remain leaf functions under all configurations,
34207+ * - never be called directly, only dereferenced from the wrappers.
34208+ */
34209+void __pax_open_userland(void)
34210+{
34211+ unsigned int cpu;
34212+
34213+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34214+ return;
34215+
34216+ cpu = raw_get_cpu();
34217+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
34218+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
34219+ raw_put_cpu_no_resched();
34220+}
34221+EXPORT_SYMBOL(__pax_open_userland);
34222+
34223+void __pax_close_userland(void)
34224+{
34225+ unsigned int cpu;
34226+
34227+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34228+ return;
34229+
34230+ cpu = raw_get_cpu();
34231+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
34232+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
34233+ raw_put_cpu_no_resched();
34234+}
34235+EXPORT_SYMBOL(__pax_close_userland);
34236+#endif
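The new uderef_64.c implements the UDEREF open/close primitives for x86_64 with PCID: each CPU owns a kernel PGD (tagged PCID_KERNEL) and a shadow user PGD (tagged PCID_USER), and switching between them is a single cr3 write with PCID_NOFLUSH, so no TLB contents are thrown away; the BUG_ON()s assert that the current PCID tag matches the expected state. A kernel-style sketch of how a uaccess path would be bracketed by these primitives, assuming pax_open_userland()/pax_close_userland() wrapper macros around the __pax_* functions (the header comment notes the functions are only ever reached through wrappers):

#include <linux/uaccess.h>

/* Sketch only: fetch one byte from userland inside an open/close
 * bracket.  Between the two calls, cr3 points at the per-CPU user
 * PGD, so the userland dereference is actually mappable.
 */
static long get_user_byte_sketch(const unsigned char __user *p,
				 unsigned char *out)
{
	long err;

	pax_open_userland();		/* cr3 -> per-CPU user pgd */
	err = __get_user(*out, p);
	pax_close_userland();		/* cr3 -> per-CPU kernel pgd */
	return err;
}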
34237diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
34238index 6440221..f746de8 100644
34239--- a/arch/x86/net/bpf_jit.S
34240+++ b/arch/x86/net/bpf_jit.S
34241@@ -9,19 +9,17 @@
34242 */
34243 #include <linux/linkage.h>
34244 #include <asm/dwarf2.h>
34245+#include <asm/alternative-asm.h>
34246
34247 /*
34248 * Calling convention :
34249- * rbx : skb pointer (callee saved)
34250+ * rdi : skb pointer
34251 * esi : offset of byte(s) to fetch in skb (can be scratched)
34252- * r10 : copy of skb->data
34253+ * r8 : copy of skb->data
34254 * r9d : hlen = skb->len - skb->data_len
34255 */
34256-#define SKBDATA %r10
34257+#define SKBDATA %r8
34258 #define SKF_MAX_NEG_OFF $(-0x200000) /* SKF_LL_OFF from filter.h */
34259-#define MAX_BPF_STACK (512 /* from filter.h */ + \
34260- 32 /* space for rbx,r13,r14,r15 */ + \
34261- 8 /* space for skb_copy_bits */)
34262
34263 sk_load_word:
34264 .globl sk_load_word
34265@@ -38,6 +36,7 @@ sk_load_word_positive_offset:
34266 jle bpf_slow_path_word
34267 mov (SKBDATA,%rsi),%eax
34268 bswap %eax /* ntohl() */
34269+ pax_force_retaddr
34270 ret
34271
34272 sk_load_half:
34273@@ -55,6 +54,7 @@ sk_load_half_positive_offset:
34274 jle bpf_slow_path_half
34275 movzwl (SKBDATA,%rsi),%eax
34276 rol $8,%ax # ntohs()
34277+ pax_force_retaddr
34278 ret
34279
34280 sk_load_byte:
34281@@ -69,45 +69,83 @@ sk_load_byte_positive_offset:
34282 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
34283 jle bpf_slow_path_byte
34284 movzbl (SKBDATA,%rsi),%eax
34285+ pax_force_retaddr
34286+ ret
34287+
34288+/**
34289+ * sk_load_byte_msh - BPF_S_LDX_B_MSH helper
34290+ *
34291+ * Implements BPF_S_LDX_B_MSH : ldxb 4*([offset]&0xf)
34292+ * Must preserve A accumulator (%eax)
34293+ * Inputs : %esi is the offset value
34294+ */
34295+sk_load_byte_msh:
34296+ .globl sk_load_byte_msh
34297+ test %esi,%esi
34298+ js bpf_slow_path_byte_msh_neg
34299+
34300+sk_load_byte_msh_positive_offset:
34301+ .globl sk_load_byte_msh_positive_offset
34302+ cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte_msh */
34303+ jle bpf_slow_path_byte_msh
34304+ movzbl (SKBDATA,%rsi),%ebx
34305+ and $15,%bl
34306+ shl $2,%bl
34307+ pax_force_retaddr
34308 ret
34309
34310 /* rsi contains offset and can be scratched */
34311 #define bpf_slow_path_common(LEN) \
34312- mov %rbx, %rdi; /* arg1 == skb */ \
34313+ push %rdi; /* save skb */ \
34314 push %r9; \
34315 push SKBDATA; \
34316 /* rsi already has offset */ \
34317 mov $LEN,%ecx; /* len */ \
34318- lea - MAX_BPF_STACK + 32(%rbp),%rdx; \
34319+ lea -12(%rbp),%rdx; \
34320 call skb_copy_bits; \
34321 test %eax,%eax; \
34322 pop SKBDATA; \
34323- pop %r9;
34324+ pop %r9; \
34325+ pop %rdi
34326
34327
34328 bpf_slow_path_word:
34329 bpf_slow_path_common(4)
34330 js bpf_error
34331- mov - MAX_BPF_STACK + 32(%rbp),%eax
34332+ mov -12(%rbp),%eax
34333 bswap %eax
34334+ pax_force_retaddr
34335 ret
34336
34337 bpf_slow_path_half:
34338 bpf_slow_path_common(2)
34339 js bpf_error
34340- mov - MAX_BPF_STACK + 32(%rbp),%ax
34341+ mov -12(%rbp),%ax
34342 rol $8,%ax
34343 movzwl %ax,%eax
34344+ pax_force_retaddr
34345 ret
34346
34347 bpf_slow_path_byte:
34348 bpf_slow_path_common(1)
34349 js bpf_error
34350- movzbl - MAX_BPF_STACK + 32(%rbp),%eax
34351+ movzbl -12(%rbp),%eax
34352+ pax_force_retaddr
34353+ ret
34354+
34355+bpf_slow_path_byte_msh:
34356+ xchg %eax,%ebx /* don't lose A, X is about to be scratched */
34357+ bpf_slow_path_common(1)
34358+ js bpf_error
34359+ movzbl -12(%rbp),%eax
34360+ and $15,%al
34361+ shl $2,%al
34362+ xchg %eax,%ebx
34363+ pax_force_retaddr
34364 ret
34365
34366 #define sk_negative_common(SIZE) \
34367- mov %rbx, %rdi; /* arg1 == skb */ \
34368+ push %rdi; /* save skb */ \
34369 push %r9; \
34370 push SKBDATA; \
34371 /* rsi already has offset */ \
34372@@ -116,8 +154,10 @@ bpf_slow_path_byte:
34373 test %rax,%rax; \
34374 pop SKBDATA; \
34375 pop %r9; \
34376+ pop %rdi; \
34377 jz bpf_error
34378
34379+
34380 bpf_slow_path_word_neg:
34381 cmp SKF_MAX_NEG_OFF, %esi /* test range */
34382 jl bpf_error /* offset lower -> error */
34383@@ -126,6 +166,7 @@ sk_load_word_negative_offset:
34384 sk_negative_common(4)
34385 mov (%rax), %eax
34386 bswap %eax
34387+ pax_force_retaddr
34388 ret
34389
34390 bpf_slow_path_half_neg:
34391@@ -137,6 +178,7 @@ sk_load_half_negative_offset:
34392 mov (%rax),%ax
34393 rol $8,%ax
34394 movzwl %ax,%eax
34395+ pax_force_retaddr
34396 ret
34397
34398 bpf_slow_path_byte_neg:
34399@@ -146,14 +188,27 @@ sk_load_byte_negative_offset:
34400 .globl sk_load_byte_negative_offset
34401 sk_negative_common(1)
34402 movzbl (%rax), %eax
34403+ pax_force_retaddr
34404+ ret
34405+
34406+bpf_slow_path_byte_msh_neg:
34407+ cmp SKF_MAX_NEG_OFF, %esi
34408+ jl bpf_error
34409+sk_load_byte_msh_negative_offset:
34410+ .globl sk_load_byte_msh_negative_offset
34411+ xchg %eax,%ebx /* don't lose A, X is about to be scratched */
34412+ sk_negative_common(1)
34413+ movzbl (%rax),%eax
34414+ and $15,%al
34415+ shl $2,%al
34416+ xchg %eax,%ebx
34417+ pax_force_retaddr
34418 ret
34419
34420 bpf_error:
34421 # force a return 0 from jit handler
34422- xor %eax,%eax
34423- mov - MAX_BPF_STACK(%rbp),%rbx
34424- mov - MAX_BPF_STACK + 8(%rbp),%r13
34425- mov - MAX_BPF_STACK + 16(%rbp),%r14
34426- mov - MAX_BPF_STACK + 24(%rbp),%r15
34427+ xor %eax,%eax
34428+ mov -8(%rbp),%rbx
34429 leaveq
34430+ pax_force_retaddr
34431 ret
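The bpf_jit.S rewrite reverts the file to the classic-BPF helper set (adding pax_force_retaddr, a PaX return-address hardening hook, before every ret), which is why sk_load_byte_msh and its slow paths reappear. The msh helper implements the classic instruction ldxb 4*([k]&0xf): load a byte, keep the low nibble, multiply by four. In plain C, as a standalone demonstration; for an IPv4 packet this turns the IHL nibble into the header length in bytes, matching the and $15 / shl $2 sequence above:

#include <stdio.h>

/* What BPF_S_LDX_B_MSH computes: X = 4 * (pkt[k] & 0xf). */
static unsigned int ldx_b_msh(const unsigned char *pkt, unsigned int k)
{
	return (pkt[k] & 0xf) << 2;
}

int main(void)
{
	unsigned char ip_hdr0 = 0x45;	/* version 4, IHL 5 */

	printf("header length = %u bytes\n", ldx_b_msh(&ip_hdr0, 0));
	return 0;	/* prints 20 */
}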
34432diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
34433index 99bef86..bdfb5c6 100644
34434--- a/arch/x86/net/bpf_jit_comp.c
34435+++ b/arch/x86/net/bpf_jit_comp.c
34436@@ -1,7 +1,6 @@
34437 /* bpf_jit_comp.c : BPF JIT compiler
34438 *
34439 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
34440- * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
34441 *
34442 * This program is free software; you can redistribute it and/or
34443 * modify it under the terms of the GNU General Public License
34444@@ -15,16 +14,28 @@
34445 #include <linux/if_vlan.h>
34446 #include <linux/random.h>
34447
34448+/*
34449+ * Conventions :
34450+ * EAX : BPF A accumulator
34451+ * EBX : BPF X accumulator
34452+ * RDI : pointer to skb (first argument given to JIT function)
34453+ * RBP : frame pointer (even if CONFIG_FRAME_POINTER=n)
34454+ * ECX,EDX,ESI : scratch registers
34455+ * r9d : skb->len - skb->data_len (headlen)
34456+ * r8 : skb->data
34457+ * -8(RBP) : saved RBX value
34458+ * -16(RBP)..-80(RBP) : BPF_MEMWORDS values
34459+ */
34460 int bpf_jit_enable __read_mostly;
34461
34462 /*
34463 * assembly code in arch/x86/net/bpf_jit.S
34464 */
34465-extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
34466+extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
34467 extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
34468-extern u8 sk_load_byte_positive_offset[];
34469+extern u8 sk_load_byte_positive_offset[], sk_load_byte_msh_positive_offset[];
34470 extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
34471-extern u8 sk_load_byte_negative_offset[];
34472+extern u8 sk_load_byte_negative_offset[], sk_load_byte_msh_negative_offset[];
34473
34474 static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
34475 {
34476@@ -39,50 +50,113 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
34477 return ptr + len;
34478 }
34479
34480+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
34481+#define MAX_INSTR_CODE_SIZE 96
34482+#else
34483+#define MAX_INSTR_CODE_SIZE 64
34484+#endif
34485+
34486 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
34487
34488 #define EMIT1(b1) EMIT(b1, 1)
34489 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
34490 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
34491 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
34492-#define EMIT1_off32(b1, off) \
34493- do {EMIT1(b1); EMIT(off, 4); } while (0)
34494-#define EMIT2_off32(b1, b2, off) \
34495- do {EMIT2(b1, b2); EMIT(off, 4); } while (0)
34496-#define EMIT3_off32(b1, b2, b3, off) \
34497- do {EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
34498-#define EMIT4_off32(b1, b2, b3, b4, off) \
34499- do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
34500+
34501+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
34502+/* original constant will appear in ecx */
34503+#define DILUTE_CONST_SEQUENCE(_off, _key) \
34504+do { \
34505+ /* mov ecx, randkey */ \
34506+ EMIT1(0xb9); \
34507+ EMIT(_key, 4); \
34508+ /* xor ecx, randkey ^ off */ \
34509+ EMIT2(0x81, 0xf1); \
34510+ EMIT((_key) ^ (_off), 4); \
34511+} while (0)
34512+
34513+#define EMIT1_off32(b1, _off) \
34514+do { \
34515+ switch (b1) { \
34516+ case 0x05: /* add eax, imm32 */ \
34517+ case 0x2d: /* sub eax, imm32 */ \
34518+ case 0x25: /* and eax, imm32 */ \
34519+ case 0x0d: /* or eax, imm32 */ \
34520+ case 0xb8: /* mov eax, imm32 */ \
34521+ case 0x35: /* xor eax, imm32 */ \
34522+ case 0x3d: /* cmp eax, imm32 */ \
34523+ case 0xa9: /* test eax, imm32 */ \
34524+ DILUTE_CONST_SEQUENCE(_off, randkey); \
34525+ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
34526+ break; \
34527+ case 0xbb: /* mov ebx, imm32 */ \
34528+ DILUTE_CONST_SEQUENCE(_off, randkey); \
34529+ /* mov ebx, ecx */ \
34530+ EMIT2(0x89, 0xcb); \
34531+ break; \
34532+ case 0xbe: /* mov esi, imm32 */ \
34533+ DILUTE_CONST_SEQUENCE(_off, randkey); \
34534+ /* mov esi, ecx */ \
34535+ EMIT2(0x89, 0xce); \
34536+ break; \
34537+ case 0xe8: /* call rel imm32, always to known funcs */ \
34538+ EMIT1(b1); \
34539+ EMIT(_off, 4); \
34540+ break; \
34541+ case 0xe9: /* jmp rel imm32 */ \
34542+ EMIT1(b1); \
34543+ EMIT(_off, 4); \
34544+ /* prevent fall-through, we're not called if off = 0 */ \
34545+ EMIT(0xcccccccc, 4); \
34546+ EMIT(0xcccccccc, 4); \
34547+ break; \
34548+ default: \
34549+ BUILD_BUG(); \
34550+ } \
34551+} while (0)
34552+
34553+#define EMIT2_off32(b1, b2, _off) \
34554+do { \
34555+ if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
34556+ EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
34557+ EMIT(randkey, 4); \
34558+ EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
34559+ EMIT((_off) - randkey, 4); \
34560+ } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
34561+ DILUTE_CONST_SEQUENCE(_off, randkey); \
34562+ /* imul eax, ecx */ \
34563+ EMIT3(0x0f, 0xaf, 0xc1); \
34564+ } else { \
34565+ BUILD_BUG(); \
34566+ } \
34567+} while (0)
34568+#else
34569+#define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
34570+#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
34571+#endif
34572+
34573+#define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
34574+#define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
34575
34576 static inline bool is_imm8(int value)
34577 {
34578 return value <= 127 && value >= -128;
34579 }
34580
34581-static inline bool is_simm32(s64 value)
34582+static inline bool is_near(int offset)
34583 {
34584- return value == (s64) (s32) value;
34585+ return offset <= 127 && offset >= -128;
34586 }
34587
34588-/* mov dst, src */
34589-#define EMIT_mov(DST, SRC) \
34590- do {if (DST != SRC) \
34591- EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
34592- } while (0)
34593-
34594-static int bpf_size_to_x86_bytes(int bpf_size)
34595-{
34596- if (bpf_size == BPF_W)
34597- return 4;
34598- else if (bpf_size == BPF_H)
34599- return 2;
34600- else if (bpf_size == BPF_B)
34601- return 1;
34602- else if (bpf_size == BPF_DW)
34603- return 4; /* imm32 */
34604- else
34605- return 0;
34606-}
34607+#define EMIT_JMP(offset) \
34608+do { \
34609+ if (offset) { \
34610+ if (is_near(offset)) \
34611+ EMIT2(0xeb, offset); /* jmp .+off8 */ \
34612+ else \
34613+ EMIT1_off32(0xe9, offset); /* jmp .+off32 */ \
34614+ } \
34615+} while (0)
34616
34617 /* list of x86 cond jumps opcodes (. + s8)
34618 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
34619@@ -93,8 +167,46 @@ static int bpf_size_to_x86_bytes(int bpf_size)
34620 #define X86_JNE 0x75
34621 #define X86_JBE 0x76
34622 #define X86_JA 0x77
34623-#define X86_JGE 0x7D
34624-#define X86_JG 0x7F
34625+
34626+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
34627+#define APPEND_FLOW_VERIFY() \
34628+do { \
34629+ /* mov ecx, randkey */ \
34630+ EMIT1(0xb9); \
34631+ EMIT(randkey, 4); \
34632+ /* cmp ecx, randkey */ \
34633+ EMIT2(0x81, 0xf9); \
34634+ EMIT(randkey, 4); \
34635+ /* jz after 8 int 3s */ \
34636+ EMIT2(0x74, 0x08); \
34637+ EMIT(0xcccccccc, 4); \
34638+ EMIT(0xcccccccc, 4); \
34639+} while (0)
34640+#else
34641+#define APPEND_FLOW_VERIFY() do { } while (0)
34642+#endif
34643+
34644+#define EMIT_COND_JMP(op, offset) \
34645+do { \
34646+ if (is_near(offset)) \
34647+ EMIT2(op, offset); /* jxx .+off8 */ \
34648+ else { \
34649+ EMIT2(0x0f, op + 0x10); \
34650+ EMIT(offset, 4); /* jxx .+off32 */ \
34651+ APPEND_FLOW_VERIFY(); \
34652+ } \
34653+} while (0)
34654+
34655+#define COND_SEL(CODE, TOP, FOP) \
34656+ case CODE: \
34657+ t_op = TOP; \
34658+ f_op = FOP; \
34659+ goto cond_branch
34660+
34661+
34662+#define SEEN_DATAREF 1 /* might call external helpers */
34663+#define SEEN_XREG 2 /* ebx is used */
34664+#define SEEN_MEM 4 /* use mem[] for temporary storage */
34665
34666 static inline void bpf_flush_icache(void *start, void *end)
34667 {
34668@@ -109,804 +221,646 @@ static inline void bpf_flush_icache(void *start, void *end)
34669 #define CHOOSE_LOAD_FUNC(K, func) \
34670 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
34671
34672-struct bpf_binary_header {
34673- unsigned int pages;
34674- /* Note : for security reasons, bpf code will follow a randomly
34675- * sized amount of int3 instructions
34676- */
34677- u8 image[];
34678-};
34679+/* Helper to find the offset of pkt_type in sk_buff
34680+ * We want to make sure it's still a 3-bit field starting at a byte boundary.
34681+ */
34682+#define PKT_TYPE_MAX 7
34683+static int pkt_type_offset(void)
34684+{
34685+ struct sk_buff skb_probe = {
34686+ .pkt_type = ~0,
34687+ };
34688+ char *ct = (char *)&skb_probe;
34689+ unsigned int off;
34690
34691-static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
34692+ for (off = 0; off < sizeof(struct sk_buff); off++) {
34693+ if (ct[off] == PKT_TYPE_MAX)
34694+ return off;
34695+ }
34696+ pr_err_once("Please fix pkt_type_offset(), as pkt_type couldn't be found\n");
34697+ return -1;
34698+}
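/* Standalone demonstration of the probe trick pkt_type_offset() uses
 * above: set the bitfield of interest to all-ones in an otherwise
 * zeroed struct, then scan the bytes for the expected value.  The
 * toy struct stands in for struct sk_buff.
 */
#include <stdio.h>
#include <string.h>

struct toy {
	long a;
	unsigned char pad;
	unsigned char kind:3;	/* stand-in for skb->pkt_type */
};

int main(void)
{
	struct toy probe;
	unsigned char *ct = (unsigned char *)&probe;
	unsigned int off;

	memset(&probe, 0, sizeof(probe));
	probe.kind = ~0;	/* 3-bit field -> value 7 */

	for (off = 0; off < sizeof(probe); off++)
		if (ct[off] == 7) {
			printf("kind found at byte offset %u\n", off);
			return 0;
		}
	puts("kind is not byte-aligned");
	return 1;
}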
34699+
34700+/* Note: for security reasons, the BPF code is placed after a randomly
34701+ * sized run of int3 instructions
34702+ */
34703+static u8 *bpf_alloc_binary(unsigned int proglen,
34704 u8 **image_ptr)
34705 {
34706 unsigned int sz, hole;
34707- struct bpf_binary_header *header;
34708+ u8 *header;
34709
34710 /* Most of BPF filters are really small,
34711 * but if some of them fill a page, allow at least
34712 * 128 extra bytes to insert a random section of int3
34713 */
34714- sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE);
34715- header = module_alloc(sz);
34716+ sz = round_up(proglen + 128, PAGE_SIZE);
34717+ header = module_alloc_exec(sz);
34718 if (!header)
34719 return NULL;
34720
34721+ pax_open_kernel();
34722 memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
34723+ pax_close_kernel();
34724
34725- header->pages = sz / PAGE_SIZE;
34726- hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));
34727+ hole = PAGE_SIZE - (proglen & ~PAGE_MASK);
34728
34729 /* insert a random number of int3 instructions before BPF code */
34730- *image_ptr = &header->image[prandom_u32() % hole];
34731+ *image_ptr = &header[prandom_u32() % hole];
34732 return header;
34733 }
34734
34735-/* pick a register outside of BPF range for JIT internal work */
34736-#define AUX_REG (MAX_BPF_REG + 1)
34737-
34738-/* the following table maps BPF registers to x64 registers.
34739- * x64 register r12 is unused, since if used as base address register
34740- * in load/store instructions, it always needs an extra byte of encoding
34741- */
34742-static const int reg2hex[] = {
34743- [BPF_REG_0] = 0, /* rax */
34744- [BPF_REG_1] = 7, /* rdi */
34745- [BPF_REG_2] = 6, /* rsi */
34746- [BPF_REG_3] = 2, /* rdx */
34747- [BPF_REG_4] = 1, /* rcx */
34748- [BPF_REG_5] = 0, /* r8 */
34749- [BPF_REG_6] = 3, /* rbx callee saved */
34750- [BPF_REG_7] = 5, /* r13 callee saved */
34751- [BPF_REG_8] = 6, /* r14 callee saved */
34752- [BPF_REG_9] = 7, /* r15 callee saved */
34753- [BPF_REG_FP] = 5, /* rbp readonly */
34754- [AUX_REG] = 3, /* r11 temp register */
34755-};
34756-
34757-/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15
34758- * which need extra byte of encoding.
34759- * rax,rcx,...,rbp have simpler encoding
34760- */
34761-static inline bool is_ereg(u32 reg)
34762-{
34763- if (reg == BPF_REG_5 || reg == AUX_REG ||
34764- (reg >= BPF_REG_7 && reg <= BPF_REG_9))
34765- return true;
34766- else
34767- return false;
34768-}
34769-
34770-/* add modifiers if 'reg' maps to x64 registers r8..r15 */
34771-static inline u8 add_1mod(u8 byte, u32 reg)
34772-{
34773- if (is_ereg(reg))
34774- byte |= 1;
34775- return byte;
34776-}
34777-
34778-static inline u8 add_2mod(u8 byte, u32 r1, u32 r2)
34779-{
34780- if (is_ereg(r1))
34781- byte |= 1;
34782- if (is_ereg(r2))
34783- byte |= 4;
34784- return byte;
34785-}
34786-
34787-/* encode 'dst_reg' register into x64 opcode 'byte' */
34788-static inline u8 add_1reg(u8 byte, u32 dst_reg)
34789-{
34790- return byte + reg2hex[dst_reg];
34791-}
34792-
34793-/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
34794-static inline u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
34795-{
34796- return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
34797-}
34798-
34799-struct jit_context {
34800- unsigned int cleanup_addr; /* epilogue code offset */
34801- bool seen_ld_abs;
34802-};
34803-
34804-static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
34805- int oldproglen, struct jit_context *ctx)
34806-{
34807- struct sock_filter_int *insn = bpf_prog->insnsi;
34808- int insn_cnt = bpf_prog->len;
34809- u8 temp[64];
34810- int i;
34811- int proglen = 0;
34812- u8 *prog = temp;
34813- int stacksize = MAX_BPF_STACK +
34814- 32 /* space for rbx, r13, r14, r15 */ +
34815- 8 /* space for skb_copy_bits() buffer */;
34816-
34817- EMIT1(0x55); /* push rbp */
34818- EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */
34819-
34820- /* sub rsp, stacksize */
34821- EMIT3_off32(0x48, 0x81, 0xEC, stacksize);
34822-
34823- /* all classic BPF filters use R6(rbx) save it */
34824-
34825- /* mov qword ptr [rbp-X],rbx */
34826- EMIT3_off32(0x48, 0x89, 0x9D, -stacksize);
34827-
34828- /* sk_convert_filter() maps classic BPF register X to R7 and uses R8
34829- * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
34830- * R8(r14). R9(r15) spill could be made conditional, but there is only
34831- * one 'bpf_error' return path out of helper functions inside bpf_jit.S
34832- * The overhead of extra spill is negligible for any filter other
34833- * than synthetic ones. Therefore not worth adding complexity.
34834- */
34835-
34836- /* mov qword ptr [rbp-X],r13 */
34837- EMIT3_off32(0x4C, 0x89, 0xAD, -stacksize + 8);
34838- /* mov qword ptr [rbp-X],r14 */
34839- EMIT3_off32(0x4C, 0x89, 0xB5, -stacksize + 16);
34840- /* mov qword ptr [rbp-X],r15 */
34841- EMIT3_off32(0x4C, 0x89, 0xBD, -stacksize + 24);
34842-
34843- /* clear A and X registers */
34844- EMIT2(0x31, 0xc0); /* xor eax, eax */
34845- EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */
34846-
34847- if (ctx->seen_ld_abs) {
34848- /* r9d : skb->len - skb->data_len (headlen)
34849- * r10 : skb->data
34850- */
34851- if (is_imm8(offsetof(struct sk_buff, len)))
34852- /* mov %r9d, off8(%rdi) */
34853- EMIT4(0x44, 0x8b, 0x4f,
34854- offsetof(struct sk_buff, len));
34855- else
34856- /* mov %r9d, off32(%rdi) */
34857- EMIT3_off32(0x44, 0x8b, 0x8f,
34858- offsetof(struct sk_buff, len));
34859-
34860- if (is_imm8(offsetof(struct sk_buff, data_len)))
34861- /* sub %r9d, off8(%rdi) */
34862- EMIT4(0x44, 0x2b, 0x4f,
34863- offsetof(struct sk_buff, data_len));
34864- else
34865- EMIT3_off32(0x44, 0x2b, 0x8f,
34866- offsetof(struct sk_buff, data_len));
34867-
34868- if (is_imm8(offsetof(struct sk_buff, data)))
34869- /* mov %r10, off8(%rdi) */
34870- EMIT4(0x4c, 0x8b, 0x57,
34871- offsetof(struct sk_buff, data));
34872- else
34873- /* mov %r10, off32(%rdi) */
34874- EMIT3_off32(0x4c, 0x8b, 0x97,
34875- offsetof(struct sk_buff, data));
34876- }
34877-
34878- for (i = 0; i < insn_cnt; i++, insn++) {
34879- const s32 imm32 = insn->imm;
34880- u32 dst_reg = insn->dst_reg;
34881- u32 src_reg = insn->src_reg;
34882- u8 b1 = 0, b2 = 0, b3 = 0;
34883- s64 jmp_offset;
34884- u8 jmp_cond;
34885- int ilen;
34886- u8 *func;
34887-
34888- switch (insn->code) {
34889- /* ALU */
34890- case BPF_ALU | BPF_ADD | BPF_X:
34891- case BPF_ALU | BPF_SUB | BPF_X:
34892- case BPF_ALU | BPF_AND | BPF_X:
34893- case BPF_ALU | BPF_OR | BPF_X:
34894- case BPF_ALU | BPF_XOR | BPF_X:
34895- case BPF_ALU64 | BPF_ADD | BPF_X:
34896- case BPF_ALU64 | BPF_SUB | BPF_X:
34897- case BPF_ALU64 | BPF_AND | BPF_X:
34898- case BPF_ALU64 | BPF_OR | BPF_X:
34899- case BPF_ALU64 | BPF_XOR | BPF_X:
34900- switch (BPF_OP(insn->code)) {
34901- case BPF_ADD: b2 = 0x01; break;
34902- case BPF_SUB: b2 = 0x29; break;
34903- case BPF_AND: b2 = 0x21; break;
34904- case BPF_OR: b2 = 0x09; break;
34905- case BPF_XOR: b2 = 0x31; break;
34906- }
34907- if (BPF_CLASS(insn->code) == BPF_ALU64)
34908- EMIT1(add_2mod(0x48, dst_reg, src_reg));
34909- else if (is_ereg(dst_reg) || is_ereg(src_reg))
34910- EMIT1(add_2mod(0x40, dst_reg, src_reg));
34911- EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
34912- break;
34913-
34914- /* mov dst, src */
34915- case BPF_ALU64 | BPF_MOV | BPF_X:
34916- EMIT_mov(dst_reg, src_reg);
34917- break;
34918-
34919- /* mov32 dst, src */
34920- case BPF_ALU | BPF_MOV | BPF_X:
34921- if (is_ereg(dst_reg) || is_ereg(src_reg))
34922- EMIT1(add_2mod(0x40, dst_reg, src_reg));
34923- EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
34924- break;
34925-
34926- /* neg dst */
34927- case BPF_ALU | BPF_NEG:
34928- case BPF_ALU64 | BPF_NEG:
34929- if (BPF_CLASS(insn->code) == BPF_ALU64)
34930- EMIT1(add_1mod(0x48, dst_reg));
34931- else if (is_ereg(dst_reg))
34932- EMIT1(add_1mod(0x40, dst_reg));
34933- EMIT2(0xF7, add_1reg(0xD8, dst_reg));
34934- break;
34935-
34936- case BPF_ALU | BPF_ADD | BPF_K:
34937- case BPF_ALU | BPF_SUB | BPF_K:
34938- case BPF_ALU | BPF_AND | BPF_K:
34939- case BPF_ALU | BPF_OR | BPF_K:
34940- case BPF_ALU | BPF_XOR | BPF_K:
34941- case BPF_ALU64 | BPF_ADD | BPF_K:
34942- case BPF_ALU64 | BPF_SUB | BPF_K:
34943- case BPF_ALU64 | BPF_AND | BPF_K:
34944- case BPF_ALU64 | BPF_OR | BPF_K:
34945- case BPF_ALU64 | BPF_XOR | BPF_K:
34946- if (BPF_CLASS(insn->code) == BPF_ALU64)
34947- EMIT1(add_1mod(0x48, dst_reg));
34948- else if (is_ereg(dst_reg))
34949- EMIT1(add_1mod(0x40, dst_reg));
34950-
34951- switch (BPF_OP(insn->code)) {
34952- case BPF_ADD: b3 = 0xC0; break;
34953- case BPF_SUB: b3 = 0xE8; break;
34954- case BPF_AND: b3 = 0xE0; break;
34955- case BPF_OR: b3 = 0xC8; break;
34956- case BPF_XOR: b3 = 0xF0; break;
34957- }
34958-
34959- if (is_imm8(imm32))
34960- EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
34961- else
34962- EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
34963- break;
34964-
34965- case BPF_ALU64 | BPF_MOV | BPF_K:
34966- /* optimization: if imm32 is positive,
34967- * use 'mov eax, imm32' (which zero-extends imm32)
34968- * to save 2 bytes
34969- */
34970- if (imm32 < 0) {
34971- /* 'mov rax, imm32' sign extends imm32 */
34972- b1 = add_1mod(0x48, dst_reg);
34973- b2 = 0xC7;
34974- b3 = 0xC0;
34975- EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
34976- break;
34977- }
34978-
34979- case BPF_ALU | BPF_MOV | BPF_K:
34980- /* mov %eax, imm32 */
34981- if (is_ereg(dst_reg))
34982- EMIT1(add_1mod(0x40, dst_reg));
34983- EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
34984- break;
34985-
34986- /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
34987- case BPF_ALU | BPF_MOD | BPF_X:
34988- case BPF_ALU | BPF_DIV | BPF_X:
34989- case BPF_ALU | BPF_MOD | BPF_K:
34990- case BPF_ALU | BPF_DIV | BPF_K:
34991- case BPF_ALU64 | BPF_MOD | BPF_X:
34992- case BPF_ALU64 | BPF_DIV | BPF_X:
34993- case BPF_ALU64 | BPF_MOD | BPF_K:
34994- case BPF_ALU64 | BPF_DIV | BPF_K:
34995- EMIT1(0x50); /* push rax */
34996- EMIT1(0x52); /* push rdx */
34997-
34998- if (BPF_SRC(insn->code) == BPF_X)
34999- /* mov r11, src_reg */
35000- EMIT_mov(AUX_REG, src_reg);
35001- else
35002- /* mov r11, imm32 */
35003- EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
35004-
35005- /* mov rax, dst_reg */
35006- EMIT_mov(BPF_REG_0, dst_reg);
35007-
35008- /* xor edx, edx
35009- * equivalent to 'xor rdx, rdx', but one byte less
35010- */
35011- EMIT2(0x31, 0xd2);
35012-
35013- if (BPF_SRC(insn->code) == BPF_X) {
35014- /* if (src_reg == 0) return 0 */
35015-
35016- /* cmp r11, 0 */
35017- EMIT4(0x49, 0x83, 0xFB, 0x00);
35018-
35019- /* jne .+9 (skip over pop, pop, xor and jmp) */
35020- EMIT2(X86_JNE, 1 + 1 + 2 + 5);
35021- EMIT1(0x5A); /* pop rdx */
35022- EMIT1(0x58); /* pop rax */
35023- EMIT2(0x31, 0xc0); /* xor eax, eax */
35024-
35025- /* jmp cleanup_addr
35026- * addrs[i] - 11, because there are 11 bytes
35027- * after this insn: div, mov, pop, pop, mov
35028- */
35029- jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
35030- EMIT1_off32(0xE9, jmp_offset);
35031- }
35032-
35033- if (BPF_CLASS(insn->code) == BPF_ALU64)
35034- /* div r11 */
35035- EMIT3(0x49, 0xF7, 0xF3);
35036- else
35037- /* div r11d */
35038- EMIT3(0x41, 0xF7, 0xF3);
35039-
35040- if (BPF_OP(insn->code) == BPF_MOD)
35041- /* mov r11, rdx */
35042- EMIT3(0x49, 0x89, 0xD3);
35043- else
35044- /* mov r11, rax */
35045- EMIT3(0x49, 0x89, 0xC3);
35046-
35047- EMIT1(0x5A); /* pop rdx */
35048- EMIT1(0x58); /* pop rax */
35049-
35050- /* mov dst_reg, r11 */
35051- EMIT_mov(dst_reg, AUX_REG);
35052- break;
35053-
35054- case BPF_ALU | BPF_MUL | BPF_K:
35055- case BPF_ALU | BPF_MUL | BPF_X:
35056- case BPF_ALU64 | BPF_MUL | BPF_K:
35057- case BPF_ALU64 | BPF_MUL | BPF_X:
35058- EMIT1(0x50); /* push rax */
35059- EMIT1(0x52); /* push rdx */
35060-
35061- /* mov r11, dst_reg */
35062- EMIT_mov(AUX_REG, dst_reg);
35063-
35064- if (BPF_SRC(insn->code) == BPF_X)
35065- /* mov rax, src_reg */
35066- EMIT_mov(BPF_REG_0, src_reg);
35067- else
35068- /* mov rax, imm32 */
35069- EMIT3_off32(0x48, 0xC7, 0xC0, imm32);
35070-
35071- if (BPF_CLASS(insn->code) == BPF_ALU64)
35072- EMIT1(add_1mod(0x48, AUX_REG));
35073- else if (is_ereg(AUX_REG))
35074- EMIT1(add_1mod(0x40, AUX_REG));
35075- /* mul(q) r11 */
35076- EMIT2(0xF7, add_1reg(0xE0, AUX_REG));
35077-
35078- /* mov r11, rax */
35079- EMIT_mov(AUX_REG, BPF_REG_0);
35080-
35081- EMIT1(0x5A); /* pop rdx */
35082- EMIT1(0x58); /* pop rax */
35083-
35084- /* mov dst_reg, r11 */
35085- EMIT_mov(dst_reg, AUX_REG);
35086- break;
35087-
35088- /* shifts */
35089- case BPF_ALU | BPF_LSH | BPF_K:
35090- case BPF_ALU | BPF_RSH | BPF_K:
35091- case BPF_ALU | BPF_ARSH | BPF_K:
35092- case BPF_ALU64 | BPF_LSH | BPF_K:
35093- case BPF_ALU64 | BPF_RSH | BPF_K:
35094- case BPF_ALU64 | BPF_ARSH | BPF_K:
35095- if (BPF_CLASS(insn->code) == BPF_ALU64)
35096- EMIT1(add_1mod(0x48, dst_reg));
35097- else if (is_ereg(dst_reg))
35098- EMIT1(add_1mod(0x40, dst_reg));
35099-
35100- switch (BPF_OP(insn->code)) {
35101- case BPF_LSH: b3 = 0xE0; break;
35102- case BPF_RSH: b3 = 0xE8; break;
35103- case BPF_ARSH: b3 = 0xF8; break;
35104- }
35105- EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
35106- break;
35107-
35108- case BPF_ALU | BPF_END | BPF_FROM_BE:
35109- switch (imm32) {
35110- case 16:
35111- /* emit 'ror %ax, 8' to swap lower 2 bytes */
35112- EMIT1(0x66);
35113- if (is_ereg(dst_reg))
35114- EMIT1(0x41);
35115- EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
35116- break;
35117- case 32:
35118- /* emit 'bswap eax' to swap lower 4 bytes */
35119- if (is_ereg(dst_reg))
35120- EMIT2(0x41, 0x0F);
35121- else
35122- EMIT1(0x0F);
35123- EMIT1(add_1reg(0xC8, dst_reg));
35124- break;
35125- case 64:
35126- /* emit 'bswap rax' to swap 8 bytes */
35127- EMIT3(add_1mod(0x48, dst_reg), 0x0F,
35128- add_1reg(0xC8, dst_reg));
35129- break;
35130- }
35131- break;
35132-
35133- case BPF_ALU | BPF_END | BPF_FROM_LE:
35134- break;
35135-
35136- /* ST: *(u8*)(dst_reg + off) = imm */
35137- case BPF_ST | BPF_MEM | BPF_B:
35138- if (is_ereg(dst_reg))
35139- EMIT2(0x41, 0xC6);
35140- else
35141- EMIT1(0xC6);
35142- goto st;
35143- case BPF_ST | BPF_MEM | BPF_H:
35144- if (is_ereg(dst_reg))
35145- EMIT3(0x66, 0x41, 0xC7);
35146- else
35147- EMIT2(0x66, 0xC7);
35148- goto st;
35149- case BPF_ST | BPF_MEM | BPF_W:
35150- if (is_ereg(dst_reg))
35151- EMIT2(0x41, 0xC7);
35152- else
35153- EMIT1(0xC7);
35154- goto st;
35155- case BPF_ST | BPF_MEM | BPF_DW:
35156- EMIT2(add_1mod(0x48, dst_reg), 0xC7);
35157-
35158-st: if (is_imm8(insn->off))
35159- EMIT2(add_1reg(0x40, dst_reg), insn->off);
35160- else
35161- EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
35162-
35163- EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
35164- break;
35165-
35166- /* STX: *(u8*)(dst_reg + off) = src_reg */
35167- case BPF_STX | BPF_MEM | BPF_B:
35168- /* emit 'mov byte ptr [rax + off], al' */
35169- if (is_ereg(dst_reg) || is_ereg(src_reg) ||
35170- /* have to add extra byte for x86 SIL, DIL regs */
35171- src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
35172- EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
35173- else
35174- EMIT1(0x88);
35175- goto stx;
35176- case BPF_STX | BPF_MEM | BPF_H:
35177- if (is_ereg(dst_reg) || is_ereg(src_reg))
35178- EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
35179- else
35180- EMIT2(0x66, 0x89);
35181- goto stx;
35182- case BPF_STX | BPF_MEM | BPF_W:
35183- if (is_ereg(dst_reg) || is_ereg(src_reg))
35184- EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
35185- else
35186- EMIT1(0x89);
35187- goto stx;
35188- case BPF_STX | BPF_MEM | BPF_DW:
35189- EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
35190-stx: if (is_imm8(insn->off))
35191- EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
35192- else
35193- EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
35194- insn->off);
35195- break;
35196-
35197- /* LDX: dst_reg = *(u8*)(src_reg + off) */
35198- case BPF_LDX | BPF_MEM | BPF_B:
35199- /* emit 'movzx rax, byte ptr [rax + off]' */
35200- EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
35201- goto ldx;
35202- case BPF_LDX | BPF_MEM | BPF_H:
35203- /* emit 'movzx rax, word ptr [rax + off]' */
35204- EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
35205- goto ldx;
35206- case BPF_LDX | BPF_MEM | BPF_W:
35207- /* emit 'mov eax, dword ptr [rax+0x14]' */
35208- if (is_ereg(dst_reg) || is_ereg(src_reg))
35209- EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
35210- else
35211- EMIT1(0x8B);
35212- goto ldx;
35213- case BPF_LDX | BPF_MEM | BPF_DW:
35214- /* emit 'mov rax, qword ptr [rax+0x14]' */
35215- EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
35216-ldx: /* if insn->off == 0 we can save one extra byte, but
35217- * special case of x86 r13 which always needs an offset
35218- * is not worth the hassle
35219- */
35220- if (is_imm8(insn->off))
35221- EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
35222- else
35223- EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
35224- insn->off);
35225- break;
35226-
35227- /* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
35228- case BPF_STX | BPF_XADD | BPF_W:
35229- /* emit 'lock add dword ptr [rax + off], eax' */
35230- if (is_ereg(dst_reg) || is_ereg(src_reg))
35231- EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
35232- else
35233- EMIT2(0xF0, 0x01);
35234- goto xadd;
35235- case BPF_STX | BPF_XADD | BPF_DW:
35236- EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
35237-xadd: if (is_imm8(insn->off))
35238- EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
35239- else
35240- EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
35241- insn->off);
35242- break;
35243-
35244- /* call */
35245- case BPF_JMP | BPF_CALL:
35246- func = (u8 *) __bpf_call_base + imm32;
35247- jmp_offset = func - (image + addrs[i]);
35248- if (ctx->seen_ld_abs) {
35249- EMIT2(0x41, 0x52); /* push %r10 */
35250- EMIT2(0x41, 0x51); /* push %r9 */
35251- /* need to adjust jmp offset, since
35252- * pop %r9, pop %r10 take 4 bytes after call insn
35253- */
35254- jmp_offset += 4;
35255- }
35256- if (!imm32 || !is_simm32(jmp_offset)) {
35257- pr_err("unsupported bpf func %d addr %p image %p\n",
35258- imm32, func, image);
35259- return -EINVAL;
35260- }
35261- EMIT1_off32(0xE8, jmp_offset);
35262- if (ctx->seen_ld_abs) {
35263- EMIT2(0x41, 0x59); /* pop %r9 */
35264- EMIT2(0x41, 0x5A); /* pop %r10 */
35265- }
35266- break;
35267-
35268- /* cond jump */
35269- case BPF_JMP | BPF_JEQ | BPF_X:
35270- case BPF_JMP | BPF_JNE | BPF_X:
35271- case BPF_JMP | BPF_JGT | BPF_X:
35272- case BPF_JMP | BPF_JGE | BPF_X:
35273- case BPF_JMP | BPF_JSGT | BPF_X:
35274- case BPF_JMP | BPF_JSGE | BPF_X:
35275- /* cmp dst_reg, src_reg */
35276- EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
35277- add_2reg(0xC0, dst_reg, src_reg));
35278- goto emit_cond_jmp;
35279-
35280- case BPF_JMP | BPF_JSET | BPF_X:
35281- /* test dst_reg, src_reg */
35282- EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
35283- add_2reg(0xC0, dst_reg, src_reg));
35284- goto emit_cond_jmp;
35285-
35286- case BPF_JMP | BPF_JSET | BPF_K:
35287- /* test dst_reg, imm32 */
35288- EMIT1(add_1mod(0x48, dst_reg));
35289- EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
35290- goto emit_cond_jmp;
35291-
35292- case BPF_JMP | BPF_JEQ | BPF_K:
35293- case BPF_JMP | BPF_JNE | BPF_K:
35294- case BPF_JMP | BPF_JGT | BPF_K:
35295- case BPF_JMP | BPF_JGE | BPF_K:
35296- case BPF_JMP | BPF_JSGT | BPF_K:
35297- case BPF_JMP | BPF_JSGE | BPF_K:
35298- /* cmp dst_reg, imm8/32 */
35299- EMIT1(add_1mod(0x48, dst_reg));
35300-
35301- if (is_imm8(imm32))
35302- EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
35303- else
35304- EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
35305-
35306-emit_cond_jmp: /* convert BPF opcode to x86 */
35307- switch (BPF_OP(insn->code)) {
35308- case BPF_JEQ:
35309- jmp_cond = X86_JE;
35310- break;
35311- case BPF_JSET:
35312- case BPF_JNE:
35313- jmp_cond = X86_JNE;
35314- break;
35315- case BPF_JGT:
35316- /* GT is unsigned '>', JA in x86 */
35317- jmp_cond = X86_JA;
35318- break;
35319- case BPF_JGE:
35320- /* GE is unsigned '>=', JAE in x86 */
35321- jmp_cond = X86_JAE;
35322- break;
35323- case BPF_JSGT:
35324- /* signed '>', GT in x86 */
35325- jmp_cond = X86_JG;
35326- break;
35327- case BPF_JSGE:
35328- /* signed '>=', GE in x86 */
35329- jmp_cond = X86_JGE;
35330- break;
35331- default: /* to silence gcc warning */
35332- return -EFAULT;
35333- }
35334- jmp_offset = addrs[i + insn->off] - addrs[i];
35335- if (is_imm8(jmp_offset)) {
35336- EMIT2(jmp_cond, jmp_offset);
35337- } else if (is_simm32(jmp_offset)) {
35338- EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
35339- } else {
35340- pr_err("cond_jmp gen bug %llx\n", jmp_offset);
35341- return -EFAULT;
35342- }
35343-
35344- break;
35345-
35346- case BPF_JMP | BPF_JA:
35347- jmp_offset = addrs[i + insn->off] - addrs[i];
35348- if (!jmp_offset)
35349- /* optimize out nop jumps */
35350- break;
35351-emit_jmp:
35352- if (is_imm8(jmp_offset)) {
35353- EMIT2(0xEB, jmp_offset);
35354- } else if (is_simm32(jmp_offset)) {
35355- EMIT1_off32(0xE9, jmp_offset);
35356- } else {
35357- pr_err("jmp gen bug %llx\n", jmp_offset);
35358- return -EFAULT;
35359- }
35360- break;
35361-
35362- case BPF_LD | BPF_IND | BPF_W:
35363- func = sk_load_word;
35364- goto common_load;
35365- case BPF_LD | BPF_ABS | BPF_W:
35366- func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
35367-common_load: ctx->seen_ld_abs = true;
35368- jmp_offset = func - (image + addrs[i]);
35369- if (!func || !is_simm32(jmp_offset)) {
35370- pr_err("unsupported bpf func %d addr %p image %p\n",
35371- imm32, func, image);
35372- return -EINVAL;
35373- }
35374- if (BPF_MODE(insn->code) == BPF_ABS) {
35375- /* mov %esi, imm32 */
35376- EMIT1_off32(0xBE, imm32);
35377- } else {
35378- /* mov %rsi, src_reg */
35379- EMIT_mov(BPF_REG_2, src_reg);
35380- if (imm32) {
35381- if (is_imm8(imm32))
35382- /* add %esi, imm8 */
35383- EMIT3(0x83, 0xC6, imm32);
35384- else
35385- /* add %esi, imm32 */
35386- EMIT2_off32(0x81, 0xC6, imm32);
35387- }
35388- }
35389- /* skb pointer is in R6 (%rbx), it will be copied into
35390- * %rdi if skb_copy_bits() call is necessary.
35391- * sk_load_* helpers also use %r10 and %r9d.
35392- * See bpf_jit.S
35393- */
35394- EMIT1_off32(0xE8, jmp_offset); /* call */
35395- break;
35396-
35397- case BPF_LD | BPF_IND | BPF_H:
35398- func = sk_load_half;
35399- goto common_load;
35400- case BPF_LD | BPF_ABS | BPF_H:
35401- func = CHOOSE_LOAD_FUNC(imm32, sk_load_half);
35402- goto common_load;
35403- case BPF_LD | BPF_IND | BPF_B:
35404- func = sk_load_byte;
35405- goto common_load;
35406- case BPF_LD | BPF_ABS | BPF_B:
35407- func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte);
35408- goto common_load;
35409-
35410- case BPF_JMP | BPF_EXIT:
35411- if (i != insn_cnt - 1) {
35412- jmp_offset = ctx->cleanup_addr - addrs[i];
35413- goto emit_jmp;
35414- }
35415- /* update cleanup_addr */
35416- ctx->cleanup_addr = proglen;
35417- /* mov rbx, qword ptr [rbp-X] */
35418- EMIT3_off32(0x48, 0x8B, 0x9D, -stacksize);
35419- /* mov r13, qword ptr [rbp-X] */
35420- EMIT3_off32(0x4C, 0x8B, 0xAD, -stacksize + 8);
35421- /* mov r14, qword ptr [rbp-X] */
35422- EMIT3_off32(0x4C, 0x8B, 0xB5, -stacksize + 16);
35423- /* mov r15, qword ptr [rbp-X] */
35424- EMIT3_off32(0x4C, 0x8B, 0xBD, -stacksize + 24);
35425-
35426- EMIT1(0xC9); /* leave */
35427- EMIT1(0xC3); /* ret */
35428- break;
35429-
35430- default:
35431- /* By design x64 JIT should support all BPF instructions
35432- * This error will be seen if new instruction was added
35433- * to interpreter, but not to JIT
35434- * or if there is junk in sk_filter
35435- */
35436- pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
35437- return -EINVAL;
35438- }
35439-
35440- ilen = prog - temp;
35441- if (image) {
35442- if (unlikely(proglen + ilen > oldproglen)) {
35443- pr_err("bpf_jit_compile fatal error\n");
35444- return -EFAULT;
35445- }
35446- memcpy(image + proglen, temp, ilen);
35447- }
35448- proglen += ilen;
35449- addrs[i] = proglen;
35450- prog = temp;
35451- }
35452- return proglen;
35453-}
35454-
35455-void bpf_jit_compile(struct sk_filter *prog)
35456-{
35457-}
35458-
35459-void bpf_int_jit_compile(struct sk_filter *prog)
35460-{
35461- struct bpf_binary_header *header = NULL;
35462- int proglen, oldproglen = 0;
35463- struct jit_context ctx = {};
35464+void bpf_jit_compile(struct sk_filter *fp)
35465+{
35466+ u8 temp[MAX_INSTR_CODE_SIZE];
35467+ u8 *prog;
35468+ unsigned int proglen, oldproglen = 0;
35469+ int ilen, i;
35470+ int t_offset, f_offset;
35471+ u8 t_op, f_op, seen = 0, pass;
35472 u8 *image = NULL;
35473- int *addrs;
35474- int pass;
35475- int i;
35476+ u8 *header = NULL;
35477+ u8 *func;
35478+ int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
35479+ unsigned int cleanup_addr; /* epilogue code offset */
35480+ unsigned int *addrs;
35481+ const struct sock_filter *filter = fp->insns;
35482+ int flen = fp->len;
35483+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
35484+ unsigned int randkey;
35485+#endif
35486
35487 if (!bpf_jit_enable)
35488 return;
35489
35490- if (!prog || !prog->len)
35491- return;
35492-
35493- addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
35494- if (!addrs)
35495+ addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
35496+ if (addrs == NULL)
35497 return;
35498
35499 /* Before first pass, make a rough estimation of addrs[]
35500- * each bpf instruction is translated to less than 64 bytes
35501+ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
35502 */
35503- for (proglen = 0, i = 0; i < prog->len; i++) {
35504- proglen += 64;
35505+ for (proglen = 0, i = 0; i < flen; i++) {
35506+ proglen += MAX_INSTR_CODE_SIZE;
35507 addrs[i] = proglen;
35508 }
35509- ctx.cleanup_addr = proglen;
35510+ cleanup_addr = proglen; /* epilogue address */
35511
35512 for (pass = 0; pass < 10; pass++) {
35513- proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
35514- if (proglen <= 0) {
35515- image = NULL;
35516- if (header)
35517- module_free(NULL, header);
35518- goto out;
35519+ u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
35520+ /* no prologue/epilogue for trivial filters (RET something) */
35521+ proglen = 0;
35522+ prog = temp;
35523+
35524+ if (seen_or_pass0) {
35525+ EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
35526+ EMIT4(0x48, 0x83, 0xec, 96); /* subq $96,%rsp */
35527+ /* note : must save %rbx in case bpf_error is hit */
35528+ if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF))
35529+ EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */
35530+ if (seen_or_pass0 & SEEN_XREG)
35531+ CLEAR_X(); /* make sure we don't leak kernel memory */
35532+
35533+ /*
35534+ * If this filter needs to access skb data,
35535+ * loads r9 and r8 with :
35536+ * r9 = skb->len - skb->data_len
35537+ * r8 = skb->data
35538+ */
35539+ if (seen_or_pass0 & SEEN_DATAREF) {
35540+ if (offsetof(struct sk_buff, len) <= 127)
35541+ /* mov off8(%rdi),%r9d */
35542+ EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len));
35543+ else {
35544+ /* mov off32(%rdi),%r9d */
35545+ EMIT3(0x44, 0x8b, 0x8f);
35546+ EMIT(offsetof(struct sk_buff, len), 4);
35547+ }
35548+ if (is_imm8(offsetof(struct sk_buff, data_len)))
35549+ /* sub off8(%rdi),%r9d */
35550+ EMIT4(0x44, 0x2b, 0x4f, offsetof(struct sk_buff, data_len));
35551+ else {
35552+ EMIT3(0x44, 0x2b, 0x8f);
35553+ EMIT(offsetof(struct sk_buff, data_len), 4);
35554+ }
35555+
35556+ if (is_imm8(offsetof(struct sk_buff, data)))
35557+ /* mov off8(%rdi),%r8 */
35558+ EMIT4(0x4c, 0x8b, 0x47, offsetof(struct sk_buff, data));
35559+ else {
35560+ /* mov off32(%rdi),%r8 */
35561+ EMIT3(0x4c, 0x8b, 0x87);
35562+ EMIT(offsetof(struct sk_buff, data), 4);
35563+ }
35564+ }
35565 }
35566+
35567+ switch (filter[0].code) {
35568+ case BPF_S_RET_K:
35569+ case BPF_S_LD_W_LEN:
35570+ case BPF_S_ANC_PROTOCOL:
35571+ case BPF_S_ANC_IFINDEX:
35572+ case BPF_S_ANC_MARK:
35573+ case BPF_S_ANC_RXHASH:
35574+ case BPF_S_ANC_CPU:
35575+ case BPF_S_ANC_VLAN_TAG:
35576+ case BPF_S_ANC_VLAN_TAG_PRESENT:
35577+ case BPF_S_ANC_QUEUE:
35578+ case BPF_S_ANC_PKTTYPE:
35579+ case BPF_S_LD_W_ABS:
35580+ case BPF_S_LD_H_ABS:
35581+ case BPF_S_LD_B_ABS:
35582+ /* first instruction sets A register (or is RET 'constant') */
35583+ break;
35584+ default:
35585+ /* make sure we don't leak kernel information to userland */
35586+ CLEAR_A(); /* A = 0 */
35587+ }
35588+
35589+ for (i = 0; i < flen; i++) {
35590+ unsigned int K = filter[i].k;
35591+
35592+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
35593+ randkey = prandom_u32();
35594+#endif
35595+
35596+ switch (filter[i].code) {
35597+ case BPF_S_ALU_ADD_X: /* A += X; */
35598+ seen |= SEEN_XREG;
35599+ EMIT2(0x01, 0xd8); /* add %ebx,%eax */
35600+ break;
35601+ case BPF_S_ALU_ADD_K: /* A += K; */
35602+ if (!K)
35603+ break;
35604+ if (is_imm8(K))
35605+ EMIT3(0x83, 0xc0, K); /* add imm8,%eax */
35606+ else
35607+ EMIT1_off32(0x05, K); /* add imm32,%eax */
35608+ break;
35609+ case BPF_S_ALU_SUB_X: /* A -= X; */
35610+ seen |= SEEN_XREG;
35611+ EMIT2(0x29, 0xd8); /* sub %ebx,%eax */
35612+ break;
35613+ case BPF_S_ALU_SUB_K: /* A -= K */
35614+ if (!K)
35615+ break;
35616+ if (is_imm8(K))
35617+ EMIT3(0x83, 0xe8, K); /* sub imm8,%eax */
35618+ else
35619+ EMIT1_off32(0x2d, K); /* sub imm32,%eax */
35620+ break;
35621+ case BPF_S_ALU_MUL_X: /* A *= X; */
35622+ seen |= SEEN_XREG;
35623+ EMIT3(0x0f, 0xaf, 0xc3); /* imul %ebx,%eax */
35624+ break;
35625+ case BPF_S_ALU_MUL_K: /* A *= K */
35626+ if (is_imm8(K))
35627+ EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
35628+ else
35629+ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
35630+ break;
35631+ case BPF_S_ALU_DIV_X: /* A /= X; */
35632+ seen |= SEEN_XREG;
35633+ EMIT2(0x85, 0xdb); /* test %ebx,%ebx */
35634+ if (pc_ret0 > 0) {
35635+ /* addrs[pc_ret0 - 1] is start address of target
35636+ * (addrs[i] - 4) is the address following this jmp
35637+ * ("xor %edx,%edx; div %ebx" being 4 bytes long)
35638+ */
35639+ EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
35640+ (addrs[i] - 4));
35641+ } else {
35642+ EMIT_COND_JMP(X86_JNE, 2 + 5);
35643+ CLEAR_A();
35644+ EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
35645+ }
35646+ EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */
35647+ break;
35648+ case BPF_S_ALU_MOD_X: /* A %= X; */
35649+ seen |= SEEN_XREG;
35650+ EMIT2(0x85, 0xdb); /* test %ebx,%ebx */
35651+ if (pc_ret0 > 0) {
35652+ /* addrs[pc_ret0 - 1] is start address of target
35653+ * (addrs[i] - 6) is the address following this jmp
35654+ * ("xor %edx,%edx; div %ebx;mov %edx,%eax" being 6 bytes long)
35655+ */
35656+ EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
35657+ (addrs[i] - 6));
35658+ } else {
35659+ EMIT_COND_JMP(X86_JNE, 2 + 5);
35660+ CLEAR_A();
35661+ EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 6)); /* jmp .+off32 */
35662+ }
35663+ EMIT2(0x31, 0xd2); /* xor %edx,%edx */
35664+ EMIT2(0xf7, 0xf3); /* div %ebx */
35665+ EMIT2(0x89, 0xd0); /* mov %edx,%eax */
35666+ break;
35667+ case BPF_S_ALU_MOD_K: /* A %= K; */
35668+ if (K == 1) {
35669+ CLEAR_A();
35670+ break;
35671+ }
35672+ EMIT2(0x31, 0xd2); /* xor %edx,%edx */
35673+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
35674+ DILUTE_CONST_SEQUENCE(K, randkey);
35675+#else
35676+ EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
35677+#endif
35678+ EMIT2(0xf7, 0xf1); /* div %ecx */
35679+ EMIT2(0x89, 0xd0); /* mov %edx,%eax */
35680+ break;
35681+ case BPF_S_ALU_DIV_K: /* A /= K */
35682+ if (K == 1)
35683+ break;
35684+ EMIT2(0x31, 0xd2); /* xor %edx,%edx */
35685+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
35686+ DILUTE_CONST_SEQUENCE(K, randkey);
35687+#else
35688+ EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
35689+#endif
35690+ EMIT2(0xf7, 0xf1); /* div %ecx */
35691+ break;
35692+ case BPF_S_ALU_AND_X:
35693+ seen |= SEEN_XREG;
35694+ EMIT2(0x21, 0xd8); /* and %ebx,%eax */
35695+ break;
35696+ case BPF_S_ALU_AND_K:
35697+ if (K >= 0xFFFFFF00) {
35698+ EMIT2(0x24, K & 0xFF); /* and imm8,%al */
35699+ } else if (K >= 0xFFFF0000) {
35700+ EMIT2(0x66, 0x25); /* and imm16,%ax */
35701+ EMIT(K, 2);
35702+ } else {
35703+ EMIT1_off32(0x25, K); /* and imm32,%eax */
35704+ }
35705+ break;
35706+ case BPF_S_ALU_OR_X:
35707+ seen |= SEEN_XREG;
35708+ EMIT2(0x09, 0xd8); /* or %ebx,%eax */
35709+ break;
35710+ case BPF_S_ALU_OR_K:
35711+ if (is_imm8(K))
35712+ EMIT3(0x83, 0xc8, K); /* or imm8,%eax */
35713+ else
35714+ EMIT1_off32(0x0d, K); /* or imm32,%eax */
35715+ break;
35716+ case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
35717+ case BPF_S_ALU_XOR_X:
35718+ seen |= SEEN_XREG;
35719+ EMIT2(0x31, 0xd8); /* xor %ebx,%eax */
35720+ break;
35721+ case BPF_S_ALU_XOR_K: /* A ^= K; */
35722+ if (K == 0)
35723+ break;
35724+ if (is_imm8(K))
35725+ EMIT3(0x83, 0xf0, K); /* xor imm8,%eax */
35726+ else
35727+ EMIT1_off32(0x35, K); /* xor imm32,%eax */
35728+ break;
35729+ case BPF_S_ALU_LSH_X: /* A <<= X; */
35730+ seen |= SEEN_XREG;
35731+ EMIT4(0x89, 0xd9, 0xd3, 0xe0); /* mov %ebx,%ecx; shl %cl,%eax */
35732+ break;
35733+ case BPF_S_ALU_LSH_K:
35734+ if (K == 0)
35735+ break;
35736+ else if (K == 1)
35737+ EMIT2(0xd1, 0xe0); /* shl %eax */
35738+ else
35739+ EMIT3(0xc1, 0xe0, K);
35740+ break;
35741+ case BPF_S_ALU_RSH_X: /* A >>= X; */
35742+ seen |= SEEN_XREG;
35743+ EMIT4(0x89, 0xd9, 0xd3, 0xe8); /* mov %ebx,%ecx; shr %cl,%eax */
35744+ break;
35745+ case BPF_S_ALU_RSH_K: /* A >>= K; */
35746+ if (K == 0)
35747+ break;
35748+ else if (K == 1)
35749+ EMIT2(0xd1, 0xe8); /* shr %eax */
35750+ else
35751+ EMIT3(0xc1, 0xe8, K);
35752+ break;
35753+ case BPF_S_ALU_NEG:
35754+ EMIT2(0xf7, 0xd8); /* neg %eax */
35755+ break;
35756+ case BPF_S_RET_K:
35757+ if (!K) {
35758+ if (pc_ret0 == -1)
35759+ pc_ret0 = i;
35760+ CLEAR_A();
35761+ } else {
35762+ EMIT1_off32(0xb8, K); /* mov $imm32,%eax */
35763+ }
35764+		/* fall through */
35765+ case BPF_S_RET_A:
35766+ if (seen_or_pass0) {
35767+ if (i != flen - 1) {
35768+ EMIT_JMP(cleanup_addr - addrs[i]);
35769+ break;
35770+ }
35771+ if (seen_or_pass0 & SEEN_XREG)
35772+ EMIT4(0x48, 0x8b, 0x5d, 0xf8); /* mov -8(%rbp),%rbx */
35773+ EMIT1(0xc9); /* leaveq */
35774+ }
35775+ EMIT1(0xc3); /* ret */
35776+ break;
35777+ case BPF_S_MISC_TAX: /* X = A */
35778+ seen |= SEEN_XREG;
35779+ EMIT2(0x89, 0xc3); /* mov %eax,%ebx */
35780+ break;
35781+ case BPF_S_MISC_TXA: /* A = X */
35782+ seen |= SEEN_XREG;
35783+ EMIT2(0x89, 0xd8); /* mov %ebx,%eax */
35784+ break;
35785+ case BPF_S_LD_IMM: /* A = K */
35786+ if (!K)
35787+ CLEAR_A();
35788+ else
35789+ EMIT1_off32(0xb8, K); /* mov $imm32,%eax */
35790+ break;
35791+ case BPF_S_LDX_IMM: /* X = K */
35792+ seen |= SEEN_XREG;
35793+ if (!K)
35794+ CLEAR_X();
35795+ else
35796+ EMIT1_off32(0xbb, K); /* mov $imm32,%ebx */
35797+ break;
35798+ case BPF_S_LD_MEM: /* A = mem[K] : mov off8(%rbp),%eax */
35799+ seen |= SEEN_MEM;
35800+ EMIT3(0x8b, 0x45, 0xf0 - K*4);
35801+ break;
35802+ case BPF_S_LDX_MEM: /* X = mem[K] : mov off8(%rbp),%ebx */
35803+ seen |= SEEN_XREG | SEEN_MEM;
35804+ EMIT3(0x8b, 0x5d, 0xf0 - K*4);
35805+ break;
35806+ case BPF_S_ST: /* mem[K] = A : mov %eax,off8(%rbp) */
35807+ seen |= SEEN_MEM;
35808+ EMIT3(0x89, 0x45, 0xf0 - K*4);
35809+ break;
35810+ case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
35811+ seen |= SEEN_XREG | SEEN_MEM;
35812+ EMIT3(0x89, 0x5d, 0xf0 - K*4);
35813+ break;
35814+ case BPF_S_LD_W_LEN: /* A = skb->len; */
35815+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
35816+ if (is_imm8(offsetof(struct sk_buff, len)))
35817+ /* mov off8(%rdi),%eax */
35818+ EMIT3(0x8b, 0x47, offsetof(struct sk_buff, len));
35819+ else {
35820+ EMIT2(0x8b, 0x87);
35821+ EMIT(offsetof(struct sk_buff, len), 4);
35822+ }
35823+ break;
35824+ case BPF_S_LDX_W_LEN: /* X = skb->len; */
35825+ seen |= SEEN_XREG;
35826+ if (is_imm8(offsetof(struct sk_buff, len)))
35827+ /* mov off8(%rdi),%ebx */
35828+ EMIT3(0x8b, 0x5f, offsetof(struct sk_buff, len));
35829+ else {
35830+ EMIT2(0x8b, 0x9f);
35831+ EMIT(offsetof(struct sk_buff, len), 4);
35832+ }
35833+ break;
35834+ case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
35835+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
35836+ if (is_imm8(offsetof(struct sk_buff, protocol))) {
35837+ /* movzwl off8(%rdi),%eax */
35838+ EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, protocol));
35839+ } else {
35840+ EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
35841+ EMIT(offsetof(struct sk_buff, protocol), 4);
35842+ }
35843+ EMIT2(0x86, 0xc4); /* ntohs() : xchg %al,%ah */
35844+ break;
35845+ case BPF_S_ANC_IFINDEX:
35846+ if (is_imm8(offsetof(struct sk_buff, dev))) {
35847+ /* movq off8(%rdi),%rax */
35848+ EMIT4(0x48, 0x8b, 0x47, offsetof(struct sk_buff, dev));
35849+ } else {
35850+ EMIT3(0x48, 0x8b, 0x87); /* movq off32(%rdi),%rax */
35851+ EMIT(offsetof(struct sk_buff, dev), 4);
35852+ }
35853+ EMIT3(0x48, 0x85, 0xc0); /* test %rax,%rax */
35854+ EMIT_COND_JMP(X86_JE, cleanup_addr - (addrs[i] - 6));
35855+ BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
35856+ EMIT2(0x8b, 0x80); /* mov off32(%rax),%eax */
35857+ EMIT(offsetof(struct net_device, ifindex), 4);
35858+ break;
35859+ case BPF_S_ANC_MARK:
35860+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
35861+ if (is_imm8(offsetof(struct sk_buff, mark))) {
35862+ /* mov off8(%rdi),%eax */
35863+ EMIT3(0x8b, 0x47, offsetof(struct sk_buff, mark));
35864+ } else {
35865+ EMIT2(0x8b, 0x87);
35866+ EMIT(offsetof(struct sk_buff, mark), 4);
35867+ }
35868+ break;
35869+ case BPF_S_ANC_RXHASH:
35870+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
35871+ if (is_imm8(offsetof(struct sk_buff, hash))) {
35872+ /* mov off8(%rdi),%eax */
35873+ EMIT3(0x8b, 0x47, offsetof(struct sk_buff, hash));
35874+ } else {
35875+ EMIT2(0x8b, 0x87);
35876+ EMIT(offsetof(struct sk_buff, hash), 4);
35877+ }
35878+ break;
35879+ case BPF_S_ANC_QUEUE:
35880+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
35881+ if (is_imm8(offsetof(struct sk_buff, queue_mapping))) {
35882+ /* movzwl off8(%rdi),%eax */
35883+ EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, queue_mapping));
35884+ } else {
35885+ EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
35886+ EMIT(offsetof(struct sk_buff, queue_mapping), 4);
35887+ }
35888+ break;
35889+ case BPF_S_ANC_CPU:
35890+#ifdef CONFIG_SMP
35891+ EMIT4(0x65, 0x8b, 0x04, 0x25); /* mov %gs:off32,%eax */
35892+ EMIT((u32)(unsigned long)&cpu_number, 4); /* A = smp_processor_id(); */
35893+#else
35894+ CLEAR_A();
35895+#endif
35896+ break;
35897+ case BPF_S_ANC_VLAN_TAG:
35898+ case BPF_S_ANC_VLAN_TAG_PRESENT:
35899+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
35900+ if (is_imm8(offsetof(struct sk_buff, vlan_tci))) {
35901+ /* movzwl off8(%rdi),%eax */
35902+ EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, vlan_tci));
35903+ } else {
35904+ EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
35905+ EMIT(offsetof(struct sk_buff, vlan_tci), 4);
35906+ }
35907+ BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
35908+ if (filter[i].code == BPF_S_ANC_VLAN_TAG) {
35909+ EMIT3(0x80, 0xe4, 0xef); /* and $0xef,%ah */
35910+ } else {
35911+ EMIT3(0xc1, 0xe8, 0x0c); /* shr $0xc,%eax */
35912+ EMIT3(0x83, 0xe0, 0x01); /* and $0x1,%eax */
35913+ }
35914+ break;
35915+ case BPF_S_ANC_PKTTYPE:
35916+ {
35917+ int off = pkt_type_offset();
35918+
35919+ if (off < 0)
35920+ goto out;
35921+ if (is_imm8(off)) {
35922+ /* movzbl off8(%rdi),%eax */
35923+ EMIT4(0x0f, 0xb6, 0x47, off);
35924+ } else {
35925+				/* movzbl off32(%rdi),%eax */
35926+ EMIT3(0x0f, 0xb6, 0x87);
35927+ EMIT(off, 4);
35928+ }
35929+ EMIT3(0x83, 0xe0, PKT_TYPE_MAX); /* and $0x7,%eax */
35930+ break;
35931+ }
35932+ case BPF_S_LD_W_ABS:
35933+ func = CHOOSE_LOAD_FUNC(K, sk_load_word);
35934+common_load: seen |= SEEN_DATAREF;
35935+ t_offset = func - (image + addrs[i]);
35936+ EMIT1_off32(0xbe, K); /* mov imm32,%esi */
35937+ EMIT1_off32(0xe8, t_offset); /* call */
35938+ break;
35939+ case BPF_S_LD_H_ABS:
35940+ func = CHOOSE_LOAD_FUNC(K, sk_load_half);
35941+ goto common_load;
35942+ case BPF_S_LD_B_ABS:
35943+ func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
35944+ goto common_load;
35945+ case BPF_S_LDX_B_MSH:
35946+ func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
35947+ seen |= SEEN_DATAREF | SEEN_XREG;
35948+ t_offset = func - (image + addrs[i]);
35949+ EMIT1_off32(0xbe, K); /* mov imm32,%esi */
35950+ EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */
35951+ break;
35952+ case BPF_S_LD_W_IND:
35953+ func = sk_load_word;
35954+common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
35955+ t_offset = func - (image + addrs[i]);
35956+ if (K) {
35957+ if (is_imm8(K)) {
35958+ EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
35959+ } else {
35960+ EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
35961+ }
35962+ } else {
35963+				EMIT2(0x89, 0xde);	/* mov %ebx,%esi */
35964+ }
35965+ EMIT1_off32(0xe8, t_offset); /* call sk_load_xxx_ind */
35966+ break;
35967+ case BPF_S_LD_H_IND:
35968+ func = sk_load_half;
35969+ goto common_load_ind;
35970+ case BPF_S_LD_B_IND:
35971+ func = sk_load_byte;
35972+ goto common_load_ind;
35973+ case BPF_S_JMP_JA:
35974+ t_offset = addrs[i + K] - addrs[i];
35975+ EMIT_JMP(t_offset);
35976+ break;
35977+ COND_SEL(BPF_S_JMP_JGT_K, X86_JA, X86_JBE);
35978+ COND_SEL(BPF_S_JMP_JGE_K, X86_JAE, X86_JB);
35979+ COND_SEL(BPF_S_JMP_JEQ_K, X86_JE, X86_JNE);
35980+ COND_SEL(BPF_S_JMP_JSET_K,X86_JNE, X86_JE);
35981+ COND_SEL(BPF_S_JMP_JGT_X, X86_JA, X86_JBE);
35982+ COND_SEL(BPF_S_JMP_JGE_X, X86_JAE, X86_JB);
35983+ COND_SEL(BPF_S_JMP_JEQ_X, X86_JE, X86_JNE);
35984+ COND_SEL(BPF_S_JMP_JSET_X,X86_JNE, X86_JE);
35985+
35986+cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
35987+ t_offset = addrs[i + filter[i].jt] - addrs[i];
35988+
35989+ /* same targets, can avoid doing the test :) */
35990+ if (filter[i].jt == filter[i].jf) {
35991+ EMIT_JMP(t_offset);
35992+ break;
35993+ }
35994+
35995+ switch (filter[i].code) {
35996+ case BPF_S_JMP_JGT_X:
35997+ case BPF_S_JMP_JGE_X:
35998+ case BPF_S_JMP_JEQ_X:
35999+ seen |= SEEN_XREG;
36000+ EMIT2(0x39, 0xd8); /* cmp %ebx,%eax */
36001+ break;
36002+ case BPF_S_JMP_JSET_X:
36003+ seen |= SEEN_XREG;
36004+ EMIT2(0x85, 0xd8); /* test %ebx,%eax */
36005+ break;
36006+ case BPF_S_JMP_JEQ_K:
36007+ if (K == 0) {
36008+ EMIT2(0x85, 0xc0); /* test %eax,%eax */
36009+ break;
36010+ }
36011+ case BPF_S_JMP_JGT_K:
36012+ case BPF_S_JMP_JGE_K:
36013+ if (K <= 127)
36014+ EMIT3(0x83, 0xf8, K); /* cmp imm8,%eax */
36015+ else
36016+ EMIT1_off32(0x3d, K); /* cmp imm32,%eax */
36017+ break;
36018+ case BPF_S_JMP_JSET_K:
36019+ if (K <= 0xFF)
36020+ EMIT2(0xa8, K); /* test imm8,%al */
36021+ else if (!(K & 0xFFFF00FF))
36022+ EMIT3(0xf6, 0xc4, K >> 8); /* test imm8,%ah */
36023+ else if (K <= 0xFFFF) {
36024+ EMIT2(0x66, 0xa9); /* test imm16,%ax */
36025+ EMIT(K, 2);
36026+ } else {
36027+ EMIT1_off32(0xa9, K); /* test imm32,%eax */
36028+ }
36029+ break;
36030+ }
36031+ if (filter[i].jt != 0) {
36032+ if (filter[i].jf && f_offset)
36033+ t_offset += is_near(f_offset) ? 2 : 5;
36034+ EMIT_COND_JMP(t_op, t_offset);
36035+ if (filter[i].jf)
36036+ EMIT_JMP(f_offset);
36037+ break;
36038+ }
36039+ EMIT_COND_JMP(f_op, f_offset);
36040+ break;
36041+ default:
36042+			/* hmm, filter too complex: give up on the JIT compiler */
36043+ goto out;
36044+ }
36045+ ilen = prog - temp;
36046+ if (image) {
36047+ if (unlikely(proglen + ilen > oldproglen)) {
36048+				pr_err("bpf_jit_compile fatal error\n");
36049+ kfree(addrs);
36050+ module_free_exec(NULL, image);
36051+ return;
36052+ }
36053+ pax_open_kernel();
36054+ memcpy(image + proglen, temp, ilen);
36055+ pax_close_kernel();
36056+ }
36057+ proglen += ilen;
36058+ addrs[i] = proglen;
36059+ prog = temp;
36060+ }
36061+	/* the last BPF instruction is always a RET:
36062+	 * use it to compute the cleanup instruction(s) address
36063+ */
36064+ cleanup_addr = proglen - 1; /* ret */
36065+ if (seen_or_pass0)
36066+ cleanup_addr -= 1; /* leaveq */
36067+ if (seen_or_pass0 & SEEN_XREG)
36068+ cleanup_addr -= 4; /* mov -8(%rbp),%rbx */
36069+
36070 if (image) {
36071 if (proglen != oldproglen)
36072- pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
36073- proglen, oldproglen);
36074+			pr_err("bpf_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen);
36075 break;
36076 }
36077 if (proglen == oldproglen) {
36078@@ -918,32 +872,30 @@ void bpf_int_jit_compile(struct sk_filter *prog)
36079 }
36080
36081 if (bpf_jit_enable > 1)
36082- bpf_jit_dump(prog->len, proglen, 0, image);
36083+ bpf_jit_dump(flen, proglen, pass, image);
36084
36085 if (image) {
36086 bpf_flush_icache(header, image + proglen);
36087- set_memory_ro((unsigned long)header, header->pages);
36088- prog->bpf_func = (void *)image;
36089- prog->jited = 1;
36090+ fp->bpf_func = (void *)image;
36091 }
36092 out:
36093 kfree(addrs);
36094+ return;
36095 }
36096
36097 static void bpf_jit_free_deferred(struct work_struct *work)
36098 {
36099 struct sk_filter *fp = container_of(work, struct sk_filter, work);
36100 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
36101- struct bpf_binary_header *header = (void *)addr;
36102
36103- set_memory_rw(addr, header->pages);
36104- module_free(NULL, header);
36105+ set_memory_rw(addr, 1);
36106+ module_free_exec(NULL, (void *)addr);
36107 kfree(fp);
36108 }
36109
36110 void bpf_jit_free(struct sk_filter *fp)
36111 {
36112- if (fp->jited) {
36113+ if (fp->bpf_func != sk_run_filter) {
36114 INIT_WORK(&fp->work, bpf_jit_free_deferred);
36115 schedule_work(&fp->work);
36116 } else {
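
The CONFIG_GRKERNSEC_JIT_HARDEN hunks above never let the attacker-supplied 32-bit constant K reach executable memory verbatim: for the div/mod cases, DILUTE_CONST_SEQUENCE() emits a blinded value and un-blinds it at run time with the fresh randkey drawn from prandom_u32() on every loop iteration. A minimal sketch of such a macro (an illustration of the idea, not the exact grsecurity definition):

	#define DILUTE_CONST_SEQUENCE(K, key)		\
		do {					\
			EMIT1(0xb9);			/* mov imm32,%ecx */	\
			EMIT((K) ^ (key), 4);		/* blinded constant */	\
			EMIT2(0x81, 0xf1);		/* xor imm32,%ecx */	\
			EMIT((key), 4);			/* recover K in %ecx */	\
		} while (0)

Since %ecx ends up holding K either way, the emitted code behaves identically, but no byte sequence in the JIT image is directly chosen by the filter author, which frustrates attempts to smuggle shellcode into the buffer via crafted constants.
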
36117diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
36118index 5d04be5..2beeaa2 100644
36119--- a/arch/x86/oprofile/backtrace.c
36120+++ b/arch/x86/oprofile/backtrace.c
36121@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
36122 struct stack_frame_ia32 *fp;
36123 unsigned long bytes;
36124
36125- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
36126+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
36127 if (bytes != 0)
36128 return NULL;
36129
36130- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
36131+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
36132
36133 oprofile_add_trace(bufhead[0].return_address);
36134
36135@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
36136 struct stack_frame bufhead[2];
36137 unsigned long bytes;
36138
36139- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
36140+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
36141 if (bytes != 0)
36142 return NULL;
36143
36144@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
36145 {
36146 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
36147
36148- if (!user_mode_vm(regs)) {
36149+ if (!user_mode(regs)) {
36150 unsigned long stack = kernel_stack_pointer(regs);
36151 if (depth)
36152 dump_trace(NULL, regs, (unsigned long *)stack, 0,
36153diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
36154index 379e8bd..6386e09 100644
36155--- a/arch/x86/oprofile/nmi_int.c
36156+++ b/arch/x86/oprofile/nmi_int.c
36157@@ -23,6 +23,7 @@
36158 #include <asm/nmi.h>
36159 #include <asm/msr.h>
36160 #include <asm/apic.h>
36161+#include <asm/pgtable.h>
36162
36163 #include "op_counter.h"
36164 #include "op_x86_model.h"
36165@@ -785,8 +786,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
36166 if (ret)
36167 return ret;
36168
36169- if (!model->num_virt_counters)
36170- model->num_virt_counters = model->num_counters;
36171+ if (!model->num_virt_counters) {
36172+ pax_open_kernel();
36173+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
36174+ pax_close_kernel();
36175+ }
36176
36177 mux_init(ops);
36178
36179diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
36180index 50d86c0..7985318 100644
36181--- a/arch/x86/oprofile/op_model_amd.c
36182+++ b/arch/x86/oprofile/op_model_amd.c
36183@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
36184 num_counters = AMD64_NUM_COUNTERS;
36185 }
36186
36187- op_amd_spec.num_counters = num_counters;
36188- op_amd_spec.num_controls = num_counters;
36189- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
36190+ pax_open_kernel();
36191+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
36192+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
36193+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
36194+ pax_close_kernel();
36195
36196 return 0;
36197 }
36198diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
36199index d90528e..0127e2b 100644
36200--- a/arch/x86/oprofile/op_model_ppro.c
36201+++ b/arch/x86/oprofile/op_model_ppro.c
36202@@ -19,6 +19,7 @@
36203 #include <asm/msr.h>
36204 #include <asm/apic.h>
36205 #include <asm/nmi.h>
36206+#include <asm/pgtable.h>
36207
36208 #include "op_x86_model.h"
36209 #include "op_counter.h"
36210@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
36211
36212 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
36213
36214- op_arch_perfmon_spec.num_counters = num_counters;
36215- op_arch_perfmon_spec.num_controls = num_counters;
36216+ pax_open_kernel();
36217+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
36218+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
36219+ pax_close_kernel();
36220 }
36221
36222 static int arch_perfmon_init(struct oprofile_operations *ignore)
36223diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
36224index 71e8a67..6a313bb 100644
36225--- a/arch/x86/oprofile/op_x86_model.h
36226+++ b/arch/x86/oprofile/op_x86_model.h
36227@@ -52,7 +52,7 @@ struct op_x86_model_spec {
36228 void (*switch_ctrl)(struct op_x86_model_spec const *model,
36229 struct op_msrs const * const msrs);
36230 #endif
36231-};
36232+} __do_const;
36233
36234 struct op_counter_config;
36235
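
With struct op_x86_model_spec marked __do_const (above), the oprofile model descriptors land in read-only memory under PaX/constify, so the few boot-time writers in nmi_int.c, op_model_amd.c and op_model_ppro.c must briefly lift kernel write protection and cast away the implied const. The shape of that pattern, as a minimal sketch (set_counter_limits() is a hypothetical helper, not part of the patch):

	static void set_counter_limits(struct op_x86_model_spec *spec, unsigned int n)
	{
		pax_open_kernel();	/* temporarily allow writes to r/o kernel data */
		*(unsigned int *)&spec->num_counters = n;
		*(unsigned int *)&spec->num_controls = n;
		pax_close_kernel();	/* restore write protection */
	}

Keeping the open/close window this narrow is the point: the structure is writable only for the handful of instructions that legitimately initialize it.
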
36236diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
36237index 84b9d67..260e5ff 100644
36238--- a/arch/x86/pci/intel_mid_pci.c
36239+++ b/arch/x86/pci/intel_mid_pci.c
36240@@ -245,7 +245,7 @@ int __init intel_mid_pci_init(void)
36241 pr_info("Intel MID platform detected, using MID PCI ops\n");
36242 pci_mmcfg_late_init();
36243 pcibios_enable_irq = intel_mid_pci_irq_enable;
36244- pci_root_ops = intel_mid_pci_ops;
36245+ memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
36246 pci_soc_mode = 1;
36247 /* Continue with standard init */
36248 return 1;
36249diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
36250index 84112f5..6334d60 100644
36251--- a/arch/x86/pci/irq.c
36252+++ b/arch/x86/pci/irq.c
36253@@ -50,7 +50,7 @@ struct irq_router {
36254 struct irq_router_handler {
36255 u16 vendor;
36256 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
36257-};
36258+} __do_const;
36259
36260 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
36261 void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
36262@@ -790,7 +790,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
36263 return 0;
36264 }
36265
36266-static __initdata struct irq_router_handler pirq_routers[] = {
36267+static __initconst const struct irq_router_handler pirq_routers[] = {
36268 { PCI_VENDOR_ID_INTEL, intel_router_probe },
36269 { PCI_VENDOR_ID_AL, ali_router_probe },
36270 { PCI_VENDOR_ID_ITE, ite_router_probe },
36271@@ -817,7 +817,7 @@ static struct pci_dev *pirq_router_dev;
36272 static void __init pirq_find_router(struct irq_router *r)
36273 {
36274 struct irq_routing_table *rt = pirq_table;
36275- struct irq_router_handler *h;
36276+ const struct irq_router_handler *h;
36277
36278 #ifdef CONFIG_PCI_BIOS
36279 if (!rt->signature) {
36280@@ -1090,7 +1090,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
36281 return 0;
36282 }
36283
36284-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
36285+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
36286 {
36287 .callback = fix_broken_hp_bios_irq9,
36288 .ident = "HP Pavilion N5400 Series Laptop",
36289diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
36290index c77b24a..c979855 100644
36291--- a/arch/x86/pci/pcbios.c
36292+++ b/arch/x86/pci/pcbios.c
36293@@ -79,7 +79,7 @@ union bios32 {
36294 static struct {
36295 unsigned long address;
36296 unsigned short segment;
36297-} bios32_indirect = { 0, __KERNEL_CS };
36298+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
36299
36300 /*
36301 * Returns the entry point for the given service, NULL on error
36302@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
36303 unsigned long length; /* %ecx */
36304 unsigned long entry; /* %edx */
36305 unsigned long flags;
36306+ struct desc_struct d, *gdt;
36307
36308 local_irq_save(flags);
36309- __asm__("lcall *(%%edi); cld"
36310+
36311+ gdt = get_cpu_gdt_table(smp_processor_id());
36312+
36313+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
36314+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
36315+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
36316+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
36317+
36318+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
36319 : "=a" (return_code),
36320 "=b" (address),
36321 "=c" (length),
36322 "=d" (entry)
36323 : "0" (service),
36324 "1" (0),
36325- "D" (&bios32_indirect));
36326+ "D" (&bios32_indirect),
36327+ "r"(__PCIBIOS_DS)
36328+ : "memory");
36329+
36330+ pax_open_kernel();
36331+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
36332+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
36333+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
36334+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
36335+ pax_close_kernel();
36336+
36337 local_irq_restore(flags);
36338
36339 switch (return_code) {
36340- case 0:
36341- return address + entry;
36342- case 0x80: /* Not present */
36343- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
36344- return 0;
36345- default: /* Shouldn't happen */
36346- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
36347- service, return_code);
36348+ case 0: {
36349+ int cpu;
36350+ unsigned char flags;
36351+
36352+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
36353+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
36354+ printk(KERN_WARNING "bios32_service: not valid\n");
36355 return 0;
36356+ }
36357+ address = address + PAGE_OFFSET;
36358+		length += 16UL; /* some BIOSes underreport this... */
36359+ flags = 4;
36360+ if (length >= 64*1024*1024) {
36361+ length >>= PAGE_SHIFT;
36362+ flags |= 8;
36363+ }
36364+
36365+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
36366+ gdt = get_cpu_gdt_table(cpu);
36367+ pack_descriptor(&d, address, length, 0x9b, flags);
36368+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
36369+ pack_descriptor(&d, address, length, 0x93, flags);
36370+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
36371+ }
36372+ return entry;
36373+ }
36374+ case 0x80: /* Not present */
36375+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
36376+ return 0;
36377+ default: /* Shouldn't happen */
36378+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
36379+ service, return_code);
36380+ return 0;
36381 }
36382 }
36383
36384 static struct {
36385 unsigned long address;
36386 unsigned short segment;
36387-} pci_indirect = { 0, __KERNEL_CS };
36388+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
36389
36390-static int pci_bios_present;
36391+static int pci_bios_present __read_only;
36392
36393 static int check_pcibios(void)
36394 {
36395@@ -131,11 +174,13 @@ static int check_pcibios(void)
36396 unsigned long flags, pcibios_entry;
36397
36398 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
36399- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
36400+ pci_indirect.address = pcibios_entry;
36401
36402 local_irq_save(flags);
36403- __asm__(
36404- "lcall *(%%edi); cld\n\t"
36405+ __asm__("movw %w6, %%ds\n\t"
36406+ "lcall *%%ss:(%%edi); cld\n\t"
36407+ "push %%ss\n\t"
36408+ "pop %%ds\n\t"
36409 "jc 1f\n\t"
36410 "xor %%ah, %%ah\n"
36411 "1:"
36412@@ -144,7 +189,8 @@ static int check_pcibios(void)
36413 "=b" (ebx),
36414 "=c" (ecx)
36415 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
36416- "D" (&pci_indirect)
36417+ "D" (&pci_indirect),
36418+ "r" (__PCIBIOS_DS)
36419 : "memory");
36420 local_irq_restore(flags);
36421
36422@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36423
36424 switch (len) {
36425 case 1:
36426- __asm__("lcall *(%%esi); cld\n\t"
36427+ __asm__("movw %w6, %%ds\n\t"
36428+ "lcall *%%ss:(%%esi); cld\n\t"
36429+ "push %%ss\n\t"
36430+ "pop %%ds\n\t"
36431 "jc 1f\n\t"
36432 "xor %%ah, %%ah\n"
36433 "1:"
36434@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36435 : "1" (PCIBIOS_READ_CONFIG_BYTE),
36436 "b" (bx),
36437 "D" ((long)reg),
36438- "S" (&pci_indirect));
36439+ "S" (&pci_indirect),
36440+ "r" (__PCIBIOS_DS));
36441 /*
36442 * Zero-extend the result beyond 8 bits, do not trust the
36443 * BIOS having done it:
36444@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36445 *value &= 0xff;
36446 break;
36447 case 2:
36448- __asm__("lcall *(%%esi); cld\n\t"
36449+ __asm__("movw %w6, %%ds\n\t"
36450+ "lcall *%%ss:(%%esi); cld\n\t"
36451+ "push %%ss\n\t"
36452+ "pop %%ds\n\t"
36453 "jc 1f\n\t"
36454 "xor %%ah, %%ah\n"
36455 "1:"
36456@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36457 : "1" (PCIBIOS_READ_CONFIG_WORD),
36458 "b" (bx),
36459 "D" ((long)reg),
36460- "S" (&pci_indirect));
36461+ "S" (&pci_indirect),
36462+ "r" (__PCIBIOS_DS));
36463 /*
36464 * Zero-extend the result beyond 16 bits, do not trust the
36465 * BIOS having done it:
36466@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36467 *value &= 0xffff;
36468 break;
36469 case 4:
36470- __asm__("lcall *(%%esi); cld\n\t"
36471+ __asm__("movw %w6, %%ds\n\t"
36472+ "lcall *%%ss:(%%esi); cld\n\t"
36473+ "push %%ss\n\t"
36474+ "pop %%ds\n\t"
36475 "jc 1f\n\t"
36476 "xor %%ah, %%ah\n"
36477 "1:"
36478@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36479 : "1" (PCIBIOS_READ_CONFIG_DWORD),
36480 "b" (bx),
36481 "D" ((long)reg),
36482- "S" (&pci_indirect));
36483+ "S" (&pci_indirect),
36484+ "r" (__PCIBIOS_DS));
36485 break;
36486 }
36487
36488@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
36489
36490 switch (len) {
36491 case 1:
36492- __asm__("lcall *(%%esi); cld\n\t"
36493+ __asm__("movw %w6, %%ds\n\t"
36494+ "lcall *%%ss:(%%esi); cld\n\t"
36495+ "push %%ss\n\t"
36496+ "pop %%ds\n\t"
36497 "jc 1f\n\t"
36498 "xor %%ah, %%ah\n"
36499 "1:"
36500@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
36501 "c" (value),
36502 "b" (bx),
36503 "D" ((long)reg),
36504- "S" (&pci_indirect));
36505+ "S" (&pci_indirect),
36506+ "r" (__PCIBIOS_DS));
36507 break;
36508 case 2:
36509- __asm__("lcall *(%%esi); cld\n\t"
36510+ __asm__("movw %w6, %%ds\n\t"
36511+ "lcall *%%ss:(%%esi); cld\n\t"
36512+ "push %%ss\n\t"
36513+ "pop %%ds\n\t"
36514 "jc 1f\n\t"
36515 "xor %%ah, %%ah\n"
36516 "1:"
36517@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
36518 "c" (value),
36519 "b" (bx),
36520 "D" ((long)reg),
36521- "S" (&pci_indirect));
36522+ "S" (&pci_indirect),
36523+ "r" (__PCIBIOS_DS));
36524 break;
36525 case 4:
36526- __asm__("lcall *(%%esi); cld\n\t"
36527+ __asm__("movw %w6, %%ds\n\t"
36528+ "lcall *%%ss:(%%esi); cld\n\t"
36529+ "push %%ss\n\t"
36530+ "pop %%ds\n\t"
36531 "jc 1f\n\t"
36532 "xor %%ah, %%ah\n"
36533 "1:"
36534@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
36535 "c" (value),
36536 "b" (bx),
36537 "D" ((long)reg),
36538- "S" (&pci_indirect));
36539+ "S" (&pci_indirect),
36540+ "r" (__PCIBIOS_DS));
36541 break;
36542 }
36543
36544@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
36545
36546 DBG("PCI: Fetching IRQ routing table... ");
36547 __asm__("push %%es\n\t"
36548+ "movw %w8, %%ds\n\t"
36549 "push %%ds\n\t"
36550 "pop %%es\n\t"
36551- "lcall *(%%esi); cld\n\t"
36552+ "lcall *%%ss:(%%esi); cld\n\t"
36553 "pop %%es\n\t"
36554+ "push %%ss\n\t"
36555+ "pop %%ds\n"
36556 "jc 1f\n\t"
36557 "xor %%ah, %%ah\n"
36558 "1:"
36559@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
36560 "1" (0),
36561 "D" ((long) &opt),
36562 "S" (&pci_indirect),
36563- "m" (opt)
36564+ "m" (opt),
36565+ "r" (__PCIBIOS_DS)
36566 : "memory");
36567 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
36568 if (ret & 0xff00)
36569@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
36570 {
36571 int ret;
36572
36573- __asm__("lcall *(%%esi); cld\n\t"
36574+ __asm__("movw %w5, %%ds\n\t"
36575+ "lcall *%%ss:(%%esi); cld\n\t"
36576+ "push %%ss\n\t"
36577+ "pop %%ds\n"
36578 "jc 1f\n\t"
36579 "xor %%ah, %%ah\n"
36580 "1:"
36581@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
36582 : "0" (PCIBIOS_SET_PCI_HW_INT),
36583 "b" ((dev->bus->number << 8) | dev->devfn),
36584 "c" ((irq << 8) | (pin + 10)),
36585- "S" (&pci_indirect));
36586+ "S" (&pci_indirect),
36587+ "r" (__PCIBIOS_DS));
36588 return !(ret & 0xff00);
36589 }
36590 EXPORT_SYMBOL(pcibios_set_irq_routing);
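
Every lcall into the PCI BIOS above now carries the same bracket: load the dedicated __PCIBIOS_DS data segment, make the far call through an %ss: override (the stack segment still covers kernel data, so pci_indirect stays reachable), then restore %ds from %ss. The BIOS expects the flat 4GB data segment that a KERNEXEC kernel no longer keeps in %ds. Distilled into one wrapper (a sketch of the pattern; pcibios_lcall() is hypothetical, not code from the patch):

	static inline u32 pcibios_lcall(unsigned long *indirect, u32 op)
	{
		u32 ret;

		asm volatile("movw %w2, %%ds\n\t"	/* BIOS-visible flat DS */
			     "lcall *%%ss:(%%esi); cld\n\t"
			     "push %%ss\n\t"
			     "pop %%ds"			/* back to kernel DS */
			     : "=a" (ret)
			     : "0" (op), "r" (__PCIBIOS_DS), "S" (indirect)
			     : "memory");
		return ret;
	}
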
36591diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
36592index 9ee3491..872192f 100644
36593--- a/arch/x86/platform/efi/efi_32.c
36594+++ b/arch/x86/platform/efi/efi_32.c
36595@@ -59,11 +59,22 @@ void efi_call_phys_prelog(void)
36596 {
36597 struct desc_ptr gdt_descr;
36598
36599+#ifdef CONFIG_PAX_KERNEXEC
36600+ struct desc_struct d;
36601+#endif
36602+
36603 local_irq_save(efi_rt_eflags);
36604
36605 load_cr3(initial_page_table);
36606 __flush_tlb_all();
36607
36608+#ifdef CONFIG_PAX_KERNEXEC
36609+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
36610+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
36611+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
36612+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
36613+#endif
36614+
36615 gdt_descr.address = __pa(get_cpu_gdt_table(0));
36616 gdt_descr.size = GDT_SIZE - 1;
36617 load_gdt(&gdt_descr);
36618@@ -73,11 +84,24 @@ void efi_call_phys_epilog(void)
36619 {
36620 struct desc_ptr gdt_descr;
36621
36622+#ifdef CONFIG_PAX_KERNEXEC
36623+ struct desc_struct d;
36624+
36625+ memset(&d, 0, sizeof d);
36626+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
36627+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
36628+#endif
36629+
36630 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
36631 gdt_descr.size = GDT_SIZE - 1;
36632 load_gdt(&gdt_descr);
36633
36634+#ifdef CONFIG_PAX_PER_CPU_PGD
36635+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
36636+#else
36637 load_cr3(swapper_pg_dir);
36638+#endif
36639+
36640 __flush_tlb_all();
36641
36642 local_irq_restore(efi_rt_eflags);
36643diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
36644index 290d397..3906bcd 100644
36645--- a/arch/x86/platform/efi/efi_64.c
36646+++ b/arch/x86/platform/efi/efi_64.c
36647@@ -99,6 +99,11 @@ void __init efi_call_phys_prelog(void)
36648 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
36649 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
36650 }
36651+
36652+#ifdef CONFIG_PAX_PER_CPU_PGD
36653+ load_cr3(swapper_pg_dir);
36654+#endif
36655+
36656 __flush_tlb_all();
36657 }
36658
36659@@ -116,6 +121,11 @@ void __init efi_call_phys_epilog(void)
36660 for (pgd = 0; pgd < n_pgds; pgd++)
36661 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
36662 kfree(save_pgd);
36663+
36664+#ifdef CONFIG_PAX_PER_CPU_PGD
36665+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
36666+#endif
36667+
36668 __flush_tlb_all();
36669 local_irq_restore(efi_flags);
36670 early_code_mapping_set_exec(0);
36671diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
36672index fbe66e6..eae5e38 100644
36673--- a/arch/x86/platform/efi/efi_stub_32.S
36674+++ b/arch/x86/platform/efi/efi_stub_32.S
36675@@ -6,7 +6,9 @@
36676 */
36677
36678 #include <linux/linkage.h>
36679+#include <linux/init.h>
36680 #include <asm/page_types.h>
36681+#include <asm/segment.h>
36682
36683 /*
36684 * efi_call_phys(void *, ...) is a function with variable parameters.
36685@@ -20,7 +22,7 @@
36686 * service functions will comply with gcc calling convention, too.
36687 */
36688
36689-.text
36690+__INIT
36691 ENTRY(efi_call_phys)
36692 /*
36693 * 0. The function can only be called in Linux kernel. So CS has been
36694@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
36695 * The mapping of lower virtual memory has been created in prelog and
36696 * epilog.
36697 */
36698- movl $1f, %edx
36699- subl $__PAGE_OFFSET, %edx
36700- jmp *%edx
36701+#ifdef CONFIG_PAX_KERNEXEC
36702+ movl $(__KERNEXEC_EFI_DS), %edx
36703+ mov %edx, %ds
36704+ mov %edx, %es
36705+ mov %edx, %ss
36706+ addl $2f,(1f)
36707+ ljmp *(1f)
36708+
36709+__INITDATA
36710+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
36711+.previous
36712+
36713+2:
36714+ subl $2b,(1b)
36715+#else
36716+ jmp 1f-__PAGE_OFFSET
36717 1:
36718+#endif
36719
36720 /*
36721 * 2. Now on the top of stack is the return
36722@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
36723 * parameter 2, ..., param n. To make things easy, we save the return
36724 * address of efi_call_phys in a global variable.
36725 */
36726- popl %edx
36727- movl %edx, saved_return_addr
36728- /* get the function pointer into ECX*/
36729- popl %ecx
36730- movl %ecx, efi_rt_function_ptr
36731- movl $2f, %edx
36732- subl $__PAGE_OFFSET, %edx
36733- pushl %edx
36734+ popl (saved_return_addr)
36735+ popl (efi_rt_function_ptr)
36736
36737 /*
36738 * 3. Clear PG bit in %CR0.
36739@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
36740 /*
36741 * 5. Call the physical function.
36742 */
36743- jmp *%ecx
36744+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
36745
36746-2:
36747 /*
36748 * 6. After EFI runtime service returns, control will return to
36749 * following instruction. We'd better readjust stack pointer first.
36750@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
36751 movl %cr0, %edx
36752 orl $0x80000000, %edx
36753 movl %edx, %cr0
36754- jmp 1f
36755-1:
36756+
36757 /*
36758 * 8. Now restore the virtual mode from flat mode by
36759 * adding EIP with PAGE_OFFSET.
36760 */
36761- movl $1f, %edx
36762- jmp *%edx
36763+#ifdef CONFIG_PAX_KERNEXEC
36764+ movl $(__KERNEL_DS), %edx
36765+ mov %edx, %ds
36766+ mov %edx, %es
36767+ mov %edx, %ss
36768+ ljmp $(__KERNEL_CS),$1f
36769+#else
36770+ jmp 1f+__PAGE_OFFSET
36771+#endif
36772 1:
36773
36774 /*
36775 * 9. Balance the stack. And because EAX contain the return value,
36776 * we'd better not clobber it.
36777 */
36778- leal efi_rt_function_ptr, %edx
36779- movl (%edx), %ecx
36780- pushl %ecx
36781+ pushl (efi_rt_function_ptr)
36782
36783 /*
36784- * 10. Push the saved return address onto the stack and return.
36785+ * 10. Return to the saved return address.
36786 */
36787- leal saved_return_addr, %edx
36788- movl (%edx), %ecx
36789- pushl %ecx
36790- ret
36791+ jmpl *(saved_return_addr)
36792 ENDPROC(efi_call_phys)
36793 .previous
36794
36795-.data
36796+__INITDATA
36797 saved_return_addr:
36798 .long 0
36799 efi_rt_function_ptr:
36800diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
36801index 5fcda72..cd4dc41 100644
36802--- a/arch/x86/platform/efi/efi_stub_64.S
36803+++ b/arch/x86/platform/efi/efi_stub_64.S
36804@@ -11,6 +11,7 @@
36805 #include <asm/msr.h>
36806 #include <asm/processor-flags.h>
36807 #include <asm/page_types.h>
36808+#include <asm/alternative-asm.h>
36809
36810 #define SAVE_XMM \
36811 mov %rsp, %rax; \
36812@@ -88,6 +89,7 @@ ENTRY(efi_call)
36813 RESTORE_PGT
36814 addq $48, %rsp
36815 RESTORE_XMM
36816+ pax_force_retaddr 0, 1
36817 ret
36818 ENDPROC(efi_call)
36819
36820@@ -245,8 +247,8 @@ efi_gdt64:
36821 .long 0 /* Filled out by user */
36822 .word 0
36823 .quad 0x0000000000000000 /* NULL descriptor */
36824- .quad 0x00af9a000000ffff /* __KERNEL_CS */
36825- .quad 0x00cf92000000ffff /* __KERNEL_DS */
36826+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
36827+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
36828 .quad 0x0080890000000000 /* TS descriptor */
36829 .quad 0x0000000000000000 /* TS continued */
36830 efi_gdt64_end:
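
The efi_gdt64 change above flips a single bit in each descriptor: 0x9a becomes 0x9b and 0x92 becomes 0x93, pre-setting the accessed bit (bit 40 of the 64-bit descriptor). When the GDT can be mapped read-only, loading a descriptor whose accessed bit is clear would make the CPU fault while writing the bit back. A standalone check of that single-bit claim (hypothetical user-space snippet; only the two constants come from the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long old_cs = 0x00af9a000000ffffULL; /* __KERNEL_CS before */
		unsigned long long new_cs = 0x00af9b000000ffffULL; /* __KERNEL_CS after */

		/* the descriptors differ exactly in the accessed bit, bit 40 */
		printf("%s\n", (old_cs ^ new_cs) == (1ULL << 40) ? "ok" : "mismatch");
		return 0;
	}
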
36831diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
36832index 1bbedc4..eb795b5 100644
36833--- a/arch/x86/platform/intel-mid/intel-mid.c
36834+++ b/arch/x86/platform/intel-mid/intel-mid.c
36835@@ -71,9 +71,10 @@ static void intel_mid_power_off(void)
36836 {
36837 };
36838
36839-static void intel_mid_reboot(void)
36840+static void __noreturn intel_mid_reboot(void)
36841 {
36842 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
36843+ BUG();
36844 }
36845
36846 static unsigned long __init intel_mid_calibrate_tsc(void)
36847diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
36848index d6ee929..3637cb5 100644
36849--- a/arch/x86/platform/olpc/olpc_dt.c
36850+++ b/arch/x86/platform/olpc/olpc_dt.c
36851@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
36852 return res;
36853 }
36854
36855-static struct of_pdt_ops prom_olpc_ops __initdata = {
36856+static struct of_pdt_ops prom_olpc_ops __initconst = {
36857 .nextprop = olpc_dt_nextprop,
36858 .getproplen = olpc_dt_getproplen,
36859 .getproperty = olpc_dt_getproperty,
36860diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
36861index 424f4c9..f2a2988 100644
36862--- a/arch/x86/power/cpu.c
36863+++ b/arch/x86/power/cpu.c
36864@@ -137,11 +137,8 @@ static void do_fpu_end(void)
36865 static void fix_processor_context(void)
36866 {
36867 int cpu = smp_processor_id();
36868- struct tss_struct *t = &per_cpu(init_tss, cpu);
36869-#ifdef CONFIG_X86_64
36870- struct desc_struct *desc = get_cpu_gdt_table(cpu);
36871- tss_desc tss;
36872-#endif
36873+ struct tss_struct *t = init_tss + cpu;
36874+
36875 set_tss_desc(cpu, t); /*
36876 * This just modifies memory; should not be
36877 * necessary. But... This is necessary, because
36878@@ -150,10 +147,6 @@ static void fix_processor_context(void)
36879 */
36880
36881 #ifdef CONFIG_X86_64
36882- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
36883- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
36884- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
36885-
36886 syscall_init(); /* This sets MSR_*STAR and related */
36887 #endif
36888 load_TR_desc(); /* This does ltr */
36889diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
36890index bad628a..a102610 100644
36891--- a/arch/x86/realmode/init.c
36892+++ b/arch/x86/realmode/init.c
36893@@ -68,7 +68,13 @@ void __init setup_real_mode(void)
36894 __va(real_mode_header->trampoline_header);
36895
36896 #ifdef CONFIG_X86_32
36897- trampoline_header->start = __pa_symbol(startup_32_smp);
36898+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
36899+
36900+#ifdef CONFIG_PAX_KERNEXEC
36901+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
36902+#endif
36903+
36904+ trampoline_header->boot_cs = __BOOT_CS;
36905 trampoline_header->gdt_limit = __BOOT_DS + 7;
36906 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
36907 #else
36908@@ -84,7 +90,7 @@ void __init setup_real_mode(void)
36909 *trampoline_cr4_features = read_cr4();
36910
36911 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
36912- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
36913+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
36914 trampoline_pgd[511] = init_level4_pgt[511].pgd;
36915 #endif
36916 }
36917diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
36918index 7c0d7be..d24dc88 100644
36919--- a/arch/x86/realmode/rm/Makefile
36920+++ b/arch/x86/realmode/rm/Makefile
36921@@ -67,5 +67,8 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
36922
36923 KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
36924 -I$(srctree)/arch/x86/boot
36925+ifdef CONSTIFY_PLUGIN
36926+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
36927+endif
36928 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
36929 GCOV_PROFILE := n
36930diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
36931index a28221d..93c40f1 100644
36932--- a/arch/x86/realmode/rm/header.S
36933+++ b/arch/x86/realmode/rm/header.S
36934@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
36935 #endif
36936 /* APM/BIOS reboot */
36937 .long pa_machine_real_restart_asm
36938-#ifdef CONFIG_X86_64
36939+#ifdef CONFIG_X86_32
36940+ .long __KERNEL_CS
36941+#else
36942 .long __KERNEL32_CS
36943 #endif
36944 END(real_mode_header)
36945diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
36946index 48ddd76..c26749f 100644
36947--- a/arch/x86/realmode/rm/trampoline_32.S
36948+++ b/arch/x86/realmode/rm/trampoline_32.S
36949@@ -24,6 +24,12 @@
36950 #include <asm/page_types.h>
36951 #include "realmode.h"
36952
36953+#ifdef CONFIG_PAX_KERNEXEC
36954+#define ta(X) (X)
36955+#else
36956+#define ta(X) (pa_ ## X)
36957+#endif
36958+
36959 .text
36960 .code16
36961
36962@@ -38,8 +44,6 @@ ENTRY(trampoline_start)
36963
36964 cli # We should be safe anyway
36965
36966- movl tr_start, %eax # where we need to go
36967-
36968 movl $0xA5A5A5A5, trampoline_status
36969 # write marker for master knows we're running
36970
36971@@ -55,7 +59,7 @@ ENTRY(trampoline_start)
36972 movw $1, %dx # protected mode (PE) bit
36973 lmsw %dx # into protected mode
36974
36975- ljmpl $__BOOT_CS, $pa_startup_32
36976+ ljmpl *(trampoline_header)
36977
36978 .section ".text32","ax"
36979 .code32
36980@@ -66,7 +70,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
36981 .balign 8
36982 GLOBAL(trampoline_header)
36983 tr_start: .space 4
36984- tr_gdt_pad: .space 2
36985+ tr_boot_cs: .space 2
36986 tr_gdt: .space 6
36987 END(trampoline_header)
36988
36989diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
36990index dac7b20..72dbaca 100644
36991--- a/arch/x86/realmode/rm/trampoline_64.S
36992+++ b/arch/x86/realmode/rm/trampoline_64.S
36993@@ -93,6 +93,7 @@ ENTRY(startup_32)
36994 movl %edx, %gs
36995
36996 movl pa_tr_cr4, %eax
36997+ andl $~X86_CR4_PCIDE, %eax
36998 movl %eax, %cr4 # Enable PAE mode
36999
37000 # Setup trampoline 4 level pagetables
37001@@ -106,7 +107,7 @@ ENTRY(startup_32)
37002 wrmsr
37003
37004 # Enable paging and in turn activate Long Mode
37005- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
37006+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
37007 movl %eax, %cr0
37008
37009 /*
37010diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
37011index 9e7e147..25a4158 100644
37012--- a/arch/x86/realmode/rm/wakeup_asm.S
37013+++ b/arch/x86/realmode/rm/wakeup_asm.S
37014@@ -126,11 +126,10 @@ ENTRY(wakeup_start)
37015 lgdtl pmode_gdt
37016
37017 /* This really couldn't... */
37018- movl pmode_entry, %eax
37019 movl pmode_cr0, %ecx
37020 movl %ecx, %cr0
37021- ljmpl $__KERNEL_CS, $pa_startup_32
37022- /* -> jmp *%eax in trampoline_32.S */
37023+
37024+ ljmpl *pmode_entry
37025 #else
37026 jmp trampoline_start
37027 #endif
37028diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
37029index 604a37e..e49702a 100644
37030--- a/arch/x86/tools/Makefile
37031+++ b/arch/x86/tools/Makefile
37032@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
37033
37034 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
37035
37036-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
37037+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
37038 hostprogs-y += relocs
37039 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
37040 PHONY += relocs
37041diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
37042index bbb1d22..e505211 100644
37043--- a/arch/x86/tools/relocs.c
37044+++ b/arch/x86/tools/relocs.c
37045@@ -1,5 +1,7 @@
37046 /* This is included from relocs_32/64.c */
37047
37048+#include "../../../include/generated/autoconf.h"
37049+
37050 #define ElfW(type) _ElfW(ELF_BITS, type)
37051 #define _ElfW(bits, type) __ElfW(bits, type)
37052 #define __ElfW(bits, type) Elf##bits##_##type
37053@@ -11,6 +13,7 @@
37054 #define Elf_Sym ElfW(Sym)
37055
37056 static Elf_Ehdr ehdr;
37057+static Elf_Phdr *phdr;
37058
37059 struct relocs {
37060 uint32_t *offset;
37061@@ -383,9 +386,39 @@ static void read_ehdr(FILE *fp)
37062 }
37063 }
37064
37065+static void read_phdrs(FILE *fp)
37066+{
37067+ unsigned int i;
37068+
37069+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
37070+ if (!phdr) {
37071+ die("Unable to allocate %d program headers\n",
37072+ ehdr.e_phnum);
37073+ }
37074+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
37075+ die("Seek to %d failed: %s\n",
37076+ ehdr.e_phoff, strerror(errno));
37077+ }
37078+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
37079+ die("Cannot read ELF program headers: %s\n",
37080+ strerror(errno));
37081+ }
37082+ for(i = 0; i < ehdr.e_phnum; i++) {
37083+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
37084+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
37085+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
37086+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
37087+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
37088+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
37089+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
37090+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
37091+ }
37092+
37093+}
37094+
37095 static void read_shdrs(FILE *fp)
37096 {
37097- int i;
37098+ unsigned int i;
37099 Elf_Shdr shdr;
37100
37101 secs = calloc(ehdr.e_shnum, sizeof(struct section));
37102@@ -420,7 +453,7 @@ static void read_shdrs(FILE *fp)
37103
37104 static void read_strtabs(FILE *fp)
37105 {
37106- int i;
37107+ unsigned int i;
37108 for (i = 0; i < ehdr.e_shnum; i++) {
37109 struct section *sec = &secs[i];
37110 if (sec->shdr.sh_type != SHT_STRTAB) {
37111@@ -445,7 +478,7 @@ static void read_strtabs(FILE *fp)
37112
37113 static void read_symtabs(FILE *fp)
37114 {
37115- int i,j;
37116+ unsigned int i,j;
37117 for (i = 0; i < ehdr.e_shnum; i++) {
37118 struct section *sec = &secs[i];
37119 if (sec->shdr.sh_type != SHT_SYMTAB) {
37120@@ -476,9 +509,11 @@ static void read_symtabs(FILE *fp)
37121 }
37122
37123
37124-static void read_relocs(FILE *fp)
37125+static void read_relocs(FILE *fp, int use_real_mode)
37126 {
37127- int i,j;
37128+ unsigned int i,j;
37129+ uint32_t base;
37130+
37131 for (i = 0; i < ehdr.e_shnum; i++) {
37132 struct section *sec = &secs[i];
37133 if (sec->shdr.sh_type != SHT_REL_TYPE) {
37134@@ -498,9 +533,22 @@ static void read_relocs(FILE *fp)
37135 die("Cannot read symbol table: %s\n",
37136 strerror(errno));
37137 }
37138+ base = 0;
37139+
37140+#ifdef CONFIG_X86_32
37141+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
37142+ if (phdr[j].p_type != PT_LOAD )
37143+ continue;
37144+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
37145+ continue;
37146+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
37147+ break;
37148+ }
37149+#endif
37150+
37151 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
37152 Elf_Rel *rel = &sec->reltab[j];
37153- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
37154+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
37155 rel->r_info = elf_xword_to_cpu(rel->r_info);
37156 #if (SHT_REL_TYPE == SHT_RELA)
37157 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
37158@@ -512,7 +560,7 @@ static void read_relocs(FILE *fp)
37159
37160 static void print_absolute_symbols(void)
37161 {
37162- int i;
37163+ unsigned int i;
37164 const char *format;
37165
37166 if (ELF_BITS == 64)
37167@@ -525,7 +573,7 @@ static void print_absolute_symbols(void)
37168 for (i = 0; i < ehdr.e_shnum; i++) {
37169 struct section *sec = &secs[i];
37170 char *sym_strtab;
37171- int j;
37172+ unsigned int j;
37173
37174 if (sec->shdr.sh_type != SHT_SYMTAB) {
37175 continue;
37176@@ -552,7 +600,7 @@ static void print_absolute_symbols(void)
37177
37178 static void print_absolute_relocs(void)
37179 {
37180- int i, printed = 0;
37181+ unsigned int i, printed = 0;
37182 const char *format;
37183
37184 if (ELF_BITS == 64)
37185@@ -565,7 +613,7 @@ static void print_absolute_relocs(void)
37186 struct section *sec_applies, *sec_symtab;
37187 char *sym_strtab;
37188 Elf_Sym *sh_symtab;
37189- int j;
37190+ unsigned int j;
37191 if (sec->shdr.sh_type != SHT_REL_TYPE) {
37192 continue;
37193 }
37194@@ -642,13 +690,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
37195 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
37196 Elf_Sym *sym, const char *symname))
37197 {
37198- int i;
37199+ unsigned int i;
37200 /* Walk through the relocations */
37201 for (i = 0; i < ehdr.e_shnum; i++) {
37202 char *sym_strtab;
37203 Elf_Sym *sh_symtab;
37204 struct section *sec_applies, *sec_symtab;
37205- int j;
37206+ unsigned int j;
37207 struct section *sec = &secs[i];
37208
37209 if (sec->shdr.sh_type != SHT_REL_TYPE) {
37210@@ -822,6 +870,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
37211 {
37212 unsigned r_type = ELF32_R_TYPE(rel->r_info);
37213 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
37214+ char *sym_strtab = sec->link->link->strtab;
37215+
37216+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
37217+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
37218+ return 0;
37219+
37220+#ifdef CONFIG_PAX_KERNEXEC
37221+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
37222+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
37223+ return 0;
37224+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
37225+ return 0;
37226+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
37227+ return 0;
37228+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
37229+ return 0;
37230+#endif
37231
37232 switch (r_type) {
37233 case R_386_NONE:
37234@@ -960,7 +1025,7 @@ static int write32_as_text(uint32_t v, FILE *f)
37235
37236 static void emit_relocs(int as_text, int use_real_mode)
37237 {
37238- int i;
37239+ unsigned int i;
37240 int (*write_reloc)(uint32_t, FILE *) = write32;
37241 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
37242 const char *symname);
37243@@ -1060,10 +1125,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
37244 {
37245 regex_init(use_real_mode);
37246 read_ehdr(fp);
37247+ read_phdrs(fp);
37248 read_shdrs(fp);
37249 read_strtabs(fp);
37250 read_symtabs(fp);
37251- read_relocs(fp);
37252+ read_relocs(fp, use_real_mode);
37253 if (ELF_BITS == 64)
37254 percpu_init();
37255 if (show_absolute_syms) {
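
read_relocs() above rebases every 32-bit relocation into the virtual mapping of its PT_LOAD segment: base = CONFIG_PAGE_OFFSET + p_paddr - p_vaddr is added to r_offset. A worked example with hypothetical values: with CONFIG_PAGE_OFFSET = 0xc0000000 and a KERNEXEC kernel whose load segment has p_vaddr == p_paddr == 0x01000000, base works out to 0xc0000000, so a relocation recorded at 0x01234567 is reported at 0xc1234567 — the address the running kernel actually uses. Real-mode images (use_real_mode) skip the rebase entirely, since the !use_real_mode guard leaves base at 0.
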
37256diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
37257index f40281e..92728c9 100644
37258--- a/arch/x86/um/mem_32.c
37259+++ b/arch/x86/um/mem_32.c
37260@@ -21,7 +21,7 @@ static int __init gate_vma_init(void)
37261 gate_vma.vm_start = FIXADDR_USER_START;
37262 gate_vma.vm_end = FIXADDR_USER_END;
37263 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
37264- gate_vma.vm_page_prot = __P101;
37265+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
37266
37267 return 0;
37268 }
37269diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
37270index 80ffa5b..a33bd15 100644
37271--- a/arch/x86/um/tls_32.c
37272+++ b/arch/x86/um/tls_32.c
37273@@ -260,7 +260,7 @@ out:
37274 if (unlikely(task == current &&
37275 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
37276 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
37277- "without flushed TLS.", current->pid);
37278+ "without flushed TLS.", task_pid_nr(current));
37279 }
37280
37281 return 0;
37282diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
37283index 61b04fe..3134230 100644
37284--- a/arch/x86/vdso/Makefile
37285+++ b/arch/x86/vdso/Makefile
37286@@ -170,7 +170,7 @@ quiet_cmd_vdso = VDSO $@
37287 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
37288 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
37289
37290-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
37291+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
37292 $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
37293 GCOV_PROFILE := n
37294
37295diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
37296index e4f7781..ab5ab26 100644
37297--- a/arch/x86/vdso/vdso32-setup.c
37298+++ b/arch/x86/vdso/vdso32-setup.c
37299@@ -14,6 +14,7 @@
37300 #include <asm/cpufeature.h>
37301 #include <asm/processor.h>
37302 #include <asm/vdso.h>
37303+#include <asm/mman.h>
37304
37305 #ifdef CONFIG_COMPAT_VDSO
37306 #define VDSO_DEFAULT 0
37307diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
37308index 5a5176d..e570acd 100644
37309--- a/arch/x86/vdso/vma.c
37310+++ b/arch/x86/vdso/vma.c
37311@@ -16,10 +16,9 @@
37312 #include <asm/vdso.h>
37313 #include <asm/page.h>
37314 #include <asm/hpet.h>
37315+#include <asm/mman.h>
37316
37317 #if defined(CONFIG_X86_64)
37318-unsigned int __read_mostly vdso64_enabled = 1;
37319-
37320 extern unsigned short vdso_sync_cpuid;
37321 #endif
37322
37323@@ -101,6 +100,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
37324 .pages = no_pages,
37325 };
37326
37327+#ifdef CONFIG_PAX_RANDMMAP
37328+ if (mm->pax_flags & MF_PAX_RANDMMAP)
37329+ calculate_addr = false;
37330+#endif
37331+
37332 if (calculate_addr) {
37333 addr = vdso_addr(current->mm->start_stack,
37334 image->sym_end_mapping);
37335@@ -110,13 +114,13 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
37336
37337 down_write(&mm->mmap_sem);
37338
37339- addr = get_unmapped_area(NULL, addr, image->sym_end_mapping, 0, 0);
37340+ addr = get_unmapped_area(NULL, addr, image->sym_end_mapping, 0, MAP_EXECUTABLE);
37341 if (IS_ERR_VALUE(addr)) {
37342 ret = addr;
37343 goto up_fail;
37344 }
37345
37346- current->mm->context.vdso = (void __user *)addr;
37347+ mm->context.vdso = addr;
37348
37349 /*
37350 * MAYWRITE to allow gdb to COW and set breakpoints
37351@@ -161,15 +165,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
37352 hpet_address >> PAGE_SHIFT,
37353 PAGE_SIZE,
37354 pgprot_noncached(PAGE_READONLY));
37355-
37356- if (ret)
37357- goto up_fail;
37358 }
37359 #endif
37360
37361 up_fail:
37362 if (ret)
37363- current->mm->context.vdso = NULL;
37364+ current->mm->context.vdso = 0;
37365
37366 up_write(&mm->mmap_sem);
37367 return ret;
37368@@ -189,8 +190,8 @@ static int load_vdso32(void)
37369
37370 if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
37371 current_thread_info()->sysenter_return =
37372- current->mm->context.vdso +
37373- selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
37374+ (void __force_user *)(current->mm->context.vdso +
37375+ selected_vdso32->sym_VDSO32_SYSENTER_RETURN);
37376
37377 return 0;
37378 }
37379@@ -199,9 +200,6 @@ static int load_vdso32(void)
37380 #ifdef CONFIG_X86_64
37381 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
37382 {
37383- if (!vdso64_enabled)
37384- return 0;
37385-
37386 return map_vdso(&vdso_image_64, true);
37387 }
37388
37389@@ -210,12 +208,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
37390 int uses_interp)
37391 {
37392 #ifdef CONFIG_X86_X32_ABI
37393- if (test_thread_flag(TIF_X32)) {
37394- if (!vdso64_enabled)
37395- return 0;
37396-
37397+ if (test_thread_flag(TIF_X32))
37398 return map_vdso(&vdso_image_x32, true);
37399- }
37400 #endif
37401
37402 return load_vdso32();
37403@@ -227,12 +221,3 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
37404 return load_vdso32();
37405 }
37406 #endif
37407-
37408-#ifdef CONFIG_X86_64
37409-static __init int vdso_setup(char *s)
37410-{
37411- vdso64_enabled = simple_strtoul(s, NULL, 0);
37412- return 0;
37413-}
37414-__setup("vdso=", vdso_setup);
37415-#endif
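The vma.c changes above do two things: they drop the vdso64_enabled knob (and its vdso= boot parameter) so the vdso cannot be switched off, and they force map_vdso() to ignore the stack-derived address hint whenever PaX mmap randomization is active, so placement comes entirely from the (randomizing) get_unmapped_area(). A condensed sketch of that placement logic, assuming the pax_flags field and MF_PAX_RANDMMAP flag this patch adds to struct mm_struct; this is not the patched function itself:

    /* When address-space randomization is in force, discard any
     * caller-suggested address and let get_unmapped_area() pick one.
     * Caller must hold mm->mmap_sem for writing.
     */
    static unsigned long pick_vdso_addr(struct mm_struct *mm,
                                        unsigned long hint, unsigned long len)
    {
    #ifdef CONFIG_PAX_RANDMMAP
            if (mm->pax_flags & MF_PAX_RANDMMAP)
                    hint = 0;       /* no hint: fully randomized placement */
    #endif
            return get_unmapped_area(NULL, hint, len, 0, MAP_EXECUTABLE);
    }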
37416diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
37417index e88fda8..76ce7ce 100644
37418--- a/arch/x86/xen/Kconfig
37419+++ b/arch/x86/xen/Kconfig
37420@@ -9,6 +9,7 @@ config XEN
37421 select XEN_HAVE_PVMMU
37422 depends on X86_64 || (X86_32 && X86_PAE)
37423 depends on X86_TSC
37424+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
37425 help
37426 This is the Linux Xen port. Enabling this will allow the
37427 kernel to boot in a paravirtualized environment under the
37428diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
37429index ffb101e..98c0ecf 100644
37430--- a/arch/x86/xen/enlighten.c
37431+++ b/arch/x86/xen/enlighten.c
37432@@ -123,8 +123,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
37433
37434 struct shared_info xen_dummy_shared_info;
37435
37436-void *xen_initial_gdt;
37437-
37438 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
37439 __read_mostly int xen_have_vector_callback;
37440 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
37441@@ -542,8 +540,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
37442 {
37443 unsigned long va = dtr->address;
37444 unsigned int size = dtr->size + 1;
37445- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
37446- unsigned long frames[pages];
37447+ unsigned long frames[65536 / PAGE_SIZE];
37448 int f;
37449
37450 /*
37451@@ -591,8 +588,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
37452 {
37453 unsigned long va = dtr->address;
37454 unsigned int size = dtr->size + 1;
37455- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
37456- unsigned long frames[pages];
37457+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
37458 int f;
37459
37460 /*
37461@@ -600,7 +596,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
37462 * 8-byte entries, or 16 4k pages..
37463 */
37464
37465- BUG_ON(size > 65536);
37466+ BUG_ON(size > GDT_SIZE);
37467 BUG_ON(va & ~PAGE_MASK);
37468
37469 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
37470@@ -989,7 +985,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
37471 return 0;
37472 }
37473
37474-static void set_xen_basic_apic_ops(void)
37475+static void __init set_xen_basic_apic_ops(void)
37476 {
37477 apic->read = xen_apic_read;
37478 apic->write = xen_apic_write;
37479@@ -1295,30 +1291,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
37480 #endif
37481 };
37482
37483-static void xen_reboot(int reason)
37484+static __noreturn void xen_reboot(int reason)
37485 {
37486 struct sched_shutdown r = { .reason = reason };
37487
37488- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
37489- BUG();
37490+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
37491+ BUG();
37492 }
37493
37494-static void xen_restart(char *msg)
37495+static __noreturn void xen_restart(char *msg)
37496 {
37497 xen_reboot(SHUTDOWN_reboot);
37498 }
37499
37500-static void xen_emergency_restart(void)
37501+static __noreturn void xen_emergency_restart(void)
37502 {
37503 xen_reboot(SHUTDOWN_reboot);
37504 }
37505
37506-static void xen_machine_halt(void)
37507+static __noreturn void xen_machine_halt(void)
37508 {
37509 xen_reboot(SHUTDOWN_poweroff);
37510 }
37511
37512-static void xen_machine_power_off(void)
37513+static __noreturn void xen_machine_power_off(void)
37514 {
37515 if (pm_power_off)
37516 pm_power_off();
37517@@ -1568,7 +1564,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
37518 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
37519
37520 /* Work out if we support NX */
37521- x86_configure_nx();
37522+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
37523+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
37524+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
37525+ unsigned l, h;
37526+
37527+ __supported_pte_mask |= _PAGE_NX;
37528+ rdmsr(MSR_EFER, l, h);
37529+ l |= EFER_NX;
37530+ wrmsr(MSR_EFER, l, h);
37531+ }
37532+#endif
37533
37534 /* Get mfn list */
37535 xen_build_dynamic_phys_to_machine();
37536@@ -1596,13 +1602,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
37537
37538 machine_ops = xen_machine_ops;
37539
37540- /*
37541- * The only reliable way to retain the initial address of the
37542- * percpu gdt_page is to remember it here, so we can go and
37543- * mark it RW later, when the initial percpu area is freed.
37544- */
37545- xen_initial_gdt = &per_cpu(gdt_page, 0);
37546-
37547 xen_smp_init();
37548
37549 #ifdef CONFIG_ACPI_NUMA
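Both xen_load_gdt() hunks above replace a variable-length array, whose length was derived from dtr->size at runtime, with a stack array sized for the worst-case GDT; the BUG_ON() then keeps the runtime value inside that bound. A userspace analogue of the transformation, with illustrative constants (the hunk's own comment gives 65536 bytes, i.e. 16 4k pages, as the maximum):

    /* Size the on-stack array for the worst case and assert the runtime
     * bound, instead of letting a variable length pick the stack
     * allocation (VLAs defeat stack-depth analysis).
     */
    #include <assert.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096
    #define GDT_MAX   (16 * PAGE_SIZE)              /* 65536 bytes */

    static void collect_frames(const unsigned char *table, size_t size)
    {
            unsigned long frames[(GDT_MAX + PAGE_SIZE - 1) / PAGE_SIZE];
            size_t i, n = (size + PAGE_SIZE - 1) / PAGE_SIZE;

            assert(size <= GDT_MAX);        /* mirrors BUG_ON(size > GDT_SIZE) */
            for (i = 0; i < n; i++)
                    frames[i] = (unsigned long)(table + i * PAGE_SIZE);
            (void)frames;                   /* sketch: frames not consumed */
    }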
37550diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
37551index e8a1201..046c66c 100644
37552--- a/arch/x86/xen/mmu.c
37553+++ b/arch/x86/xen/mmu.c
37554@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
37555 return val;
37556 }
37557
37558-static pteval_t pte_pfn_to_mfn(pteval_t val)
37559+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
37560 {
37561 if (val & _PAGE_PRESENT) {
37562 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
37563@@ -1904,6 +1904,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
37564 /* L3_k[510] -> level2_kernel_pgt
37565 * L3_i[511] -> level2_fixmap_pgt */
37566 convert_pfn_mfn(level3_kernel_pgt);
37567+ convert_pfn_mfn(level3_vmalloc_start_pgt);
37568+ convert_pfn_mfn(level3_vmalloc_end_pgt);
37569+ convert_pfn_mfn(level3_vmemmap_pgt);
37570 }
37571 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
37572 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
37573@@ -1933,8 +1936,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
37574 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
37575 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
37576 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
37577+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
37578+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
37579+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
37580 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
37581 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
37582+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
37583 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
37584 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
37585
37586@@ -2120,6 +2127,7 @@ static void __init xen_post_allocator_init(void)
37587 pv_mmu_ops.set_pud = xen_set_pud;
37588 #if PAGETABLE_LEVELS == 4
37589 pv_mmu_ops.set_pgd = xen_set_pgd;
37590+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
37591 #endif
37592
37593 /* This will work as long as patching hasn't happened yet
37594@@ -2198,6 +2206,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
37595 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
37596 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
37597 .set_pgd = xen_set_pgd_hyper,
37598+ .set_pgd_batched = xen_set_pgd_hyper,
37599
37600 .alloc_pud = xen_alloc_pmd_init,
37601 .release_pud = xen_release_pmd_init,
37602diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
37603index 7005974..54fb05f 100644
37604--- a/arch/x86/xen/smp.c
37605+++ b/arch/x86/xen/smp.c
37606@@ -283,17 +283,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
37607
37608 if (xen_pv_domain()) {
37609 if (!xen_feature(XENFEAT_writable_page_tables))
37610- /* We've switched to the "real" per-cpu gdt, so make
37611- * sure the old memory can be recycled. */
37612- make_lowmem_page_readwrite(xen_initial_gdt);
37613-
37614 #ifdef CONFIG_X86_32
37615 /*
37616 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
37617 * expects __USER_DS
37618 */
37619- loadsegment(ds, __USER_DS);
37620- loadsegment(es, __USER_DS);
37621+ loadsegment(ds, __KERNEL_DS);
37622+ loadsegment(es, __KERNEL_DS);
37623 #endif
37624
37625 xen_filter_cpu_maps();
37626@@ -372,7 +368,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
37627 #ifdef CONFIG_X86_32
37628 /* Note: PVH is not yet supported on x86_32. */
37629 ctxt->user_regs.fs = __KERNEL_PERCPU;
37630- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
37631+ savesegment(gs, ctxt->user_regs.gs);
37632 #endif
37633 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
37634
37635@@ -381,8 +377,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
37636 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
37637 ctxt->flags = VGCF_IN_KERNEL;
37638 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
37639- ctxt->user_regs.ds = __USER_DS;
37640- ctxt->user_regs.es = __USER_DS;
37641+ ctxt->user_regs.ds = __KERNEL_DS;
37642+ ctxt->user_regs.es = __KERNEL_DS;
37643 ctxt->user_regs.ss = __KERNEL_DS;
37644
37645 xen_copy_trap_info(ctxt->trap_ctxt);
37646@@ -437,14 +433,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
37647 int rc;
37648
37649 per_cpu(current_task, cpu) = idle;
37650+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
37651 #ifdef CONFIG_X86_32
37652 irq_ctx_init(cpu);
37653 #else
37654 clear_tsk_thread_flag(idle, TIF_FORK);
37655 #endif
37656- per_cpu(kernel_stack, cpu) =
37657- (unsigned long)task_stack_page(idle) -
37658- KERNEL_STACK_OFFSET + THREAD_SIZE;
37659+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
37660
37661 xen_setup_runstate_info(cpu);
37662 xen_setup_timer(cpu);
37663@@ -720,7 +715,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
37664
37665 void __init xen_smp_init(void)
37666 {
37667- smp_ops = xen_smp_ops;
37668+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
37669 xen_fill_possible_map();
37670 }
37671
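xen_smp_init() above can no longer assign to smp_ops directly because this patch constifies such ops tables elsewhere; the one-time boot write becomes an explicit memcpy through a cast. A kernel-style sketch of the idiom, with hypothetical names; it is only sound because at this point in boot the backing page is still writable:

    /* One-time install into a const-qualified ops table.  The cast
     * discards the qualifier the constify plugin imposed; after boot the
     * object's page is made read-only and such writes would fault.
     */
    #include <string.h>

    struct ops_like { void (*start)(void); };

    static const struct ops_like live_ops;

    static void install_ops(const struct ops_like *src)
    {
            memcpy((void *)&live_ops, src, sizeof(live_ops));
    }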
37672diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
37673index fd92a64..1f72641 100644
37674--- a/arch/x86/xen/xen-asm_32.S
37675+++ b/arch/x86/xen/xen-asm_32.S
37676@@ -99,7 +99,7 @@ ENTRY(xen_iret)
37677 pushw %fs
37678 movl $(__KERNEL_PERCPU), %eax
37679 movl %eax, %fs
37680- movl %fs:xen_vcpu, %eax
37681+ mov PER_CPU_VAR(xen_vcpu), %eax
37682 POP_FS
37683 #else
37684 movl %ss:xen_vcpu, %eax
37685diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
37686index 485b695..fda3e7c 100644
37687--- a/arch/x86/xen/xen-head.S
37688+++ b/arch/x86/xen/xen-head.S
37689@@ -39,6 +39,17 @@ ENTRY(startup_xen)
37690 #ifdef CONFIG_X86_32
37691 mov %esi,xen_start_info
37692 mov $init_thread_union+THREAD_SIZE,%esp
37693+#ifdef CONFIG_SMP
37694+ movl $cpu_gdt_table,%edi
37695+ movl $__per_cpu_load,%eax
37696+ movw %ax,__KERNEL_PERCPU + 2(%edi)
37697+ rorl $16,%eax
37698+ movb %al,__KERNEL_PERCPU + 4(%edi)
37699+ movb %ah,__KERNEL_PERCPU + 7(%edi)
37700+ movl $__per_cpu_end - 1,%eax
37701+ subl $__per_cpu_start,%eax
37702+ movw %ax,__KERNEL_PERCPU + 0(%edi)
37703+#endif
37704 #else
37705 mov %rsi,xen_start_info
37706 mov $init_thread_union+THREAD_SIZE,%rsp
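The CONFIG_SMP block added to startup_xen patches the __KERNEL_PERCPU slot of the boot GDT so that %fs-relative per-cpu accesses work before any C code runs. The movw/movb sequence follows the x86 segment-descriptor layout: limit[15:0] at bytes 0-1, base[15:0] at bytes 2-3, base[23:16] at byte 4, base[31:24] at byte 7. A C sketch of the same scatter (illustrative helper, not from the patch):

    #include <stdint.h>

    /* Scatter a 32-bit base and a 20-bit limit across an 8-byte x86
     * segment descriptor, matching the asm above byte for byte.
     */
    static void set_desc_base_limit(uint8_t desc[8], uint32_t base, uint32_t limit)
    {
            desc[0] = limit & 0xff;
            desc[1] = (limit >> 8) & 0xff;
            desc[2] = base & 0xff;
            desc[3] = (base >> 8) & 0xff;
            desc[4] = (base >> 16) & 0xff;
            desc[6] = (desc[6] & 0xf0) | ((limit >> 16) & 0x0f);
            desc[7] = (base >> 24) & 0xff;
    }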
37707diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
37708index 97d8765..c4526ec 100644
37709--- a/arch/x86/xen/xen-ops.h
37710+++ b/arch/x86/xen/xen-ops.h
37711@@ -10,8 +10,6 @@
37712 extern const char xen_hypervisor_callback[];
37713 extern const char xen_failsafe_callback[];
37714
37715-extern void *xen_initial_gdt;
37716-
37717 struct trap_info;
37718 void xen_copy_trap_info(struct trap_info *traps);
37719
37720diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
37721index 525bd3d..ef888b1 100644
37722--- a/arch/xtensa/variants/dc232b/include/variant/core.h
37723+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
37724@@ -119,9 +119,9 @@
37725 ----------------------------------------------------------------------*/
37726
37727 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
37728-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
37729 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
37730 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
37731+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
37732
37733 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
37734 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
37735diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
37736index 2f33760..835e50a 100644
37737--- a/arch/xtensa/variants/fsf/include/variant/core.h
37738+++ b/arch/xtensa/variants/fsf/include/variant/core.h
37739@@ -11,6 +11,7 @@
37740 #ifndef _XTENSA_CORE_H
37741 #define _XTENSA_CORE_H
37742
37743+#include <linux/const.h>
37744
37745 /****************************************************************************
37746 Parameters Useful for Any Code, USER or PRIVILEGED
37747@@ -112,9 +113,9 @@
37748 ----------------------------------------------------------------------*/
37749
37750 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
37751-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
37752 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
37753 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
37754+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
37755
37756 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
37757 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
37758diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
37759index af00795..2bb8105 100644
37760--- a/arch/xtensa/variants/s6000/include/variant/core.h
37761+++ b/arch/xtensa/variants/s6000/include/variant/core.h
37762@@ -11,6 +11,7 @@
37763 #ifndef _XTENSA_CORE_CONFIGURATION_H
37764 #define _XTENSA_CORE_CONFIGURATION_H
37765
37766+#include <linux/const.h>
37767
37768 /****************************************************************************
37769 Parameters Useful for Any Code, USER or PRIVILEGED
37770@@ -118,9 +119,9 @@
37771 ----------------------------------------------------------------------*/
37772
37773 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
37774-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
37775 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
37776 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
37777+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
37778
37779 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
37780 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
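All three xtensa core.h hunks rewrite XCHAL_DCACHE_LINESIZE as (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) and pull in <linux/const.h> for _AC(). The point of _AC() is that it drops the UL suffix in assembly but keeps it in C, so one header serves both consumers and the C side computes the shift in unsigned long rather than int. A self-contained demo of the idiom, re-declared here under different names for illustration:

    /* Same trick as _AC()/__AC() in <linux/const.h>: suffix is pasted on
     * in C, dropped in assembly.
     */
    #ifdef __ASSEMBLY__
    #define MY_AC(x, y)     x
    #else
    #define __MY_AC(x, y)   (x##y)
    #define MY_AC(x, y)     __MY_AC(x, y)
    #endif

    #define DCACHE_LINEWIDTH 5
    #define DCACHE_LINESIZE  (MY_AC(1, UL) << DCACHE_LINEWIDTH)   /* 32UL in C */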
37781diff --git a/block/bio.c b/block/bio.c
37782index 0ec61c9..93b94060 100644
37783--- a/block/bio.c
37784+++ b/block/bio.c
37785@@ -1159,7 +1159,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
37786 /*
37787 * Overflow, abort
37788 */
37789- if (end < start)
37790+ if (end < start || end - start > INT_MAX - nr_pages)
37791 return ERR_PTR(-EINVAL);
37792
37793 nr_pages += end - start;
37794@@ -1293,7 +1293,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
37795 /*
37796 * Overflow, abort
37797 */
37798- if (end < start)
37799+ if (end < start || end - start > INT_MAX - nr_pages)
37800 return ERR_PTR(-EINVAL);
37801
37802 nr_pages += end - start;
37803@@ -1555,7 +1555,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
37804 const int read = bio_data_dir(bio) == READ;
37805 struct bio_map_data *bmd = bio->bi_private;
37806 int i;
37807- char *p = bmd->sgvecs[0].iov_base;
37808+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
37809
37810 bio_for_each_segment_all(bvec, bio, i) {
37811 char *addr = page_address(bvec->bv_page);
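Both bio.c hunks above extend the wraparound test in two directions: end < start catches overflow in the page-range computation itself, and end - start > INT_MAX - nr_pages catches overflow of the running int accumulator. Moving the subtraction to the constant side is the standard way to test "a + b would exceed INT_MAX" without performing the overflowing addition. A userspace sketch:

    #include <limits.h>
    #include <stdbool.h>

    /* True if adding the page count of [start, end) to nr_pages would
     * overflow int; the possibly-overflowing sum is never computed.
     */
    static bool would_overflow(int nr_pages, unsigned long start, unsigned long end)
    {
            if (end < start)                /* the range itself wrapped */
                    return true;
            return end - start > (unsigned long)(INT_MAX - nr_pages);
    }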
37812diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
37813index 28d227c..d4c0bad 100644
37814--- a/block/blk-cgroup.c
37815+++ b/block/blk-cgroup.c
37816@@ -822,7 +822,7 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
37817 static struct cgroup_subsys_state *
37818 blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
37819 {
37820- static atomic64_t id_seq = ATOMIC64_INIT(0);
37821+ static atomic64_unchecked_t id_seq = ATOMIC64_INIT(0);
37822 struct blkcg *blkcg;
37823
37824 if (!parent_css) {
37825@@ -836,7 +836,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
37826
37827 blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
37828 blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
37829- blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
37830+ blkcg->id = atomic64_inc_return_unchecked(&id_seq); /* root is 0, start from 1 */
37831 done:
37832 spin_lock_init(&blkcg->lock);
37833 INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
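The blkcg id generator above moves to atomic64_unchecked_t. Under this patch's PAX_REFCOUNT hardening, plain atomic ops trap on wraparound; the *_unchecked variants, which the patch introduces (they are not mainline), opt counters that may legitimately wrap — IDs, sequence numbers, statistics — out of that check, while true reference counts keep plain atomic_t and stay protected. The same convention recurs in the GHES seqno and throughout the ATM driver hunks later in this section. A minimal sketch:

    /* Wrapping ID generator: exempt from the refcount-overflow detector
     * by using the _unchecked type and op from this patch.
     */
    static atomic64_unchecked_t id_seq = ATOMIC64_INIT(0);

    static u64 next_id(void)
    {
            return atomic64_inc_return_unchecked(&id_seq);
    }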
37834diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
37835index 0736729..2ec3b48 100644
37836--- a/block/blk-iopoll.c
37837+++ b/block/blk-iopoll.c
37838@@ -74,7 +74,7 @@ void blk_iopoll_complete(struct blk_iopoll *iop)
37839 }
37840 EXPORT_SYMBOL(blk_iopoll_complete);
37841
37842-static void blk_iopoll_softirq(struct softirq_action *h)
37843+static __latent_entropy void blk_iopoll_softirq(void)
37844 {
37845 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
37846 int rearm = 0, budget = blk_iopoll_budget;
37847diff --git a/block/blk-map.c b/block/blk-map.c
37848index f890d43..97b0482 100644
37849--- a/block/blk-map.c
37850+++ b/block/blk-map.c
37851@@ -300,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
37852 if (!len || !kbuf)
37853 return -EINVAL;
37854
37855- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
37856+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
37857 if (do_copy)
37858 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
37859 else
37860diff --git a/block/blk-softirq.c b/block/blk-softirq.c
37861index 53b1737..08177d2e 100644
37862--- a/block/blk-softirq.c
37863+++ b/block/blk-softirq.c
37864@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
37865 * Softirq action handler - move entries to local list and loop over them
37866 * while passing them to the queue registered handler.
37867 */
37868-static void blk_done_softirq(struct softirq_action *h)
37869+static __latent_entropy void blk_done_softirq(void)
37870 {
37871 struct list_head *cpu_list, local_list;
37872
37873diff --git a/block/bsg.c b/block/bsg.c
37874index ff46add..c4ba8ee 100644
37875--- a/block/bsg.c
37876+++ b/block/bsg.c
37877@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
37878 struct sg_io_v4 *hdr, struct bsg_device *bd,
37879 fmode_t has_write_perm)
37880 {
37881+ unsigned char tmpcmd[sizeof(rq->__cmd)];
37882+ unsigned char *cmdptr;
37883+
37884 if (hdr->request_len > BLK_MAX_CDB) {
37885 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
37886 if (!rq->cmd)
37887 return -ENOMEM;
37888- }
37889+ cmdptr = rq->cmd;
37890+ } else
37891+ cmdptr = tmpcmd;
37892
37893- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
37894+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
37895 hdr->request_len))
37896 return -EFAULT;
37897
37898+ if (cmdptr != rq->cmd)
37899+ memcpy(rq->cmd, cmdptr, hdr->request_len);
37900+
37901 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
37902 if (blk_verify_command(rq->cmd, has_write_perm))
37903 return -EPERM;
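blk_fill_sgv4_hdr_rq() above now copies the user-supplied CDB either into the freshly kzalloc'd buffer or into a stack bounce buffer tmpcmd sized like rq->__cmd, and only then memcpy()s into rq->cmd. The effect is that copy_from_user() never writes through the rq->cmd alias, whose destination size the usercopy/size-overflow instrumentation added by this patch cannot verify; the same transformation is applied twice in block/scsi_ioctl.c below. A condensed sketch of the pattern, with a hypothetical helper name:

    /* fill_cmd() is illustrative: user data lands either in the dedicated
     * heap allocation or in a correctly-sized stack buffer, then is
     * copied into the embedded command array by trusted memcpy().
     */
    static int fill_cmd(struct request *rq, const void __user *ucmd, unsigned len)
    {
            unsigned char tmpcmd[sizeof(rq->__cmd)];
            unsigned char *cmdptr = (rq->cmd != rq->__cmd) ? rq->cmd : tmpcmd;

            if (copy_from_user(cmdptr, ucmd, len))
                    return -EFAULT;
            if (cmdptr != rq->cmd)
                    memcpy(rq->cmd, cmdptr, len);
            return 0;
    }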
37904diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
37905index a0926a6..b2b14b2 100644
37906--- a/block/compat_ioctl.c
37907+++ b/block/compat_ioctl.c
37908@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
37909 cgc = compat_alloc_user_space(sizeof(*cgc));
37910 cgc32 = compat_ptr(arg);
37911
37912- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
37913+ if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
37914 get_user(data, &cgc32->buffer) ||
37915 put_user(compat_ptr(data), &cgc->buffer) ||
37916 copy_in_user(&cgc->buflen, &cgc32->buflen,
37917@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
37918 err |= __get_user(f->spec1, &uf->spec1);
37919 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
37920 err |= __get_user(name, &uf->name);
37921- f->name = compat_ptr(name);
37922+ f->name = (void __force_kernel *)compat_ptr(name);
37923 if (err) {
37924 err = -EFAULT;
37925 goto out;
37926diff --git a/block/genhd.c b/block/genhd.c
37927index 791f419..89f21c4 100644
37928--- a/block/genhd.c
37929+++ b/block/genhd.c
37930@@ -467,21 +467,24 @@ static char *bdevt_str(dev_t devt, char *buf)
37931
37932 /*
37933 * Register device numbers dev..(dev+range-1)
37934- * range must be nonzero
37935+ * Noop if @range is zero.
37936 * The hash chain is sorted on range, so that subranges can override.
37937 */
37938 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
37939 struct kobject *(*probe)(dev_t, int *, void *),
37940 int (*lock)(dev_t, void *), void *data)
37941 {
37942- kobj_map(bdev_map, devt, range, module, probe, lock, data);
37943+ if (range)
37944+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
37945 }
37946
37947 EXPORT_SYMBOL(blk_register_region);
37948
37949+/* undo blk_register_region(), noop if @range is zero */
37950 void blk_unregister_region(dev_t devt, unsigned long range)
37951 {
37952- kobj_unmap(bdev_map, devt, range);
37953+ if (range)
37954+ kobj_unmap(bdev_map, devt, range);
37955 }
37956
37957 EXPORT_SYMBOL(blk_unregister_region);
37958diff --git a/block/partitions/efi.c b/block/partitions/efi.c
37959index dc51f46..d5446a8 100644
37960--- a/block/partitions/efi.c
37961+++ b/block/partitions/efi.c
37962@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
37963 if (!gpt)
37964 return NULL;
37965
37966+ if (!le32_to_cpu(gpt->num_partition_entries))
37967+ return NULL;
37968+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
37969+ if (!pte)
37970+ return NULL;
37971+
37972 count = le32_to_cpu(gpt->num_partition_entries) *
37973 le32_to_cpu(gpt->sizeof_partition_entry);
37974- if (!count)
37975- return NULL;
37976- pte = kmalloc(count, GFP_KERNEL);
37977- if (!pte)
37978- return NULL;
37979-
37980 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
37981 (u8 *) pte, count) < count) {
37982 kfree(pte);
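The efi.c hunk above replaces kmalloc(n * size) with kcalloc(n, size, ...). The old code computed count = num_partition_entries * sizeof_partition_entry before allocating, so an overflowing product could silently allocate a short buffer; kcalloc performs the multiplication internally and returns NULL on overflow, and count is only computed afterwards for the read_lba() length check. A minimal sketch of the idiom:

    #include <linux/slab.h>

    /* Overflow-safe array allocation: kcalloc() fails cleanly when
     * nr * entry_size would overflow, unlike a hand-rolled product
     * passed to kmalloc().
     */
    static void *alloc_entries(u32 nr, u32 entry_size)
    {
            if (!nr)
                    return NULL;
            return kcalloc(nr, entry_size, GFP_KERNEL);
    }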
37983diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
37984index 14695c6..27a4636 100644
37985--- a/block/scsi_ioctl.c
37986+++ b/block/scsi_ioctl.c
37987@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
37988 return put_user(0, p);
37989 }
37990
37991-static int sg_get_timeout(struct request_queue *q)
37992+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
37993 {
37994 return jiffies_to_clock_t(q->sg_timeout);
37995 }
37996@@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
37997 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
37998 struct sg_io_hdr *hdr, fmode_t mode)
37999 {
38000- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
38001+ unsigned char tmpcmd[sizeof(rq->__cmd)];
38002+ unsigned char *cmdptr;
38003+
38004+ if (rq->cmd != rq->__cmd)
38005+ cmdptr = rq->cmd;
38006+ else
38007+ cmdptr = tmpcmd;
38008+
38009+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
38010 return -EFAULT;
38011+
38012+ if (cmdptr != rq->cmd)
38013+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
38014+
38015 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
38016 return -EPERM;
38017
38018@@ -413,6 +425,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
38019 int err;
38020 unsigned int in_len, out_len, bytes, opcode, cmdlen;
38021 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
38022+ unsigned char tmpcmd[sizeof(rq->__cmd)];
38023+ unsigned char *cmdptr;
38024
38025 if (!sic)
38026 return -EINVAL;
38027@@ -446,9 +460,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
38028 */
38029 err = -EFAULT;
38030 rq->cmd_len = cmdlen;
38031- if (copy_from_user(rq->cmd, sic->data, cmdlen))
38032+
38033+ if (rq->cmd != rq->__cmd)
38034+ cmdptr = rq->cmd;
38035+ else
38036+ cmdptr = tmpcmd;
38037+
38038+ if (copy_from_user(cmdptr, sic->data, cmdlen))
38039 goto error;
38040
38041+ if (rq->cmd != cmdptr)
38042+ memcpy(rq->cmd, cmdptr, cmdlen);
38043+
38044 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
38045 goto error;
38046
38047diff --git a/crypto/cryptd.c b/crypto/cryptd.c
38048index 7bdd61b..afec999 100644
38049--- a/crypto/cryptd.c
38050+++ b/crypto/cryptd.c
38051@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
38052
38053 struct cryptd_blkcipher_request_ctx {
38054 crypto_completion_t complete;
38055-};
38056+} __no_const;
38057
38058 struct cryptd_hash_ctx {
38059 struct crypto_shash *child;
38060@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
38061
38062 struct cryptd_aead_request_ctx {
38063 crypto_completion_t complete;
38064-};
38065+} __no_const;
38066
38067 static void cryptd_queue_worker(struct work_struct *work);
38068
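The cryptd request contexts above hold a single function pointer that is rewritten per request, so this patch's constify plugin must not move them to read-only memory; __no_const marks that exemption. Its counterpart __do_const (used on apei_exec_ins_type just below) forces constification of ops-like tables that are only ever initialized. Both attributes come from this patch's gcc plugins, not from mainline. A sketch of the pair:

    /* Mutable-by-design: the completion callback changes at runtime. */
    struct handler_ctx {
            crypto_completion_t complete;
    } __no_const;

    /* Initialize-once dispatch entry: safe to place read-only. */
    struct ins_entry {
            u32 flags;
            int (*run)(void *ctx);
    } __do_const;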
38069diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
38070index 309d345..1632720 100644
38071--- a/crypto/pcrypt.c
38072+++ b/crypto/pcrypt.c
38073@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
38074 int ret;
38075
38076 pinst->kobj.kset = pcrypt_kset;
38077- ret = kobject_add(&pinst->kobj, NULL, name);
38078+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
38079 if (!ret)
38080 kobject_uevent(&pinst->kobj, KOBJ_ADD);
38081
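The pcrypt.c fix above is the classic format-string repair: kobject_add() takes a printf-style format, so passing the caller-supplied name directly would let a name containing % conversions drive the formatter; pinning the format to "%s" makes the name pure data. A userspace sketch of the same bug and fix:

    #include <stdio.h>

    static void label(const char *name)
    {
            /* BAD:  printf(name);  -- a name like "%n%n" controls printf */
            printf("%s\n", name);   /* GOOD: fixed format, name as data  */
    }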
38082diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
38083index 6921c7f..78e1af7 100644
38084--- a/drivers/acpi/acpica/hwxfsleep.c
38085+++ b/drivers/acpi/acpica/hwxfsleep.c
38086@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
38087 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
38088
38089 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
38090- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
38091- acpi_hw_extended_sleep},
38092- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
38093- acpi_hw_extended_wake_prep},
38094- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
38095+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
38096+ .extended_function = acpi_hw_extended_sleep},
38097+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
38098+ .extended_function = acpi_hw_extended_wake_prep},
38099+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
38100+ .extended_function = acpi_hw_extended_wake}
38101 };
38102
38103 /*
38104diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
38105index e5bcd91..74f050d 100644
38106--- a/drivers/acpi/apei/apei-internal.h
38107+++ b/drivers/acpi/apei/apei-internal.h
38108@@ -19,7 +19,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
38109 struct apei_exec_ins_type {
38110 u32 flags;
38111 apei_exec_ins_func_t run;
38112-};
38113+} __do_const;
38114
38115 struct apei_exec_context {
38116 u32 ip;
38117diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
38118index dab7cb7..f0d2994 100644
38119--- a/drivers/acpi/apei/ghes.c
38120+++ b/drivers/acpi/apei/ghes.c
38121@@ -500,7 +500,7 @@ static void __ghes_print_estatus(const char *pfx,
38122 const struct acpi_hest_generic *generic,
38123 const struct acpi_generic_status *estatus)
38124 {
38125- static atomic_t seqno;
38126+ static atomic_unchecked_t seqno;
38127 unsigned int curr_seqno;
38128 char pfx_seq[64];
38129
38130@@ -511,7 +511,7 @@ static void __ghes_print_estatus(const char *pfx,
38131 else
38132 pfx = KERN_ERR;
38133 }
38134- curr_seqno = atomic_inc_return(&seqno);
38135+ curr_seqno = atomic_inc_return_unchecked(&seqno);
38136 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
38137 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
38138 pfx_seq, generic->header.source_id);
38139diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
38140index a83e3c6..c3d617f 100644
38141--- a/drivers/acpi/bgrt.c
38142+++ b/drivers/acpi/bgrt.c
38143@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
38144 if (!bgrt_image)
38145 return -ENODEV;
38146
38147- bin_attr_image.private = bgrt_image;
38148- bin_attr_image.size = bgrt_image_size;
38149+ pax_open_kernel();
38150+ *(void **)&bin_attr_image.private = bgrt_image;
38151+ *(size_t *)&bin_attr_image.size = bgrt_image_size;
38152+ pax_close_kernel();
38153
38154 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
38155 if (!bgrt_kobj)
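bgrt_init() above writes into bin_attr_image, which the constify plugin has made read-only; pax_open_kernel()/pax_close_kernel() — provided by this patch — temporarily lift kernel write protection so the one-off boot-time stores can land, and the casts strip the const the plugin imposed. The same bracketing appears in the libata and pata_arasan_cf hunks further down. A sketch with a hypothetical helper name:

    /* One-off write into constified data.  Only valid for rare,
     * deliberate updates; normal code paths must not open the window.
     */
    static void set_ro_attr(struct bin_attribute *attr, void *priv, size_t size)
    {
            pax_open_kernel();                  /* allow writes to .rodata   */
            *(void **)&attr->private = priv;    /* cast away constified type */
            *(size_t *)&attr->size = size;
            pax_close_kernel();                 /* restore write protection  */
    }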
38156diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
38157index 3d8413d..95f638c 100644
38158--- a/drivers/acpi/blacklist.c
38159+++ b/drivers/acpi/blacklist.c
38160@@ -51,7 +51,7 @@ struct acpi_blacklist_item {
38161 u32 is_critical_error;
38162 };
38163
38164-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
38165+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
38166
38167 /*
38168 * POLICY: If *anything* doesn't work, put it on the blacklist.
38169@@ -163,7 +163,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
38170 return 0;
38171 }
38172
38173-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
38174+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
38175 {
38176 .callback = dmi_disable_osi_vista,
38177 .ident = "Fujitsu Siemens",
38178diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
38179index c68e724..e863008 100644
38180--- a/drivers/acpi/custom_method.c
38181+++ b/drivers/acpi/custom_method.c
38182@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
38183 struct acpi_table_header table;
38184 acpi_status status;
38185
38186+#ifdef CONFIG_GRKERNSEC_KMEM
38187+ return -EPERM;
38188+#endif
38189+
38190 if (!(*ppos)) {
38191 /* parse the table header to get the table length */
38192 if (count <= sizeof(struct acpi_table_header))
38193diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
38194index 3dca36d..abaf070 100644
38195--- a/drivers/acpi/processor_idle.c
38196+++ b/drivers/acpi/processor_idle.c
38197@@ -952,7 +952,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
38198 {
38199 int i, count = CPUIDLE_DRIVER_STATE_START;
38200 struct acpi_processor_cx *cx;
38201- struct cpuidle_state *state;
38202+ cpuidle_state_no_const *state;
38203 struct cpuidle_driver *drv = &acpi_idle_driver;
38204
38205 if (!pr->flags.power_setup_done)
38206diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
38207index 38cb978..352c761 100644
38208--- a/drivers/acpi/sysfs.c
38209+++ b/drivers/acpi/sysfs.c
38210@@ -423,11 +423,11 @@ static u32 num_counters;
38211 static struct attribute **all_attrs;
38212 static u32 acpi_gpe_count;
38213
38214-static struct attribute_group interrupt_stats_attr_group = {
38215+static attribute_group_no_const interrupt_stats_attr_group = {
38216 .name = "interrupts",
38217 };
38218
38219-static struct kobj_attribute *counter_attrs;
38220+static kobj_attribute_no_const *counter_attrs;
38221
38222 static void delete_gpe_attr_array(void)
38223 {
38224diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
38225index d72ce04..d6ab3c2 100644
38226--- a/drivers/ata/libahci.c
38227+++ b/drivers/ata/libahci.c
38228@@ -1257,7 +1257,7 @@ int ahci_kick_engine(struct ata_port *ap)
38229 }
38230 EXPORT_SYMBOL_GPL(ahci_kick_engine);
38231
38232-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
38233+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
38234 struct ata_taskfile *tf, int is_cmd, u16 flags,
38235 unsigned long timeout_msec)
38236 {
38237diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
38238index 677c0c1..354b89b 100644
38239--- a/drivers/ata/libata-core.c
38240+++ b/drivers/ata/libata-core.c
38241@@ -98,7 +98,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
38242 static void ata_dev_xfermask(struct ata_device *dev);
38243 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
38244
38245-atomic_t ata_print_id = ATOMIC_INIT(0);
38246+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
38247
38248 struct ata_force_param {
38249 const char *name;
38250@@ -4863,7 +4863,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
38251 struct ata_port *ap;
38252 unsigned int tag;
38253
38254- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
38255+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
38256 ap = qc->ap;
38257
38258 qc->flags = 0;
38259@@ -4879,7 +4879,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
38260 struct ata_port *ap;
38261 struct ata_link *link;
38262
38263- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
38264+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
38265 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
38266 ap = qc->ap;
38267 link = qc->dev->link;
38268@@ -5983,6 +5983,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
38269 return;
38270
38271 spin_lock(&lock);
38272+ pax_open_kernel();
38273
38274 for (cur = ops->inherits; cur; cur = cur->inherits) {
38275 void **inherit = (void **)cur;
38276@@ -5996,8 +5997,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
38277 if (IS_ERR(*pp))
38278 *pp = NULL;
38279
38280- ops->inherits = NULL;
38281+ *(struct ata_port_operations **)&ops->inherits = NULL;
38282
38283+ pax_close_kernel();
38284 spin_unlock(&lock);
38285 }
38286
38287@@ -6193,7 +6195,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
38288
38289 /* give ports names and add SCSI hosts */
38290 for (i = 0; i < host->n_ports; i++) {
38291- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
38292+ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
38293 host->ports[i]->local_port_no = i + 1;
38294 }
38295
38296diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
38297index 72691fd..ad104c0 100644
38298--- a/drivers/ata/libata-scsi.c
38299+++ b/drivers/ata/libata-scsi.c
38300@@ -4151,7 +4151,7 @@ int ata_sas_port_init(struct ata_port *ap)
38301
38302 if (rc)
38303 return rc;
38304- ap->print_id = atomic_inc_return(&ata_print_id);
38305+ ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
38306 return 0;
38307 }
38308 EXPORT_SYMBOL_GPL(ata_sas_port_init);
38309diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
38310index 45b5ab3..98446b8 100644
38311--- a/drivers/ata/libata.h
38312+++ b/drivers/ata/libata.h
38313@@ -53,7 +53,7 @@ enum {
38314 ATA_DNXFER_QUIET = (1 << 31),
38315 };
38316
38317-extern atomic_t ata_print_id;
38318+extern atomic_unchecked_t ata_print_id;
38319 extern int atapi_passthru16;
38320 extern int libata_fua;
38321 extern int libata_noacpi;
38322diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
38323index 4edb1a8..84e1658 100644
38324--- a/drivers/ata/pata_arasan_cf.c
38325+++ b/drivers/ata/pata_arasan_cf.c
38326@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
38327 /* Handle platform specific quirks */
38328 if (quirk) {
38329 if (quirk & CF_BROKEN_PIO) {
38330- ap->ops->set_piomode = NULL;
38331+ pax_open_kernel();
38332+ *(void **)&ap->ops->set_piomode = NULL;
38333+ pax_close_kernel();
38334 ap->pio_mask = 0;
38335 }
38336 if (quirk & CF_BROKEN_MWDMA)
38337diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
38338index f9b983a..887b9d8 100644
38339--- a/drivers/atm/adummy.c
38340+++ b/drivers/atm/adummy.c
38341@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
38342 vcc->pop(vcc, skb);
38343 else
38344 dev_kfree_skb_any(skb);
38345- atomic_inc(&vcc->stats->tx);
38346+ atomic_inc_unchecked(&vcc->stats->tx);
38347
38348 return 0;
38349 }
38350diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
38351index f1a9198..f466a4a 100644
38352--- a/drivers/atm/ambassador.c
38353+++ b/drivers/atm/ambassador.c
38354@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
38355 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
38356
38357 // VC layer stats
38358- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
38359+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
38360
38361 // free the descriptor
38362 kfree (tx_descr);
38363@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
38364 dump_skb ("<<<", vc, skb);
38365
38366 // VC layer stats
38367- atomic_inc(&atm_vcc->stats->rx);
38368+ atomic_inc_unchecked(&atm_vcc->stats->rx);
38369 __net_timestamp(skb);
38370 // end of our responsibility
38371 atm_vcc->push (atm_vcc, skb);
38372@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
38373 } else {
38374 PRINTK (KERN_INFO, "dropped over-size frame");
38375 // should we count this?
38376- atomic_inc(&atm_vcc->stats->rx_drop);
38377+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
38378 }
38379
38380 } else {
38381@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
38382 }
38383
38384 if (check_area (skb->data, skb->len)) {
38385- atomic_inc(&atm_vcc->stats->tx_err);
38386+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
38387 return -ENOMEM; // ?
38388 }
38389
38390diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
38391index 0e3f8f9..765a7a5 100644
38392--- a/drivers/atm/atmtcp.c
38393+++ b/drivers/atm/atmtcp.c
38394@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
38395 if (vcc->pop) vcc->pop(vcc,skb);
38396 else dev_kfree_skb(skb);
38397 if (dev_data) return 0;
38398- atomic_inc(&vcc->stats->tx_err);
38399+ atomic_inc_unchecked(&vcc->stats->tx_err);
38400 return -ENOLINK;
38401 }
38402 size = skb->len+sizeof(struct atmtcp_hdr);
38403@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
38404 if (!new_skb) {
38405 if (vcc->pop) vcc->pop(vcc,skb);
38406 else dev_kfree_skb(skb);
38407- atomic_inc(&vcc->stats->tx_err);
38408+ atomic_inc_unchecked(&vcc->stats->tx_err);
38409 return -ENOBUFS;
38410 }
38411 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
38412@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
38413 if (vcc->pop) vcc->pop(vcc,skb);
38414 else dev_kfree_skb(skb);
38415 out_vcc->push(out_vcc,new_skb);
38416- atomic_inc(&vcc->stats->tx);
38417- atomic_inc(&out_vcc->stats->rx);
38418+ atomic_inc_unchecked(&vcc->stats->tx);
38419+ atomic_inc_unchecked(&out_vcc->stats->rx);
38420 return 0;
38421 }
38422
38423@@ -299,7 +299,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
38424 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
38425 read_unlock(&vcc_sklist_lock);
38426 if (!out_vcc) {
38427- atomic_inc(&vcc->stats->tx_err);
38428+ atomic_inc_unchecked(&vcc->stats->tx_err);
38429 goto done;
38430 }
38431 skb_pull(skb,sizeof(struct atmtcp_hdr));
38432@@ -311,8 +311,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
38433 __net_timestamp(new_skb);
38434 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
38435 out_vcc->push(out_vcc,new_skb);
38436- atomic_inc(&vcc->stats->tx);
38437- atomic_inc(&out_vcc->stats->rx);
38438+ atomic_inc_unchecked(&vcc->stats->tx);
38439+ atomic_inc_unchecked(&out_vcc->stats->rx);
38440 done:
38441 if (vcc->pop) vcc->pop(vcc,skb);
38442 else dev_kfree_skb(skb);
38443diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
38444index b1955ba..b179940 100644
38445--- a/drivers/atm/eni.c
38446+++ b/drivers/atm/eni.c
38447@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
38448 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
38449 vcc->dev->number);
38450 length = 0;
38451- atomic_inc(&vcc->stats->rx_err);
38452+ atomic_inc_unchecked(&vcc->stats->rx_err);
38453 }
38454 else {
38455 length = ATM_CELL_SIZE-1; /* no HEC */
38456@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
38457 size);
38458 }
38459 eff = length = 0;
38460- atomic_inc(&vcc->stats->rx_err);
38461+ atomic_inc_unchecked(&vcc->stats->rx_err);
38462 }
38463 else {
38464 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
38465@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
38466 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
38467 vcc->dev->number,vcc->vci,length,size << 2,descr);
38468 length = eff = 0;
38469- atomic_inc(&vcc->stats->rx_err);
38470+ atomic_inc_unchecked(&vcc->stats->rx_err);
38471 }
38472 }
38473 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
38474@@ -767,7 +767,7 @@ rx_dequeued++;
38475 vcc->push(vcc,skb);
38476 pushed++;
38477 }
38478- atomic_inc(&vcc->stats->rx);
38479+ atomic_inc_unchecked(&vcc->stats->rx);
38480 }
38481 wake_up(&eni_dev->rx_wait);
38482 }
38483@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
38484 PCI_DMA_TODEVICE);
38485 if (vcc->pop) vcc->pop(vcc,skb);
38486 else dev_kfree_skb_irq(skb);
38487- atomic_inc(&vcc->stats->tx);
38488+ atomic_inc_unchecked(&vcc->stats->tx);
38489 wake_up(&eni_dev->tx_wait);
38490 dma_complete++;
38491 }
38492diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
38493index 82f2ae0..f205c02 100644
38494--- a/drivers/atm/firestream.c
38495+++ b/drivers/atm/firestream.c
38496@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
38497 }
38498 }
38499
38500- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
38501+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
38502
38503 fs_dprintk (FS_DEBUG_TXMEM, "i");
38504 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
38505@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
38506 #endif
38507 skb_put (skb, qe->p1 & 0xffff);
38508 ATM_SKB(skb)->vcc = atm_vcc;
38509- atomic_inc(&atm_vcc->stats->rx);
38510+ atomic_inc_unchecked(&atm_vcc->stats->rx);
38511 __net_timestamp(skb);
38512 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
38513 atm_vcc->push (atm_vcc, skb);
38514@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
38515 kfree (pe);
38516 }
38517 if (atm_vcc)
38518- atomic_inc(&atm_vcc->stats->rx_drop);
38519+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
38520 break;
38521 case 0x1f: /* Reassembly abort: no buffers. */
38522 /* Silently increment error counter. */
38523 if (atm_vcc)
38524- atomic_inc(&atm_vcc->stats->rx_drop);
38525+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
38526 break;
38527 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
38528 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
38529diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
38530index d4725fc..2d4ea65 100644
38531--- a/drivers/atm/fore200e.c
38532+++ b/drivers/atm/fore200e.c
38533@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
38534 #endif
38535 /* check error condition */
38536 if (*entry->status & STATUS_ERROR)
38537- atomic_inc(&vcc->stats->tx_err);
38538+ atomic_inc_unchecked(&vcc->stats->tx_err);
38539 else
38540- atomic_inc(&vcc->stats->tx);
38541+ atomic_inc_unchecked(&vcc->stats->tx);
38542 }
38543 }
38544
38545@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
38546 if (skb == NULL) {
38547 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
38548
38549- atomic_inc(&vcc->stats->rx_drop);
38550+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38551 return -ENOMEM;
38552 }
38553
38554@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
38555
38556 dev_kfree_skb_any(skb);
38557
38558- atomic_inc(&vcc->stats->rx_drop);
38559+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38560 return -ENOMEM;
38561 }
38562
38563 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
38564
38565 vcc->push(vcc, skb);
38566- atomic_inc(&vcc->stats->rx);
38567+ atomic_inc_unchecked(&vcc->stats->rx);
38568
38569 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
38570
38571@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
38572 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
38573 fore200e->atm_dev->number,
38574 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
38575- atomic_inc(&vcc->stats->rx_err);
38576+ atomic_inc_unchecked(&vcc->stats->rx_err);
38577 }
38578 }
38579
38580@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
38581 goto retry_here;
38582 }
38583
38584- atomic_inc(&vcc->stats->tx_err);
38585+ atomic_inc_unchecked(&vcc->stats->tx_err);
38586
38587 fore200e->tx_sat++;
38588 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
38589diff --git a/drivers/atm/he.c b/drivers/atm/he.c
38590index aa6be26..f70a785 100644
38591--- a/drivers/atm/he.c
38592+++ b/drivers/atm/he.c
38593@@ -1690,7 +1690,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
38594
38595 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
38596 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
38597- atomic_inc(&vcc->stats->rx_drop);
38598+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38599 goto return_host_buffers;
38600 }
38601
38602@@ -1717,7 +1717,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
38603 RBRQ_LEN_ERR(he_dev->rbrq_head)
38604 ? "LEN_ERR" : "",
38605 vcc->vpi, vcc->vci);
38606- atomic_inc(&vcc->stats->rx_err);
38607+ atomic_inc_unchecked(&vcc->stats->rx_err);
38608 goto return_host_buffers;
38609 }
38610
38611@@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
38612 vcc->push(vcc, skb);
38613 spin_lock(&he_dev->global_lock);
38614
38615- atomic_inc(&vcc->stats->rx);
38616+ atomic_inc_unchecked(&vcc->stats->rx);
38617
38618 return_host_buffers:
38619 ++pdus_assembled;
38620@@ -2095,7 +2095,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
38621 tpd->vcc->pop(tpd->vcc, tpd->skb);
38622 else
38623 dev_kfree_skb_any(tpd->skb);
38624- atomic_inc(&tpd->vcc->stats->tx_err);
38625+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
38626 }
38627 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
38628 return;
38629@@ -2507,7 +2507,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38630 vcc->pop(vcc, skb);
38631 else
38632 dev_kfree_skb_any(skb);
38633- atomic_inc(&vcc->stats->tx_err);
38634+ atomic_inc_unchecked(&vcc->stats->tx_err);
38635 return -EINVAL;
38636 }
38637
38638@@ -2518,7 +2518,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38639 vcc->pop(vcc, skb);
38640 else
38641 dev_kfree_skb_any(skb);
38642- atomic_inc(&vcc->stats->tx_err);
38643+ atomic_inc_unchecked(&vcc->stats->tx_err);
38644 return -EINVAL;
38645 }
38646 #endif
38647@@ -2530,7 +2530,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38648 vcc->pop(vcc, skb);
38649 else
38650 dev_kfree_skb_any(skb);
38651- atomic_inc(&vcc->stats->tx_err);
38652+ atomic_inc_unchecked(&vcc->stats->tx_err);
38653 spin_unlock_irqrestore(&he_dev->global_lock, flags);
38654 return -ENOMEM;
38655 }
38656@@ -2572,7 +2572,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38657 vcc->pop(vcc, skb);
38658 else
38659 dev_kfree_skb_any(skb);
38660- atomic_inc(&vcc->stats->tx_err);
38661+ atomic_inc_unchecked(&vcc->stats->tx_err);
38662 spin_unlock_irqrestore(&he_dev->global_lock, flags);
38663 return -ENOMEM;
38664 }
38665@@ -2603,7 +2603,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38666 __enqueue_tpd(he_dev, tpd, cid);
38667 spin_unlock_irqrestore(&he_dev->global_lock, flags);
38668
38669- atomic_inc(&vcc->stats->tx);
38670+ atomic_inc_unchecked(&vcc->stats->tx);
38671
38672 return 0;
38673 }
38674diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
38675index 1dc0519..1aadaf7 100644
38676--- a/drivers/atm/horizon.c
38677+++ b/drivers/atm/horizon.c
38678@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
38679 {
38680 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
38681 // VC layer stats
38682- atomic_inc(&vcc->stats->rx);
38683+ atomic_inc_unchecked(&vcc->stats->rx);
38684 __net_timestamp(skb);
38685 // end of our responsibility
38686 vcc->push (vcc, skb);
38687@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
38688 dev->tx_iovec = NULL;
38689
38690 // VC layer stats
38691- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
38692+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
38693
38694 // free the skb
38695 hrz_kfree_skb (skb);
38696diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
38697index b621f56..1e3a799 100644
38698--- a/drivers/atm/idt77252.c
38699+++ b/drivers/atm/idt77252.c
38700@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
38701 else
38702 dev_kfree_skb(skb);
38703
38704- atomic_inc(&vcc->stats->tx);
38705+ atomic_inc_unchecked(&vcc->stats->tx);
38706 }
38707
38708 atomic_dec(&scq->used);
38709@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38710 if ((sb = dev_alloc_skb(64)) == NULL) {
38711 printk("%s: Can't allocate buffers for aal0.\n",
38712 card->name);
38713- atomic_add(i, &vcc->stats->rx_drop);
38714+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
38715 break;
38716 }
38717 if (!atm_charge(vcc, sb->truesize)) {
38718 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
38719 card->name);
38720- atomic_add(i - 1, &vcc->stats->rx_drop);
38721+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
38722 dev_kfree_skb(sb);
38723 break;
38724 }
38725@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38726 ATM_SKB(sb)->vcc = vcc;
38727 __net_timestamp(sb);
38728 vcc->push(vcc, sb);
38729- atomic_inc(&vcc->stats->rx);
38730+ atomic_inc_unchecked(&vcc->stats->rx);
38731
38732 cell += ATM_CELL_PAYLOAD;
38733 }
38734@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38735 "(CDC: %08x)\n",
38736 card->name, len, rpp->len, readl(SAR_REG_CDC));
38737 recycle_rx_pool_skb(card, rpp);
38738- atomic_inc(&vcc->stats->rx_err);
38739+ atomic_inc_unchecked(&vcc->stats->rx_err);
38740 return;
38741 }
38742 if (stat & SAR_RSQE_CRC) {
38743 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
38744 recycle_rx_pool_skb(card, rpp);
38745- atomic_inc(&vcc->stats->rx_err);
38746+ atomic_inc_unchecked(&vcc->stats->rx_err);
38747 return;
38748 }
38749 if (skb_queue_len(&rpp->queue) > 1) {
38750@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38751 RXPRINTK("%s: Can't alloc RX skb.\n",
38752 card->name);
38753 recycle_rx_pool_skb(card, rpp);
38754- atomic_inc(&vcc->stats->rx_err);
38755+ atomic_inc_unchecked(&vcc->stats->rx_err);
38756 return;
38757 }
38758 if (!atm_charge(vcc, skb->truesize)) {
38759@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38760 __net_timestamp(skb);
38761
38762 vcc->push(vcc, skb);
38763- atomic_inc(&vcc->stats->rx);
38764+ atomic_inc_unchecked(&vcc->stats->rx);
38765
38766 return;
38767 }
38768@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38769 __net_timestamp(skb);
38770
38771 vcc->push(vcc, skb);
38772- atomic_inc(&vcc->stats->rx);
38773+ atomic_inc_unchecked(&vcc->stats->rx);
38774
38775 if (skb->truesize > SAR_FB_SIZE_3)
38776 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
38777@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
38778 if (vcc->qos.aal != ATM_AAL0) {
38779 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
38780 card->name, vpi, vci);
38781- atomic_inc(&vcc->stats->rx_drop);
38782+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38783 goto drop;
38784 }
38785
38786 if ((sb = dev_alloc_skb(64)) == NULL) {
38787 printk("%s: Can't allocate buffers for AAL0.\n",
38788 card->name);
38789- atomic_inc(&vcc->stats->rx_err);
38790+ atomic_inc_unchecked(&vcc->stats->rx_err);
38791 goto drop;
38792 }
38793
38794@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
38795 ATM_SKB(sb)->vcc = vcc;
38796 __net_timestamp(sb);
38797 vcc->push(vcc, sb);
38798- atomic_inc(&vcc->stats->rx);
38799+ atomic_inc_unchecked(&vcc->stats->rx);
38800
38801 drop:
38802 skb_pull(queue, 64);
38803@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
38804
38805 if (vc == NULL) {
38806 printk("%s: NULL connection in send().\n", card->name);
38807- atomic_inc(&vcc->stats->tx_err);
38808+ atomic_inc_unchecked(&vcc->stats->tx_err);
38809 dev_kfree_skb(skb);
38810 return -EINVAL;
38811 }
38812 if (!test_bit(VCF_TX, &vc->flags)) {
38813 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
38814- atomic_inc(&vcc->stats->tx_err);
38815+ atomic_inc_unchecked(&vcc->stats->tx_err);
38816 dev_kfree_skb(skb);
38817 return -EINVAL;
38818 }
38819@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
38820 break;
38821 default:
38822 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
38823- atomic_inc(&vcc->stats->tx_err);
38824+ atomic_inc_unchecked(&vcc->stats->tx_err);
38825 dev_kfree_skb(skb);
38826 return -EINVAL;
38827 }
38828
38829 if (skb_shinfo(skb)->nr_frags != 0) {
38830 printk("%s: No scatter-gather yet.\n", card->name);
38831- atomic_inc(&vcc->stats->tx_err);
38832+ atomic_inc_unchecked(&vcc->stats->tx_err);
38833 dev_kfree_skb(skb);
38834 return -EINVAL;
38835 }
38836@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
38837
38838 err = queue_skb(card, vc, skb, oam);
38839 if (err) {
38840- atomic_inc(&vcc->stats->tx_err);
38841+ atomic_inc_unchecked(&vcc->stats->tx_err);
38842 dev_kfree_skb(skb);
38843 return err;
38844 }
38845@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
38846 skb = dev_alloc_skb(64);
38847 if (!skb) {
38848 printk("%s: Out of memory in send_oam().\n", card->name);
38849- atomic_inc(&vcc->stats->tx_err);
38850+ atomic_inc_unchecked(&vcc->stats->tx_err);
38851 return -ENOMEM;
38852 }
38853 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
38854diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
38855index 4217f29..88f547a 100644
38856--- a/drivers/atm/iphase.c
38857+++ b/drivers/atm/iphase.c
38858@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
38859 status = (u_short) (buf_desc_ptr->desc_mode);
38860 if (status & (RX_CER | RX_PTE | RX_OFL))
38861 {
38862- atomic_inc(&vcc->stats->rx_err);
38863+ atomic_inc_unchecked(&vcc->stats->rx_err);
38864 IF_ERR(printk("IA: bad packet, dropping it");)
38865 if (status & RX_CER) {
38866 IF_ERR(printk(" cause: packet CRC error\n");)
38867@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
38868 len = dma_addr - buf_addr;
38869 if (len > iadev->rx_buf_sz) {
38870 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
38871- atomic_inc(&vcc->stats->rx_err);
38872+ atomic_inc_unchecked(&vcc->stats->rx_err);
38873 goto out_free_desc;
38874 }
38875
38876@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
38877 ia_vcc = INPH_IA_VCC(vcc);
38878 if (ia_vcc == NULL)
38879 {
38880- atomic_inc(&vcc->stats->rx_err);
38881+ atomic_inc_unchecked(&vcc->stats->rx_err);
38882 atm_return(vcc, skb->truesize);
38883 dev_kfree_skb_any(skb);
38884 goto INCR_DLE;
38885@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
38886 if ((length > iadev->rx_buf_sz) || (length >
38887 (skb->len - sizeof(struct cpcs_trailer))))
38888 {
38889- atomic_inc(&vcc->stats->rx_err);
38890+ atomic_inc_unchecked(&vcc->stats->rx_err);
38891 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
38892 length, skb->len);)
38893 atm_return(vcc, skb->truesize);
38894@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
38895
38896 IF_RX(printk("rx_dle_intr: skb push");)
38897 vcc->push(vcc,skb);
38898- atomic_inc(&vcc->stats->rx);
38899+ atomic_inc_unchecked(&vcc->stats->rx);
38900 iadev->rx_pkt_cnt++;
38901 }
38902 INCR_DLE:
38903@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
38904 {
38905 struct k_sonet_stats *stats;
38906 stats = &PRIV(_ia_dev[board])->sonet_stats;
38907- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
38908- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
38909- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
38910- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
38911- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
38912- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
38913- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
38914- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
38915- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
38916+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
38917+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
38918+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
38919+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
38920+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
38921+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
38922+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
38923+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
38924+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
38925 }
38926 ia_cmds.status = 0;
38927 break;
38928@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
38929 if ((desc == 0) || (desc > iadev->num_tx_desc))
38930 {
38931 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
38932- atomic_inc(&vcc->stats->tx);
38933+ atomic_inc_unchecked(&vcc->stats->tx);
38934 if (vcc->pop)
38935 vcc->pop(vcc, skb);
38936 else
38937@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
38938 ATM_DESC(skb) = vcc->vci;
38939 skb_queue_tail(&iadev->tx_dma_q, skb);
38940
38941- atomic_inc(&vcc->stats->tx);
38942+ atomic_inc_unchecked(&vcc->stats->tx);
38943 iadev->tx_pkt_cnt++;
38944 /* Increment transaction counter */
38945 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
38946
38947 #if 0
38948 /* add flow control logic */
38949- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
38950+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
38951 if (iavcc->vc_desc_cnt > 10) {
38952 vcc->tx_quota = vcc->tx_quota * 3 / 4;
38953 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
38954diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
38955index fa7d7019..1e404c7 100644
38956--- a/drivers/atm/lanai.c
38957+++ b/drivers/atm/lanai.c
38958@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
38959 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
38960 lanai_endtx(lanai, lvcc);
38961 lanai_free_skb(lvcc->tx.atmvcc, skb);
38962- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
38963+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
38964 }
38965
38966 /* Try to fill the buffer - don't call unless there is backlog */
38967@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
38968 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
38969 __net_timestamp(skb);
38970 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
38971- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
38972+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
38973 out:
38974 lvcc->rx.buf.ptr = end;
38975 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
38976@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
38977 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
38978 "vcc %d\n", lanai->number, (unsigned int) s, vci);
38979 lanai->stats.service_rxnotaal5++;
38980- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
38981+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
38982 return 0;
38983 }
38984 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
38985@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
38986 int bytes;
38987 read_unlock(&vcc_sklist_lock);
38988 DPRINTK("got trashed rx pdu on vci %d\n", vci);
38989- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
38990+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
38991 lvcc->stats.x.aal5.service_trash++;
38992 bytes = (SERVICE_GET_END(s) * 16) -
38993 (((unsigned long) lvcc->rx.buf.ptr) -
38994@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
38995 }
38996 if (s & SERVICE_STREAM) {
38997 read_unlock(&vcc_sklist_lock);
38998- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
38999+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
39000 lvcc->stats.x.aal5.service_stream++;
39001 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
39002 "PDU on VCI %d!\n", lanai->number, vci);
39003@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
39004 return 0;
39005 }
39006 DPRINTK("got rx crc error on vci %d\n", vci);
39007- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
39008+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
39009 lvcc->stats.x.aal5.service_rxcrc++;
39010 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
39011 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
39012diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
39013index 9988ac9..7c52585 100644
39014--- a/drivers/atm/nicstar.c
39015+++ b/drivers/atm/nicstar.c
39016@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
39017 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
39018 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
39019 card->index);
39020- atomic_inc(&vcc->stats->tx_err);
39021+ atomic_inc_unchecked(&vcc->stats->tx_err);
39022 dev_kfree_skb_any(skb);
39023 return -EINVAL;
39024 }
39025@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
39026 if (!vc->tx) {
39027 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
39028 card->index);
39029- atomic_inc(&vcc->stats->tx_err);
39030+ atomic_inc_unchecked(&vcc->stats->tx_err);
39031 dev_kfree_skb_any(skb);
39032 return -EINVAL;
39033 }
39034@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
39035 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
39036 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
39037 card->index);
39038- atomic_inc(&vcc->stats->tx_err);
39039+ atomic_inc_unchecked(&vcc->stats->tx_err);
39040 dev_kfree_skb_any(skb);
39041 return -EINVAL;
39042 }
39043
39044 if (skb_shinfo(skb)->nr_frags != 0) {
39045 printk("nicstar%d: No scatter-gather yet.\n", card->index);
39046- atomic_inc(&vcc->stats->tx_err);
39047+ atomic_inc_unchecked(&vcc->stats->tx_err);
39048 dev_kfree_skb_any(skb);
39049 return -EINVAL;
39050 }
39051@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
39052 }
39053
39054 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
39055- atomic_inc(&vcc->stats->tx_err);
39056+ atomic_inc_unchecked(&vcc->stats->tx_err);
39057 dev_kfree_skb_any(skb);
39058 return -EIO;
39059 }
39060- atomic_inc(&vcc->stats->tx);
39061+ atomic_inc_unchecked(&vcc->stats->tx);
39062
39063 return 0;
39064 }
39065@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39066 printk
39067 ("nicstar%d: Can't allocate buffers for aal0.\n",
39068 card->index);
39069- atomic_add(i, &vcc->stats->rx_drop);
39070+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
39071 break;
39072 }
39073 if (!atm_charge(vcc, sb->truesize)) {
39074 RXPRINTK
39075 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
39076 card->index);
39077- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
39078+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
39079 dev_kfree_skb_any(sb);
39080 break;
39081 }
39082@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39083 ATM_SKB(sb)->vcc = vcc;
39084 __net_timestamp(sb);
39085 vcc->push(vcc, sb);
39086- atomic_inc(&vcc->stats->rx);
39087+ atomic_inc_unchecked(&vcc->stats->rx);
39088 cell += ATM_CELL_PAYLOAD;
39089 }
39090
39091@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39092 if (iovb == NULL) {
39093 printk("nicstar%d: Out of iovec buffers.\n",
39094 card->index);
39095- atomic_inc(&vcc->stats->rx_drop);
39096+ atomic_inc_unchecked(&vcc->stats->rx_drop);
39097 recycle_rx_buf(card, skb);
39098 return;
39099 }
39100@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39101 small or large buffer itself. */
39102 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
39103 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
39104- atomic_inc(&vcc->stats->rx_err);
39105+ atomic_inc_unchecked(&vcc->stats->rx_err);
39106 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
39107 NS_MAX_IOVECS);
39108 NS_PRV_IOVCNT(iovb) = 0;
39109@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39110 ("nicstar%d: Expected a small buffer, and this is not one.\n",
39111 card->index);
39112 which_list(card, skb);
39113- atomic_inc(&vcc->stats->rx_err);
39114+ atomic_inc_unchecked(&vcc->stats->rx_err);
39115 recycle_rx_buf(card, skb);
39116 vc->rx_iov = NULL;
39117 recycle_iov_buf(card, iovb);
39118@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39119 ("nicstar%d: Expected a large buffer, and this is not one.\n",
39120 card->index);
39121 which_list(card, skb);
39122- atomic_inc(&vcc->stats->rx_err);
39123+ atomic_inc_unchecked(&vcc->stats->rx_err);
39124 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
39125 NS_PRV_IOVCNT(iovb));
39126 vc->rx_iov = NULL;
39127@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39128 printk(" - PDU size mismatch.\n");
39129 else
39130 printk(".\n");
39131- atomic_inc(&vcc->stats->rx_err);
39132+ atomic_inc_unchecked(&vcc->stats->rx_err);
39133 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
39134 NS_PRV_IOVCNT(iovb));
39135 vc->rx_iov = NULL;
39136@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39137 /* skb points to a small buffer */
39138 if (!atm_charge(vcc, skb->truesize)) {
39139 push_rxbufs(card, skb);
39140- atomic_inc(&vcc->stats->rx_drop);
39141+ atomic_inc_unchecked(&vcc->stats->rx_drop);
39142 } else {
39143 skb_put(skb, len);
39144 dequeue_sm_buf(card, skb);
39145@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39146 ATM_SKB(skb)->vcc = vcc;
39147 __net_timestamp(skb);
39148 vcc->push(vcc, skb);
39149- atomic_inc(&vcc->stats->rx);
39150+ atomic_inc_unchecked(&vcc->stats->rx);
39151 }
39152 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
39153 struct sk_buff *sb;
39154@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39155 if (len <= NS_SMBUFSIZE) {
39156 if (!atm_charge(vcc, sb->truesize)) {
39157 push_rxbufs(card, sb);
39158- atomic_inc(&vcc->stats->rx_drop);
39159+ atomic_inc_unchecked(&vcc->stats->rx_drop);
39160 } else {
39161 skb_put(sb, len);
39162 dequeue_sm_buf(card, sb);
39163@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39164 ATM_SKB(sb)->vcc = vcc;
39165 __net_timestamp(sb);
39166 vcc->push(vcc, sb);
39167- atomic_inc(&vcc->stats->rx);
39168+ atomic_inc_unchecked(&vcc->stats->rx);
39169 }
39170
39171 push_rxbufs(card, skb);
39172@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39173
39174 if (!atm_charge(vcc, skb->truesize)) {
39175 push_rxbufs(card, skb);
39176- atomic_inc(&vcc->stats->rx_drop);
39177+ atomic_inc_unchecked(&vcc->stats->rx_drop);
39178 } else {
39179 dequeue_lg_buf(card, skb);
39180 #ifdef NS_USE_DESTRUCTORS
39181@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39182 ATM_SKB(skb)->vcc = vcc;
39183 __net_timestamp(skb);
39184 vcc->push(vcc, skb);
39185- atomic_inc(&vcc->stats->rx);
39186+ atomic_inc_unchecked(&vcc->stats->rx);
39187 }
39188
39189 push_rxbufs(card, sb);
39190@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39191 printk
39192 ("nicstar%d: Out of huge buffers.\n",
39193 card->index);
39194- atomic_inc(&vcc->stats->rx_drop);
39195+ atomic_inc_unchecked(&vcc->stats->rx_drop);
39196 recycle_iovec_rx_bufs(card,
39197 (struct iovec *)
39198 iovb->data,
39199@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39200 card->hbpool.count++;
39201 } else
39202 dev_kfree_skb_any(hb);
39203- atomic_inc(&vcc->stats->rx_drop);
39204+ atomic_inc_unchecked(&vcc->stats->rx_drop);
39205 } else {
39206 /* Copy the small buffer to the huge buffer */
39207 sb = (struct sk_buff *)iov->iov_base;
39208@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39209 #endif /* NS_USE_DESTRUCTORS */
39210 __net_timestamp(hb);
39211 vcc->push(vcc, hb);
39212- atomic_inc(&vcc->stats->rx);
39213+ atomic_inc_unchecked(&vcc->stats->rx);
39214 }
39215 }
39216
39217diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
39218index 943cf0d..37d15d5 100644
39219--- a/drivers/atm/solos-pci.c
39220+++ b/drivers/atm/solos-pci.c
39221@@ -838,7 +838,7 @@ static void solos_bh(unsigned long card_arg)
39222 }
39223 atm_charge(vcc, skb->truesize);
39224 vcc->push(vcc, skb);
39225- atomic_inc(&vcc->stats->rx);
39226+ atomic_inc_unchecked(&vcc->stats->rx);
39227 break;
39228
39229 case PKT_STATUS:
39230@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
39231 vcc = SKB_CB(oldskb)->vcc;
39232
39233 if (vcc) {
39234- atomic_inc(&vcc->stats->tx);
39235+ atomic_inc_unchecked(&vcc->stats->tx);
39236 solos_pop(vcc, oldskb);
39237 } else {
39238 dev_kfree_skb_irq(oldskb);
39239diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
39240index 0215934..ce9f5b1 100644
39241--- a/drivers/atm/suni.c
39242+++ b/drivers/atm/suni.c
39243@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
39244
39245
39246 #define ADD_LIMITED(s,v) \
39247- atomic_add((v),&stats->s); \
39248- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
39249+ atomic_add_unchecked((v),&stats->s); \
39250+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
39251
39252
39253 static void suni_hz(unsigned long from_timer)
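ADD_LIMITED is a saturating accumulator: add the sample and, if the signed counter has wrapped negative, pin it at INT_MAX. That intentional wrap is exactly what the REFCOUNT plugin would flag, hence the switch to the unchecked ops here and in uPD98402.c below. The same idea written as a stand-alone pre-check, using a plain int instead of kernel atomics and assuming non-negative inputs:

        #include <limits.h>

        /* clamp before adding so the signed counter can never wrap */
        static void add_limited(int *ctr, int v)
        {
                if (v >= INT_MAX - *ctr)        /* would exceed INT_MAX */
                        *ctr = INT_MAX;
                else
                        *ctr += v;
        }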
39254diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
39255index 5120a96..e2572bd 100644
39256--- a/drivers/atm/uPD98402.c
39257+++ b/drivers/atm/uPD98402.c
39258@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
39259 struct sonet_stats tmp;
39260 int error = 0;
39261
39262- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
39263+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
39264 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
39265 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
39266 if (zero && !error) {
39267@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
39268
39269
39270 #define ADD_LIMITED(s,v) \
39271- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
39272- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
39273- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
39274+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
39275+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
39276+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
39277
39278
39279 static void stat_event(struct atm_dev *dev)
39280@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
39281 if (reason & uPD98402_INT_PFM) stat_event(dev);
39282 if (reason & uPD98402_INT_PCO) {
39283 (void) GET(PCOCR); /* clear interrupt cause */
39284- atomic_add(GET(HECCT),
39285+ atomic_add_unchecked(GET(HECCT),
39286 &PRIV(dev)->sonet_stats.uncorr_hcs);
39287 }
39288 if ((reason & uPD98402_INT_RFO) &&
39289@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
39290 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
39291 uPD98402_INT_LOS),PIMR); /* enable them */
39292 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
39293- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
39294- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
39295- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
39296+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
39297+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
39298+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
39299 return 0;
39300 }
39301
39302diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
39303index 969c3c2..9b72956 100644
39304--- a/drivers/atm/zatm.c
39305+++ b/drivers/atm/zatm.c
39306@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
39307 }
39308 if (!size) {
39309 dev_kfree_skb_irq(skb);
39310- if (vcc) atomic_inc(&vcc->stats->rx_err);
39311+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
39312 continue;
39313 }
39314 if (!atm_charge(vcc,skb->truesize)) {
39315@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
39316 skb->len = size;
39317 ATM_SKB(skb)->vcc = vcc;
39318 vcc->push(vcc,skb);
39319- atomic_inc(&vcc->stats->rx);
39320+ atomic_inc_unchecked(&vcc->stats->rx);
39321 }
39322 zout(pos & 0xffff,MTA(mbx));
39323 #if 0 /* probably a stupid idea */
39324@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
39325 skb_queue_head(&zatm_vcc->backlog,skb);
39326 break;
39327 }
39328- atomic_inc(&vcc->stats->tx);
39329+ atomic_inc_unchecked(&vcc->stats->tx);
39330 wake_up(&zatm_vcc->tx_wait);
39331 }
39332
39333diff --git a/drivers/base/bus.c b/drivers/base/bus.c
39334index 83e910a..b224a73 100644
39335--- a/drivers/base/bus.c
39336+++ b/drivers/base/bus.c
39337@@ -1124,7 +1124,7 @@ int subsys_interface_register(struct subsys_interface *sif)
39338 return -EINVAL;
39339
39340 mutex_lock(&subsys->p->mutex);
39341- list_add_tail(&sif->node, &subsys->p->interfaces);
39342+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
39343 if (sif->add_dev) {
39344 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
39345 while ((dev = subsys_dev_iter_next(&iter)))
39346@@ -1149,7 +1149,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
39347 subsys = sif->subsys;
39348
39349 mutex_lock(&subsys->p->mutex);
39350- list_del_init(&sif->node);
39351+ pax_list_del_init((struct list_head *)&sif->node);
39352 if (sif->remove_dev) {
39353 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
39354 while ((dev = subsys_dev_iter_next(&iter)))
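struct subsys_interface is constified by the gcc plugin, so its embedded list node ends up in read-only memory and a plain list_add_tail() would fault on the write. The pax_list_* wrappers run the unmodified list primitives inside a window where kernel write protection is lifted; the same helpers recur in syscore.c further down. A simplified sketch (the real PaX helpers also sanity-check the list pointers):

        #include <linux/list.h>

        static inline void pax_list_add_tail(struct list_head *new,
                                             struct list_head *head)
        {
                pax_open_kernel();      /* allow stores to read-only data */
                list_add_tail(new, head);
                pax_close_kernel();     /* restore write protection */
        }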
39355diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
39356index 25798db..15f130e 100644
39357--- a/drivers/base/devtmpfs.c
39358+++ b/drivers/base/devtmpfs.c
39359@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
39360 if (!thread)
39361 return 0;
39362
39363- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
39364+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
39365 if (err)
39366 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
39367 else
39368@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
39369 *err = sys_unshare(CLONE_NEWNS);
39370 if (*err)
39371 goto out;
39372- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
39373+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
39374 if (*err)
39375 goto out;
39376- sys_chdir("/.."); /* will traverse into overmounted root */
39377- sys_chroot(".");
39378+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
39379+ sys_chroot((char __force_user *)".");
39380 complete(&setup_done);
39381 while (1) {
39382 spin_lock(&req_lock);
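devtmpfs invokes sys_mount()/sys_chdir()/sys_chroot() with kernel-space strings, while under the hardened sparse annotations those entry points expect __user pointers, so every literal gets an explicit re-annotation. The cast macro reduces to sparse attributes and costs nothing at run time; the spelling below mirrors the patch's compiler.h additions from memory and should be treated as an assumption, not a quotation:

        /* __force and __user expand to nothing outside sparse, so the
         * (char __force_user *) casts are plain casts at compile time */
        #define __force_user    __force __user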
39383diff --git a/drivers/base/node.c b/drivers/base/node.c
39384index 8f7ed99..700dd0c 100644
39385--- a/drivers/base/node.c
39386+++ b/drivers/base/node.c
39387@@ -624,7 +624,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
39388 struct node_attr {
39389 struct device_attribute attr;
39390 enum node_states state;
39391-};
39392+} __do_const;
39393
39394 static ssize_t show_node_state(struct device *dev,
39395 struct device_attribute *attr, char *buf)
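Tagging struct node_attr with __do_const asks the constify gcc plugin to treat every instance as const and place it in read-only data, which suits a table of attributes filled in once at build time. The tag itself is just a plugin-recognized attribute; the spelling below is assumed from the patch's conventions rather than quoted:

        #ifdef CONSTIFY_PLUGIN
        # define __do_const     __attribute__((do_const))
        #else
        # define __do_const
        #endif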
39396diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
39397index eee55c1..b8c9393 100644
39398--- a/drivers/base/power/domain.c
39399+++ b/drivers/base/power/domain.c
39400@@ -1821,9 +1821,9 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
39401
39402 if (dev->power.subsys_data->domain_data) {
39403 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
39404- gpd_data->ops = (struct gpd_dev_ops){ NULL };
39405+ memset(&gpd_data->ops, 0, sizeof(gpd_data->ops));
39406 if (clear_td)
39407- gpd_data->td = (struct gpd_timing_data){ 0 };
39408+ memset(&gpd_data->td, 0, sizeof(gpd_data->td));
39409
39410 if (--gpd_data->refcount == 0) {
39411 dev->power.subsys_data->domain_data = NULL;
39412@@ -1862,7 +1862,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
39413 {
39414 struct cpuidle_driver *cpuidle_drv;
39415 struct gpd_cpu_data *cpu_data;
39416- struct cpuidle_state *idle_state;
39417+ cpuidle_state_no_const *idle_state;
39418 int ret = 0;
39419
39420 if (IS_ERR_OR_NULL(genpd) || state < 0)
39421@@ -1930,7 +1930,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
39422 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
39423 {
39424 struct gpd_cpu_data *cpu_data;
39425- struct cpuidle_state *idle_state;
39426+ cpuidle_state_no_const *idle_state;
39427 int ret = 0;
39428
39429 if (IS_ERR_OR_NULL(genpd))
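Once the ops structure is constified, whole-struct assignment from a compound literal no longer compiles, because the lvalue now contains const-qualified members; byte-wise memset() zeroes the same storage without an assignment, and all-zero bits is a NULL function pointer on every target Linux supports. Reduced to a hypothetical fragment:

        #include <string.h>

        struct ops { void (*cb)(void); };

        static void clear_ops(struct ops *o)
        {
                /* before: *o = (struct ops){ NULL };  (struct assignment) */
                memset(o, 0, sizeof(*o));       /* after: same bytes */
        }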
39430diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
39431index 95b181d1..c4f0e19 100644
39432--- a/drivers/base/power/sysfs.c
39433+++ b/drivers/base/power/sysfs.c
39434@@ -185,7 +185,7 @@ static ssize_t rtpm_status_show(struct device *dev,
39435 return -EIO;
39436 }
39437 }
39438- return sprintf(buf, p);
39439+ return sprintf(buf, "%s", p);
39440 }
39441
39442 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
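The rtpm_status_show() hunk is a straight format-string fix: p is a state-derived string, and passing it as the format means any '%' in it is parsed as a conversion against arguments that were never supplied. A userspace reduction of the bug and the fix:

        #include <stdio.h>

        int main(void)
        {
                const char *p = "active (100%sure)";    /* hypothetical value */
                char buf[64];

                /* sprintf(buf, p); would parse %s with no matching argument */
                int n = sprintf(buf, "%s", p);          /* the patched form */
                printf("%d: %s\n", n, buf);
                return 0;
        }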
39443diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
39444index eb1bd2e..2667d3a 100644
39445--- a/drivers/base/power/wakeup.c
39446+++ b/drivers/base/power/wakeup.c
39447@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
39448 * They need to be modified together atomically, so it's better to use one
39449 * atomic variable to hold them both.
39450 */
39451-static atomic_t combined_event_count = ATOMIC_INIT(0);
39452+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
39453
39454 #define IN_PROGRESS_BITS (sizeof(int) * 4)
39455 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
39456
39457 static void split_counters(unsigned int *cnt, unsigned int *inpr)
39458 {
39459- unsigned int comb = atomic_read(&combined_event_count);
39460+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
39461
39462 *cnt = (comb >> IN_PROGRESS_BITS);
39463 *inpr = comb & MAX_IN_PROGRESS;
39464@@ -401,7 +401,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
39465 ws->start_prevent_time = ws->last_time;
39466
39467 /* Increment the counter of events in progress. */
39468- cec = atomic_inc_return(&combined_event_count);
39469+ cec = atomic_inc_return_unchecked(&combined_event_count);
39470
39471 trace_wakeup_source_activate(ws->name, cec);
39472 }
39473@@ -527,7 +527,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
39474 * Increment the counter of registered wakeup events and decrement the
39475 * counter of wakeup events in progress simultaneously.
39476 */
39477- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
39478+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
39479 trace_wakeup_source_deactivate(ws->name, cec);
39480
39481 split_counters(&cnt, &inpr);
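combined_event_count packs two half-word fields into one atomic word so they can be updated together, and the counter is expected to wrap; that is precisely the behaviour REFCOUNT would trap, hence the unchecked ops. The packing restated outside the kernel, mirroring split_counters() in the hunk above:

        #define IN_PROGRESS_BITS (sizeof(int) * 4)
        #define MAX_IN_PROGRESS  ((1u << IN_PROGRESS_BITS) - 1)

        static void split(unsigned int comb, unsigned int *cnt,
                          unsigned int *inpr)
        {
                *cnt  = comb >> IN_PROGRESS_BITS;       /* registered events */
                *inpr = comb & MAX_IN_PROGRESS;         /* events in progress */
        }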
39482diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
39483index dbb8350..4762f4c 100644
39484--- a/drivers/base/syscore.c
39485+++ b/drivers/base/syscore.c
39486@@ -22,7 +22,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
39487 void register_syscore_ops(struct syscore_ops *ops)
39488 {
39489 mutex_lock(&syscore_ops_lock);
39490- list_add_tail(&ops->node, &syscore_ops_list);
39491+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
39492 mutex_unlock(&syscore_ops_lock);
39493 }
39494 EXPORT_SYMBOL_GPL(register_syscore_ops);
39495@@ -34,7 +34,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
39496 void unregister_syscore_ops(struct syscore_ops *ops)
39497 {
39498 mutex_lock(&syscore_ops_lock);
39499- list_del(&ops->node);
39500+ pax_list_del((struct list_head *)&ops->node);
39501 mutex_unlock(&syscore_ops_lock);
39502 }
39503 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
39504diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
39505index 4595c22..d4f6c54 100644
39506--- a/drivers/block/cciss.c
39507+++ b/drivers/block/cciss.c
39508@@ -3011,7 +3011,7 @@ static void start_io(ctlr_info_t *h)
39509 while (!list_empty(&h->reqQ)) {
39510 c = list_entry(h->reqQ.next, CommandList_struct, list);
39511 /* can't do anything if fifo is full */
39512- if ((h->access.fifo_full(h))) {
39513+ if ((h->access->fifo_full(h))) {
39514 dev_warn(&h->pdev->dev, "fifo full\n");
39515 break;
39516 }
39517@@ -3021,7 +3021,7 @@ static void start_io(ctlr_info_t *h)
39518 h->Qdepth--;
39519
39520 /* Tell the controller execute command */
39521- h->access.submit_command(h, c);
39522+ h->access->submit_command(h, c);
39523
39524 /* Put job onto the completed Q */
39525 addQ(&h->cmpQ, c);
39526@@ -3447,17 +3447,17 @@ startio:
39527
39528 static inline unsigned long get_next_completion(ctlr_info_t *h)
39529 {
39530- return h->access.command_completed(h);
39531+ return h->access->command_completed(h);
39532 }
39533
39534 static inline int interrupt_pending(ctlr_info_t *h)
39535 {
39536- return h->access.intr_pending(h);
39537+ return h->access->intr_pending(h);
39538 }
39539
39540 static inline long interrupt_not_for_us(ctlr_info_t *h)
39541 {
39542- return ((h->access.intr_pending(h) == 0) ||
39543+ return ((h->access->intr_pending(h) == 0) ||
39544 (h->interrupts_enabled == 0));
39545 }
39546
39547@@ -3490,7 +3490,7 @@ static inline u32 next_command(ctlr_info_t *h)
39548 u32 a;
39549
39550 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
39551- return h->access.command_completed(h);
39552+ return h->access->command_completed(h);
39553
39554 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
39555 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
39556@@ -4047,7 +4047,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
39557 trans_support & CFGTBL_Trans_use_short_tags);
39558
39559 /* Change the access methods to the performant access methods */
39560- h->access = SA5_performant_access;
39561+ h->access = &SA5_performant_access;
39562 h->transMethod = CFGTBL_Trans_Performant;
39563
39564 return;
39565@@ -4321,7 +4321,7 @@ static int cciss_pci_init(ctlr_info_t *h)
39566 if (prod_index < 0)
39567 return -ENODEV;
39568 h->product_name = products[prod_index].product_name;
39569- h->access = *(products[prod_index].access);
39570+ h->access = products[prod_index].access;
39571
39572 if (cciss_board_disabled(h)) {
39573 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
39574@@ -5053,7 +5053,7 @@ reinit_after_soft_reset:
39575 }
39576
39577 /* make sure the board interrupts are off */
39578- h->access.set_intr_mask(h, CCISS_INTR_OFF);
39579+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
39580 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
39581 if (rc)
39582 goto clean2;
39583@@ -5103,7 +5103,7 @@ reinit_after_soft_reset:
39584 * fake ones to scoop up any residual completions.
39585 */
39586 spin_lock_irqsave(&h->lock, flags);
39587- h->access.set_intr_mask(h, CCISS_INTR_OFF);
39588+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
39589 spin_unlock_irqrestore(&h->lock, flags);
39590 free_irq(h->intr[h->intr_mode], h);
39591 rc = cciss_request_irq(h, cciss_msix_discard_completions,
39592@@ -5123,9 +5123,9 @@ reinit_after_soft_reset:
39593 dev_info(&h->pdev->dev, "Board READY.\n");
39594 dev_info(&h->pdev->dev,
39595 "Waiting for stale completions to drain.\n");
39596- h->access.set_intr_mask(h, CCISS_INTR_ON);
39597+ h->access->set_intr_mask(h, CCISS_INTR_ON);
39598 msleep(10000);
39599- h->access.set_intr_mask(h, CCISS_INTR_OFF);
39600+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
39601
39602 rc = controller_reset_failed(h->cfgtable);
39603 if (rc)
39604@@ -5148,7 +5148,7 @@ reinit_after_soft_reset:
39605 cciss_scsi_setup(h);
39606
39607 /* Turn the interrupts on so we can service requests */
39608- h->access.set_intr_mask(h, CCISS_INTR_ON);
39609+ h->access->set_intr_mask(h, CCISS_INTR_ON);
39610
39611 /* Get the firmware version */
39612 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
39613@@ -5220,7 +5220,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
39614 kfree(flush_buf);
39615 if (return_code != IO_OK)
39616 dev_warn(&h->pdev->dev, "Error flushing cache\n");
39617- h->access.set_intr_mask(h, CCISS_INTR_OFF);
39618+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
39619 free_irq(h->intr[h->intr_mode], h);
39620 }
39621
39622diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
39623index 7fda30e..2f27946 100644
39624--- a/drivers/block/cciss.h
39625+++ b/drivers/block/cciss.h
39626@@ -101,7 +101,7 @@ struct ctlr_info
39627 /* information about each logical volume */
39628 drive_info_struct *drv[CISS_MAX_LUN];
39629
39630- struct access_method access;
39631+ struct access_method *access;
39632
39633 /* queue and queue Info */
39634 struct list_head reqQ;
39635@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
39636 }
39637
39638 static struct access_method SA5_access = {
39639- SA5_submit_command,
39640- SA5_intr_mask,
39641- SA5_fifo_full,
39642- SA5_intr_pending,
39643- SA5_completed,
39644+ .submit_command = SA5_submit_command,
39645+ .set_intr_mask = SA5_intr_mask,
39646+ .fifo_full = SA5_fifo_full,
39647+ .intr_pending = SA5_intr_pending,
39648+ .command_completed = SA5_completed,
39649 };
39650
39651 static struct access_method SA5B_access = {
39652- SA5_submit_command,
39653- SA5B_intr_mask,
39654- SA5_fifo_full,
39655- SA5B_intr_pending,
39656- SA5_completed,
39657+ .submit_command = SA5_submit_command,
39658+ .set_intr_mask = SA5B_intr_mask,
39659+ .fifo_full = SA5_fifo_full,
39660+ .intr_pending = SA5B_intr_pending,
39661+ .command_completed = SA5_completed,
39662 };
39663
39664 static struct access_method SA5_performant_access = {
39665- SA5_submit_command,
39666- SA5_performant_intr_mask,
39667- SA5_fifo_full,
39668- SA5_performant_intr_pending,
39669- SA5_performant_completed,
39670+ .submit_command = SA5_submit_command,
39671+ .set_intr_mask = SA5_performant_intr_mask,
39672+ .fifo_full = SA5_fifo_full,
39673+ .intr_pending = SA5_performant_intr_pending,
39674+ .command_completed = SA5_performant_completed,
39675 };
39676
39677 struct board_type {
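Two related changes run through cciss here and through cpqarray and smart1,2.h below: ctlr_info keeps a pointer to its access_method table instead of a private copy, so one shared table can be constified, and the tables switch to designated initializers so each callback verifiably lands in the right slot even if fields are reordered. The shape of the pattern, with hypothetical demo_* names standing in for the real handlers:

        struct access_method {
                void (*submit_command)(void *h, void *c);
                unsigned long (*fifo_full)(void *h);
        };

        struct ctlr_info {
                struct access_method *access;   /* was an embedded copy */
        };

        static void demo_submit(void *h, void *c) { }
        static unsigned long demo_full(void *h) { return 0; }

        static struct access_method demo_access = {
                .submit_command = demo_submit,  /* order-independent */
                .fifo_full      = demo_full,
        };

        /* probe: h->access = &demo_access;  use: h->access->fifo_full(h); */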
39678diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
39679index 2b94403..fd6ad1f 100644
39680--- a/drivers/block/cpqarray.c
39681+++ b/drivers/block/cpqarray.c
39682@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
39683 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
39684 goto Enomem4;
39685 }
39686- hba[i]->access.set_intr_mask(hba[i], 0);
39687+ hba[i]->access->set_intr_mask(hba[i], 0);
39688 if (request_irq(hba[i]->intr, do_ida_intr,
39689 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
39690 {
39691@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
39692 add_timer(&hba[i]->timer);
39693
39694 /* Enable IRQ now that spinlock and rate limit timer are set up */
39695- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
39696+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
39697
39698 for(j=0; j<NWD; j++) {
39699 struct gendisk *disk = ida_gendisk[i][j];
39700@@ -694,7 +694,7 @@ DBGINFO(
39701 for(i=0; i<NR_PRODUCTS; i++) {
39702 if (board_id == products[i].board_id) {
39703 c->product_name = products[i].product_name;
39704- c->access = *(products[i].access);
39705+ c->access = products[i].access;
39706 break;
39707 }
39708 }
39709@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
39710 hba[ctlr]->intr = intr;
39711 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
39712 hba[ctlr]->product_name = products[j].product_name;
39713- hba[ctlr]->access = *(products[j].access);
39714+ hba[ctlr]->access = products[j].access;
39715 hba[ctlr]->ctlr = ctlr;
39716 hba[ctlr]->board_id = board_id;
39717 hba[ctlr]->pci_dev = NULL; /* not PCI */
39718@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
39719
39720 while((c = h->reqQ) != NULL) {
39721 /* Can't do anything if we're busy */
39722- if (h->access.fifo_full(h) == 0)
39723+ if (h->access->fifo_full(h) == 0)
39724 return;
39725
39726 /* Get the first entry from the request Q */
39727@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
39728 h->Qdepth--;
39729
39730 /* Tell the controller to do our bidding */
39731- h->access.submit_command(h, c);
39732+ h->access->submit_command(h, c);
39733
39734 /* Get onto the completion Q */
39735 addQ(&h->cmpQ, c);
39736@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
39737 unsigned long flags;
39738 __u32 a,a1;
39739
39740- istat = h->access.intr_pending(h);
39741+ istat = h->access->intr_pending(h);
39742 /* Is this interrupt for us? */
39743 if (istat == 0)
39744 return IRQ_NONE;
39745@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
39746 */
39747 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
39748 if (istat & FIFO_NOT_EMPTY) {
39749- while((a = h->access.command_completed(h))) {
39750+ while((a = h->access->command_completed(h))) {
39751 a1 = a; a &= ~3;
39752 if ((c = h->cmpQ) == NULL)
39753 {
39754@@ -1448,11 +1448,11 @@ static int sendcmd(
39755 /*
39756 * Disable interrupt
39757 */
39758- info_p->access.set_intr_mask(info_p, 0);
39759+ info_p->access->set_intr_mask(info_p, 0);
39760 /* Make sure there is room in the command FIFO */
39761 /* Actually it should be completely empty at this time. */
39762 for (i = 200000; i > 0; i--) {
39763- temp = info_p->access.fifo_full(info_p);
39764+ temp = info_p->access->fifo_full(info_p);
39765 if (temp != 0) {
39766 break;
39767 }
39768@@ -1465,7 +1465,7 @@ DBG(
39769 /*
39770 * Send the cmd
39771 */
39772- info_p->access.submit_command(info_p, c);
39773+ info_p->access->submit_command(info_p, c);
39774 complete = pollcomplete(ctlr);
39775
39776 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
39777@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
39778 * we check the new geometry. Then turn interrupts back on when
39779 * we're done.
39780 */
39781- host->access.set_intr_mask(host, 0);
39782+ host->access->set_intr_mask(host, 0);
39783 getgeometry(ctlr);
39784- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
39785+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
39786
39787 for(i=0; i<NWD; i++) {
39788 struct gendisk *disk = ida_gendisk[ctlr][i];
39789@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
39790 /* Wait (up to 2 seconds) for a command to complete */
39791
39792 for (i = 200000; i > 0; i--) {
39793- done = hba[ctlr]->access.command_completed(hba[ctlr]);
39794+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
39795 if (done == 0) {
39796 udelay(10); /* a short fixed delay */
39797 } else
39798diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
39799index be73e9d..7fbf140 100644
39800--- a/drivers/block/cpqarray.h
39801+++ b/drivers/block/cpqarray.h
39802@@ -99,7 +99,7 @@ struct ctlr_info {
39803 drv_info_t drv[NWD];
39804 struct proc_dir_entry *proc;
39805
39806- struct access_method access;
39807+ struct access_method *access;
39808
39809 cmdlist_t *reqQ;
39810 cmdlist_t *cmpQ;
39811diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
39812index a76ceb3..3c1a9fd 100644
39813--- a/drivers/block/drbd/drbd_int.h
39814+++ b/drivers/block/drbd/drbd_int.h
39815@@ -331,7 +331,7 @@ struct drbd_epoch {
39816 struct drbd_connection *connection;
39817 struct list_head list;
39818 unsigned int barrier_nr;
39819- atomic_t epoch_size; /* increased on every request added. */
39820+ atomic_unchecked_t epoch_size; /* increased on every request added. */
39821 atomic_t active; /* increased on every req. added, and dec on every finished. */
39822 unsigned long flags;
39823 };
39824@@ -797,7 +797,7 @@ struct drbd_device {
39825 unsigned int al_tr_number;
39826 int al_tr_cycle;
39827 wait_queue_head_t seq_wait;
39828- atomic_t packet_seq;
39829+ atomic_unchecked_t packet_seq;
39830 unsigned int peer_seq;
39831 spinlock_t peer_seq_lock;
39832 unsigned int minor;
39833@@ -1407,7 +1407,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
39834 char __user *uoptval;
39835 int err;
39836
39837- uoptval = (char __user __force *)optval;
39838+ uoptval = (char __force_user *)optval;
39839
39840 set_fs(KERNEL_DS);
39841 if (level == SOL_SOCKET)
39842diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c
39843index 89c497c..9c736ae 100644
39844--- a/drivers/block/drbd/drbd_interval.c
39845+++ b/drivers/block/drbd/drbd_interval.c
39846@@ -67,9 +67,9 @@ static void augment_rotate(struct rb_node *rb_old, struct rb_node *rb_new)
39847 }
39848
39849 static const struct rb_augment_callbacks augment_callbacks = {
39850- augment_propagate,
39851- augment_copy,
39852- augment_rotate,
39853+ .propagate = augment_propagate,
39854+ .copy = augment_copy,
39855+ .rotate = augment_rotate,
39856 };
39857
39858 /**
39859diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
39860index 960645c..6c2724a 100644
39861--- a/drivers/block/drbd/drbd_main.c
39862+++ b/drivers/block/drbd/drbd_main.c
39863@@ -1322,7 +1322,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
39864 p->sector = sector;
39865 p->block_id = block_id;
39866 p->blksize = blksize;
39867- p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
39868+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&peer_device->device->packet_seq));
39869 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
39870 }
39871
39872@@ -1628,7 +1628,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
39873 return -EIO;
39874 p->sector = cpu_to_be64(req->i.sector);
39875 p->block_id = (unsigned long)req;
39876- p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
39877+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&device->packet_seq));
39878 dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
39879 if (device->state.conn >= C_SYNC_SOURCE &&
39880 device->state.conn <= C_PAUSED_SYNC_T)
39881@@ -2670,8 +2670,8 @@ void drbd_destroy_connection(struct kref *kref)
39882 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
39883 struct drbd_resource *resource = connection->resource;
39884
39885- if (atomic_read(&connection->current_epoch->epoch_size) != 0)
39886- drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
39887+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size) != 0)
39888+ drbd_err(connection, "epoch_size:%d\n", atomic_read_unchecked(&connection->current_epoch->epoch_size));
39889 kfree(connection->current_epoch);
39890
39891 idr_destroy(&connection->peer_devices);
39892diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
39893index 3f2e167..d3170e4 100644
39894--- a/drivers/block/drbd/drbd_nl.c
39895+++ b/drivers/block/drbd/drbd_nl.c
39896@@ -3616,7 +3616,7 @@ finish:
39897
39898 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
39899 {
39900- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
39901+ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
39902 struct sk_buff *msg;
39903 struct drbd_genlmsghdr *d_out;
39904 unsigned seq;
39905@@ -3629,7 +3629,7 @@ void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
39906 return;
39907 }
39908
39909- seq = atomic_inc_return(&drbd_genl_seq);
39910+ seq = atomic_inc_return_unchecked(&drbd_genl_seq);
39911 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
39912 if (!msg)
39913 goto failed;
39914diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
39915index 5b17ec8..6c21e6b 100644
39916--- a/drivers/block/drbd/drbd_receiver.c
39917+++ b/drivers/block/drbd/drbd_receiver.c
39918@@ -834,7 +834,7 @@ int drbd_connected(struct drbd_peer_device *peer_device)
39919 struct drbd_device *device = peer_device->device;
39920 int err;
39921
39922- atomic_set(&device->packet_seq, 0);
39923+ atomic_set_unchecked(&device->packet_seq, 0);
39924 device->peer_seq = 0;
39925
39926 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
39927@@ -1199,7 +1199,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
39928 do {
39929 next_epoch = NULL;
39930
39931- epoch_size = atomic_read(&epoch->epoch_size);
39932+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
39933
39934 switch (ev & ~EV_CLEANUP) {
39935 case EV_PUT:
39936@@ -1239,7 +1239,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
39937 rv = FE_DESTROYED;
39938 } else {
39939 epoch->flags = 0;
39940- atomic_set(&epoch->epoch_size, 0);
39941+ atomic_set_unchecked(&epoch->epoch_size, 0);
39942 /* atomic_set(&epoch->active, 0); is already zero */
39943 if (rv == FE_STILL_LIVE)
39944 rv = FE_RECYCLED;
39945@@ -1490,7 +1490,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
39946 conn_wait_active_ee_empty(connection);
39947 drbd_flush(connection);
39948
39949- if (atomic_read(&connection->current_epoch->epoch_size)) {
39950+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
39951 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
39952 if (epoch)
39953 break;
39954@@ -1503,11 +1503,11 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
39955 }
39956
39957 epoch->flags = 0;
39958- atomic_set(&epoch->epoch_size, 0);
39959+ atomic_set_unchecked(&epoch->epoch_size, 0);
39960 atomic_set(&epoch->active, 0);
39961
39962 spin_lock(&connection->epoch_lock);
39963- if (atomic_read(&connection->current_epoch->epoch_size)) {
39964+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
39965 list_add(&epoch->list, &connection->current_epoch->list);
39966 connection->current_epoch = epoch;
39967 connection->epochs++;
39968@@ -2224,7 +2224,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
39969
39970 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
39971 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
39972- atomic_inc(&connection->current_epoch->epoch_size);
39973+ atomic_inc_unchecked(&connection->current_epoch->epoch_size);
39974 err2 = drbd_drain_block(peer_device, pi->size);
39975 if (!err)
39976 err = err2;
39977@@ -2266,7 +2266,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
39978
39979 spin_lock(&connection->epoch_lock);
39980 peer_req->epoch = connection->current_epoch;
39981- atomic_inc(&peer_req->epoch->epoch_size);
39982+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
39983 atomic_inc(&peer_req->epoch->active);
39984 spin_unlock(&connection->epoch_lock);
39985
39986@@ -4461,7 +4461,7 @@ struct data_cmd {
39987 int expect_payload;
39988 size_t pkt_size;
39989 int (*fn)(struct drbd_connection *, struct packet_info *);
39990-};
39991+} __do_const;
39992
39993 static struct data_cmd drbd_cmd_handler[] = {
39994 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
39995@@ -4572,7 +4572,7 @@ static void conn_disconnect(struct drbd_connection *connection)
39996 if (!list_empty(&connection->current_epoch->list))
39997 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
39998 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
39999- atomic_set(&connection->current_epoch->epoch_size, 0);
40000+ atomic_set_unchecked(&connection->current_epoch->epoch_size, 0);
40001 connection->send.seen_any_write_yet = false;
40002
40003 drbd_info(connection, "Connection closed\n");
40004@@ -5364,7 +5364,7 @@ static int connection_finish_peer_reqs(struct drbd_connection *connection)
40005 struct asender_cmd {
40006 size_t pkt_size;
40007 int (*fn)(struct drbd_connection *connection, struct packet_info *);
40008-};
40009+} __do_const;
40010
40011 static struct asender_cmd asender_tbl[] = {
40012 [P_PING] = { 0, got_Ping },
40013diff --git a/drivers/block/loop.c b/drivers/block/loop.c
40014index 6cb1beb..bf490f7 100644
40015--- a/drivers/block/loop.c
40016+++ b/drivers/block/loop.c
40017@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
40018
40019 file_start_write(file);
40020 set_fs(get_ds());
40021- bw = file->f_op->write(file, buf, len, &pos);
40022+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
40023 set_fs(old_fs);
40024 file_end_write(file);
40025 if (likely(bw == len))
40026diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
40027index 02351e2..a9ea617 100644
40028--- a/drivers/block/nvme-core.c
40029+++ b/drivers/block/nvme-core.c
40030@@ -73,7 +73,6 @@ static LIST_HEAD(dev_list);
40031 static struct task_struct *nvme_thread;
40032 static struct workqueue_struct *nvme_workq;
40033 static wait_queue_head_t nvme_kthread_wait;
40034-static struct notifier_block nvme_nb;
40035
40036 static void nvme_reset_failed_dev(struct work_struct *ws);
40037
40038@@ -2925,6 +2924,10 @@ static struct pci_driver nvme_driver = {
40039 .err_handler = &nvme_err_handler,
40040 };
40041
40042+static struct notifier_block nvme_nb = {
40043+ .notifier_call = &nvme_cpu_notify,
40044+};
40045+
40046 static int __init nvme_init(void)
40047 {
40048 int result;
40049@@ -2941,7 +2944,6 @@ static int __init nvme_init(void)
40050 else if (result > 0)
40051 nvme_major = result;
40052
40053- nvme_nb.notifier_call = &nvme_cpu_notify;
40054 result = register_hotcpu_notifier(&nvme_nb);
40055 if (result)
40056 goto unregister_blkdev;
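nvme's notifier_block used to have its callback filled in at init time; hoisting the assignment into a static initializer makes the object complete at compile time, so the constify machinery can move it out of writable data. In miniature, with hypothetical demo_* names:

        #include <linux/notifier.h>

        static int demo_notify(struct notifier_block *nb,
                               unsigned long action, void *data)
        {
                return NOTIFY_OK;
        }

        /* before: demo_nb.notifier_call = demo_notify;  done in init() */
        static struct notifier_block demo_nb = {
                .notifier_call = demo_notify,   /* fixed at build time */
        };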
40057diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
40058index 758ac44..58087fd 100644
40059--- a/drivers/block/pktcdvd.c
40060+++ b/drivers/block/pktcdvd.c
40061@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
40062
40063 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
40064 {
40065- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
40066+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
40067 }
40068
40069 /*
40070@@ -1888,7 +1888,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
40071 return -EROFS;
40072 }
40073 pd->settings.fp = ti.fp;
40074- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
40075+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
40076
40077 if (ti.nwa_v) {
40078 pd->nwa = be32_to_cpu(ti.next_writable);
40079diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
40080index e5565fb..71be10b4 100644
40081--- a/drivers/block/smart1,2.h
40082+++ b/drivers/block/smart1,2.h
40083@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
40084 }
40085
40086 static struct access_method smart4_access = {
40087- smart4_submit_command,
40088- smart4_intr_mask,
40089- smart4_fifo_full,
40090- smart4_intr_pending,
40091- smart4_completed,
40092+ .submit_command = smart4_submit_command,
40093+ .set_intr_mask = smart4_intr_mask,
40094+ .fifo_full = smart4_fifo_full,
40095+ .intr_pending = smart4_intr_pending,
40096+ .command_completed = smart4_completed,
40097 };
40098
40099 /*
40100@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
40101 }
40102
40103 static struct access_method smart2_access = {
40104- smart2_submit_command,
40105- smart2_intr_mask,
40106- smart2_fifo_full,
40107- smart2_intr_pending,
40108- smart2_completed,
40109+ .submit_command = smart2_submit_command,
40110+ .set_intr_mask = smart2_intr_mask,
40111+ .fifo_full = smart2_fifo_full,
40112+ .intr_pending = smart2_intr_pending,
40113+ .command_completed = smart2_completed,
40114 };
40115
40116 /*
40117@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
40118 }
40119
40120 static struct access_method smart2e_access = {
40121- smart2e_submit_command,
40122- smart2e_intr_mask,
40123- smart2e_fifo_full,
40124- smart2e_intr_pending,
40125- smart2e_completed,
40126+ .submit_command = smart2e_submit_command,
40127+ .set_intr_mask = smart2e_intr_mask,
40128+ .fifo_full = smart2e_fifo_full,
40129+ .intr_pending = smart2e_intr_pending,
40130+ .command_completed = smart2e_completed,
40131 };
40132
40133 /*
40134@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
40135 }
40136
40137 static struct access_method smart1_access = {
40138- smart1_submit_command,
40139- smart1_intr_mask,
40140- smart1_fifo_full,
40141- smart1_intr_pending,
40142- smart1_completed,
40143+ .submit_command = smart1_submit_command,
40144+ .set_intr_mask = smart1_intr_mask,
40145+ .fifo_full = smart1_fifo_full,
40146+ .intr_pending = smart1_intr_pending,
40147+ .command_completed = smart1_completed,
40148 };
40149diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
40150index f038dba..bb74c08 100644
40151--- a/drivers/bluetooth/btwilink.c
40152+++ b/drivers/bluetooth/btwilink.c
40153@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
40154
40155 static int bt_ti_probe(struct platform_device *pdev)
40156 {
40157- static struct ti_st *hst;
40158+ struct ti_st *hst;
40159 struct hci_dev *hdev;
40160 int err;
40161
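The btwilink change is a plain bug fix rather than hardening: hst was declared static inside the probe function, so all probed adapters shared one pointer slot. Dropping static makes it an ordinary per-call local. The failure mode in a userspace sketch with hypothetical names:

        #include <stdlib.h>

        struct ti_st { int id; };

        static struct ti_st *probe(int id)
        {
                /* was: static struct ti_st *hst;  one slot for everyone */
                struct ti_st *hst = calloc(1, sizeof(*hst));

                if (hst)
                        hst->id = id;   /* no longer clobbers earlier probes */
                return hst;
        }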
40162diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
40163index 898b84b..86f74b9 100644
40164--- a/drivers/cdrom/cdrom.c
40165+++ b/drivers/cdrom/cdrom.c
40166@@ -610,7 +610,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
40167 ENSURE(reset, CDC_RESET);
40168 ENSURE(generic_packet, CDC_GENERIC_PACKET);
40169 cdi->mc_flags = 0;
40170- cdo->n_minors = 0;
40171 cdi->options = CDO_USE_FFLAGS;
40172
40173 if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
40174@@ -630,8 +629,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
40175 else
40176 cdi->cdda_method = CDDA_OLD;
40177
40178- if (!cdo->generic_packet)
40179- cdo->generic_packet = cdrom_dummy_generic_packet;
40180+ if (!cdo->generic_packet) {
40181+ pax_open_kernel();
40182+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
40183+ pax_close_kernel();
40184+ }
40185
40186 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
40187 mutex_lock(&cdrom_mutex);
40188@@ -652,7 +654,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
40189 if (cdi->exit)
40190 cdi->exit(cdi);
40191
40192- cdi->ops->n_minors--;
40193 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
40194 }
40195
40196@@ -2126,7 +2127,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
40197 */
40198 nr = nframes;
40199 do {
40200- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
40201+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
40202 if (cgc.buffer)
40203 break;
40204
40205@@ -3434,7 +3435,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
40206 struct cdrom_device_info *cdi;
40207 int ret;
40208
40209- ret = scnprintf(info + *pos, max_size - *pos, header);
40210+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
40211 if (!ret)
40212 return 1;
40213
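Among the cdrom.c hunks, the CDDA bounce buffer moves from kmalloc to kzalloc: if the drive returns fewer bytes than requested, the unwritten tail would otherwise reach userspace as stale heap contents. A userspace analogue of the defensive allocation (calloc additionally guards the size multiplication, which malloc does not):

        #include <stdlib.h>

        static void *alloc_frames(size_t nframes, size_t framesize)
        {
                /* was: malloc(nframes * framesize) -- stale bytes, and no
                 * overflow check on the product */
                return calloc(nframes, framesize);
        }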
40214diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
40215index 584bc31..e64a12c 100644
40216--- a/drivers/cdrom/gdrom.c
40217+++ b/drivers/cdrom/gdrom.c
40218@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
40219 .audio_ioctl = gdrom_audio_ioctl,
40220 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
40221 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
40222- .n_minors = 1,
40223 };
40224
40225 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
40226diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
40227index 6e9f74a..50c7cea 100644
40228--- a/drivers/char/Kconfig
40229+++ b/drivers/char/Kconfig
40230@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
40231
40232 config DEVKMEM
40233 bool "/dev/kmem virtual device support"
40234- default y
40235+ default n
40236+ depends on !GRKERNSEC_KMEM
40237 help
40238 Say Y here if you want to support the /dev/kmem device. The
40239 /dev/kmem device is rarely used, but can be used for certain
40240@@ -577,6 +578,7 @@ config DEVPORT
40241 bool
40242 depends on !M68K
40243 depends on ISA || PCI
40244+ depends on !GRKERNSEC_KMEM
40245 default y
40246
40247 source "drivers/s390/char/Kconfig"
40248diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
40249index a48e05b..6bac831 100644
40250--- a/drivers/char/agp/compat_ioctl.c
40251+++ b/drivers/char/agp/compat_ioctl.c
40252@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
40253 return -ENOMEM;
40254 }
40255
40256- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
40257+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
40258 sizeof(*usegment) * ureserve.seg_count)) {
40259 kfree(usegment);
40260 kfree(ksegment);
40261diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
40262index b297033..fa217ca 100644
40263--- a/drivers/char/agp/frontend.c
40264+++ b/drivers/char/agp/frontend.c
40265@@ -819,7 +819,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
40266 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
40267 return -EFAULT;
40268
40269- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
40270+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
40271 return -EFAULT;
40272
40273 client = agp_find_client_by_pid(reserve.pid);
40274@@ -849,7 +849,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
40275 if (segment == NULL)
40276 return -ENOMEM;
40277
40278- if (copy_from_user(segment, (void __user *) reserve.seg_list,
40279+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
40280 sizeof(struct agp_segment) * reserve.seg_count)) {
40281 kfree(segment);
40282 return -EFAULT;
40283diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
40284index 4f94375..413694e 100644
40285--- a/drivers/char/genrtc.c
40286+++ b/drivers/char/genrtc.c
40287@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
40288 switch (cmd) {
40289
40290 case RTC_PLL_GET:
40291+ memset(&pll, 0, sizeof(pll));
40292 if (get_rtc_pll(&pll))
40293 return -EINVAL;
40294 else
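RTC_PLL_GET copies a stack structure to userspace, so pll is scrubbed before get_rtc_pll() partially fills it; otherwise padding bytes and any untouched fields would leak stale kernel stack contents. The pattern in isolation, with a hypothetical struct:

        #include <string.h>

        struct pll_info { short ctrl; int value; /* padding in between */ };

        static void fill_pll(struct pll_info *pll)
        {
                memset(pll, 0, sizeof(*pll));   /* scrub padding too */
                pll->ctrl = 1;                  /* hypothetical values */
                pll->value = 48000;
        }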
40295diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
40296index d5d4cd8..22d561d 100644
40297--- a/drivers/char/hpet.c
40298+++ b/drivers/char/hpet.c
40299@@ -575,7 +575,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
40300 }
40301
40302 static int
40303-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
40304+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
40305 struct hpet_info *info)
40306 {
40307 struct hpet_timer __iomem *timer;
40308diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
40309index 86fe45c..c0ea948 100644
40310--- a/drivers/char/hw_random/intel-rng.c
40311+++ b/drivers/char/hw_random/intel-rng.c
40312@@ -314,7 +314,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
40313
40314 if (no_fwh_detect)
40315 return -ENODEV;
40316- printk(warning);
40317+ printk("%s", warning);
40318 return -EBUSY;
40319 }
40320
40321diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
40322index e6db938..835e3a2 100644
40323--- a/drivers/char/ipmi/ipmi_msghandler.c
40324+++ b/drivers/char/ipmi/ipmi_msghandler.c
40325@@ -438,7 +438,7 @@ struct ipmi_smi {
40326 struct proc_dir_entry *proc_dir;
40327 char proc_dir_name[10];
40328
40329- atomic_t stats[IPMI_NUM_STATS];
40330+ atomic_unchecked_t stats[IPMI_NUM_STATS];
40331
40332 /*
40333 * run_to_completion duplicate of smb_info, smi_info
40334@@ -470,9 +470,9 @@ static LIST_HEAD(smi_watchers);
40335 static DEFINE_MUTEX(smi_watchers_mutex);
40336
40337 #define ipmi_inc_stat(intf, stat) \
40338- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
40339+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
40340 #define ipmi_get_stat(intf, stat) \
40341- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
40342+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
40343
40344 static int is_lan_addr(struct ipmi_addr *addr)
40345 {
40346@@ -2926,7 +2926,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
40347 INIT_LIST_HEAD(&intf->cmd_rcvrs);
40348 init_waitqueue_head(&intf->waitq);
40349 for (i = 0; i < IPMI_NUM_STATS; i++)
40350- atomic_set(&intf->stats[i], 0);
40351+ atomic_set_unchecked(&intf->stats[i], 0);
40352
40353 intf->proc_dir = NULL;
40354
40355diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
40356index 5d66568..c9d93c3 100644
40357--- a/drivers/char/ipmi/ipmi_si_intf.c
40358+++ b/drivers/char/ipmi/ipmi_si_intf.c
40359@@ -285,7 +285,7 @@ struct smi_info {
40360 unsigned char slave_addr;
40361
40362 /* Counters and things for the proc filesystem. */
40363- atomic_t stats[SI_NUM_STATS];
40364+ atomic_unchecked_t stats[SI_NUM_STATS];
40365
40366 struct task_struct *thread;
40367
40368@@ -294,9 +294,9 @@ struct smi_info {
40369 };
40370
40371 #define smi_inc_stat(smi, stat) \
40372- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
40373+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
40374 #define smi_get_stat(smi, stat) \
40375- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
40376+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
40377
40378 #define SI_MAX_PARMS 4
40379
40380@@ -3374,7 +3374,7 @@ static int try_smi_init(struct smi_info *new_smi)
40381 atomic_set(&new_smi->req_events, 0);
40382 new_smi->run_to_completion = false;
40383 for (i = 0; i < SI_NUM_STATS; i++)
40384- atomic_set(&new_smi->stats[i], 0);
40385+ atomic_set_unchecked(&new_smi->stats[i], 0);
40386
40387 new_smi->interrupt_disabled = true;
40388 atomic_set(&new_smi->stop_operation, 0);
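
Both IPMI hunks convert statistics arrays from atomic_t to atomic_unchecked_t. Under PaX's REFCOUNT hardening, ordinary atomic_t overflow is trapped as a suspected reference-count bug; counters that may legitimately wrap are moved to the unchecked variant so they stay exempt. A minimal sketch of the pairing:

/* atomic_unchecked_t and the *_unchecked() accessors are provided by
 * the PaX patch, not mainline. */
#include <linux/atomic.h>

struct smi_stats_example {
    atomic_t           users;    /* refcount: overflow is a bug */
    atomic_unchecked_t rx_msgs;  /* statistic: wrapping is harmless */
};

static void account_rx(struct smi_stats_example *s)
{
    atomic_inc_unchecked(&s->rx_msgs);  /* exempt from REFCOUNT traps */
}
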
40389diff --git a/drivers/char/mem.c b/drivers/char/mem.c
40390index 917403f..dddd899 100644
40391--- a/drivers/char/mem.c
40392+++ b/drivers/char/mem.c
40393@@ -18,6 +18,7 @@
40394 #include <linux/raw.h>
40395 #include <linux/tty.h>
40396 #include <linux/capability.h>
40397+#include <linux/security.h>
40398 #include <linux/ptrace.h>
40399 #include <linux/device.h>
40400 #include <linux/highmem.h>
40401@@ -36,6 +37,10 @@
40402
40403 #define DEVPORT_MINOR 4
40404
40405+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
40406+extern const struct file_operations grsec_fops;
40407+#endif
40408+
40409 static inline unsigned long size_inside_page(unsigned long start,
40410 unsigned long size)
40411 {
40412@@ -67,9 +72,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
40413
40414 while (cursor < to) {
40415 if (!devmem_is_allowed(pfn)) {
40416+#ifdef CONFIG_GRKERNSEC_KMEM
40417+ gr_handle_mem_readwrite(from, to);
40418+#else
40419 printk(KERN_INFO
40420 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
40421 current->comm, from, to);
40422+#endif
40423 return 0;
40424 }
40425 cursor += PAGE_SIZE;
40426@@ -77,6 +86,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
40427 }
40428 return 1;
40429 }
40430+#elif defined(CONFIG_GRKERNSEC_KMEM)
40431+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
40432+{
40433+ return 0;
40434+}
40435 #else
40436 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
40437 {
40438@@ -122,6 +136,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
40439
40440 while (count > 0) {
40441 unsigned long remaining;
40442+ char *temp;
40443
40444 sz = size_inside_page(p, count);
40445
40446@@ -137,7 +152,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
40447 if (!ptr)
40448 return -EFAULT;
40449
40450- remaining = copy_to_user(buf, ptr, sz);
40451+#ifdef CONFIG_PAX_USERCOPY
40452+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
40453+ if (!temp) {
40454+ unxlate_dev_mem_ptr(p, ptr);
40455+ return -ENOMEM;
40456+ }
40457+ memcpy(temp, ptr, sz);
40458+#else
40459+ temp = ptr;
40460+#endif
40461+
40462+ remaining = copy_to_user(buf, temp, sz);
40463+
40464+#ifdef CONFIG_PAX_USERCOPY
40465+ kfree(temp);
40466+#endif
40467+
40468 unxlate_dev_mem_ptr(p, ptr);
40469 if (remaining)
40470 return -EFAULT;
40471@@ -369,9 +400,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
40472 size_t count, loff_t *ppos)
40473 {
40474 unsigned long p = *ppos;
40475- ssize_t low_count, read, sz;
40476+ ssize_t low_count, read, sz, err = 0;
40477 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
40478- int err = 0;
40479
40480 read = 0;
40481 if (p < (unsigned long) high_memory) {
40482@@ -393,6 +423,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
40483 }
40484 #endif
40485 while (low_count > 0) {
40486+ char *temp;
40487+
40488 sz = size_inside_page(p, low_count);
40489
40490 /*
40491@@ -402,7 +434,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
40492 */
40493 kbuf = xlate_dev_kmem_ptr((char *)p);
40494
40495- if (copy_to_user(buf, kbuf, sz))
40496+#ifdef CONFIG_PAX_USERCOPY
40497+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
40498+ if (!temp)
40499+ return -ENOMEM;
40500+ memcpy(temp, kbuf, sz);
40501+#else
40502+ temp = kbuf;
40503+#endif
40504+
40505+ err = copy_to_user(buf, temp, sz);
40506+
40507+#ifdef CONFIG_PAX_USERCOPY
40508+ kfree(temp);
40509+#endif
40510+
40511+ if (err)
40512 return -EFAULT;
40513 buf += sz;
40514 p += sz;
40515@@ -827,6 +874,9 @@ static const struct memdev {
40516 #ifdef CONFIG_PRINTK
40517 [11] = { "kmsg", 0644, &kmsg_fops, NULL },
40518 #endif
40519+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
40520+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
40521+#endif
40522 };
40523
40524 static int memory_open(struct inode *inode, struct file *filp)
40525@@ -898,7 +948,7 @@ static int __init chr_dev_init(void)
40526 continue;
40527
40528 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
40529- NULL, devlist[minor].name);
40530+ NULL, "%s", devlist[minor].name);
40531 }
40532
40533 return tty_init();
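
The mem.c hunks do three things: route /dev/mem denials through gr_handle_mem_readwrite() (or deny everything outright when only GRKERNSEC_KMEM is set), pin the device_create() format string, and — under CONFIG_PAX_USERCOPY — bounce reads through a freshly allocated slab object so the usercopy checker can validate the copy length against a known object size rather than an arbitrary kernel mapping. A hedged sketch of that last pattern (the patch additionally ORs in its own GFP_USERCOPY flag, omitted here):

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static ssize_t copy_via_bounce(char __user *buf, const void *src, size_t sz)
{
    char *temp = kmalloc(sz, GFP_KERNEL);
    ssize_t ret = 0;

    if (!temp)
        return -ENOMEM;
    memcpy(temp, src, sz);            /* kernel-to-kernel, unchecked */
    if (copy_to_user(buf, temp, sz))  /* length validated against temp's
                                       * slab object, not against 'src' */
        ret = -EFAULT;
    kfree(temp);
    return ret;
}
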
40534diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
40535index 9df78e2..01ba9ae 100644
40536--- a/drivers/char/nvram.c
40537+++ b/drivers/char/nvram.c
40538@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
40539
40540 spin_unlock_irq(&rtc_lock);
40541
40542- if (copy_to_user(buf, contents, tmp - contents))
40543+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
40544 return -EFAULT;
40545
40546 *ppos = i;
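
The nvram hunk bounds the derived length before copy_to_user(). A sketch of the defensive shape: even if the cursor arithmetic is correct today, clamping against the buffer size turns a future logic error into -EFAULT rather than a stack infoleak.

/* 'contents_size' stands in for sizeof(contents) in the original,
 * where 'contents' is a fixed on-stack array. */
#include <linux/uaccess.h>

static ssize_t emit_example(char __user *buf, const char *contents,
                            const char *tmp, size_t contents_size)
{
    size_t len = tmp - contents;

    if (len > contents_size || copy_to_user(buf, contents, len))
        return -EFAULT;
    return len;
}
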
40547diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
40548index 8320abd..ec48108 100644
40549--- a/drivers/char/pcmcia/synclink_cs.c
40550+++ b/drivers/char/pcmcia/synclink_cs.c
40551@@ -2345,9 +2345,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
40552
40553 if (debug_level >= DEBUG_LEVEL_INFO)
40554 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
40555- __FILE__, __LINE__, info->device_name, port->count);
40556+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
40557
40558- WARN_ON(!port->count);
40559+ WARN_ON(!atomic_read(&port->count));
40560
40561 if (tty_port_close_start(port, tty, filp) == 0)
40562 goto cleanup;
40563@@ -2365,7 +2365,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
40564 cleanup:
40565 if (debug_level >= DEBUG_LEVEL_INFO)
40566 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
40567- tty->driver->name, port->count);
40568+ tty->driver->name, atomic_read(&port->count));
40569 }
40570
40571 /* Wait until the transmitter is empty.
40572@@ -2507,7 +2507,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
40573
40574 if (debug_level >= DEBUG_LEVEL_INFO)
40575 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
40576- __FILE__, __LINE__, tty->driver->name, port->count);
40577+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
40578
40579 /* If port is closing, signal caller to try again */
40580 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
40581@@ -2527,11 +2527,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
40582 goto cleanup;
40583 }
40584 spin_lock(&port->lock);
40585- port->count++;
40586+ atomic_inc(&port->count);
40587 spin_unlock(&port->lock);
40588 spin_unlock_irqrestore(&info->netlock, flags);
40589
40590- if (port->count == 1) {
40591+ if (atomic_read(&port->count) == 1) {
40592 /* 1st open on this device, init hardware */
40593 retval = startup(info, tty);
40594 if (retval < 0)
40595@@ -3920,7 +3920,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
40596 unsigned short new_crctype;
40597
40598 /* return error if TTY interface open */
40599- if (info->port.count)
40600+ if (atomic_read(&info->port.count))
40601 return -EBUSY;
40602
40603 switch (encoding)
40604@@ -4024,7 +4024,7 @@ static int hdlcdev_open(struct net_device *dev)
40605
40606 /* arbitrate between network and tty opens */
40607 spin_lock_irqsave(&info->netlock, flags);
40608- if (info->port.count != 0 || info->netcount != 0) {
40609+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
40610 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
40611 spin_unlock_irqrestore(&info->netlock, flags);
40612 return -EBUSY;
40613@@ -4114,7 +4114,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40614 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
40615
40616 /* return error if TTY interface open */
40617- if (info->port.count)
40618+ if (atomic_read(&info->port.count))
40619 return -EBUSY;
40620
40621 if (cmd != SIOCWANDEV)
40622diff --git a/drivers/char/random.c b/drivers/char/random.c
40623index 71529e1..822b036 100644
40624--- a/drivers/char/random.c
40625+++ b/drivers/char/random.c
40626@@ -284,9 +284,6 @@
40627 /*
40628 * To allow fractional bits to be tracked, the entropy_count field is
40629 * denominated in units of 1/8th bits.
40630- *
40631- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
40632- * credit_entropy_bits() needs to be 64 bits wide.
40633 */
40634 #define ENTROPY_SHIFT 3
40635 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
40636@@ -433,9 +430,9 @@ struct entropy_store {
40637 };
40638
40639 static void push_to_pool(struct work_struct *work);
40640-static __u32 input_pool_data[INPUT_POOL_WORDS];
40641-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
40642-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
40643+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
40644+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
40645+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
40646
40647 static struct entropy_store input_pool = {
40648 .poolinfo = &poolinfo_table[0],
40649@@ -524,8 +521,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
40650 input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
40651 }
40652
40653- ACCESS_ONCE(r->input_rotate) = input_rotate;
40654- ACCESS_ONCE(r->add_ptr) = i;
40655+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
40656+ ACCESS_ONCE_RW(r->add_ptr) = i;
40657 smp_wmb();
40658
40659 if (out)
40660@@ -632,7 +629,7 @@ retry:
40661 /* The +2 corresponds to the /4 in the denominator */
40662
40663 do {
40664- unsigned int anfrac = min(pnfrac, pool_size/2);
40665+ u64 anfrac = min(pnfrac, pool_size/2);
40666 unsigned int add =
40667 ((pool_size - entropy_count)*anfrac*3) >> s;
40668
40669@@ -1177,7 +1174,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
40670
40671 extract_buf(r, tmp);
40672 i = min_t(int, nbytes, EXTRACT_SIZE);
40673- if (copy_to_user(buf, tmp, i)) {
40674+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
40675 ret = -EFAULT;
40676 break;
40677 }
40678@@ -1567,7 +1564,7 @@ static char sysctl_bootid[16];
40679 static int proc_do_uuid(struct ctl_table *table, int write,
40680 void __user *buffer, size_t *lenp, loff_t *ppos)
40681 {
40682- struct ctl_table fake_table;
40683+ ctl_table_no_const fake_table;
40684 unsigned char buf[64], tmp_uuid[16], *uuid;
40685
40686 uuid = table->data;
40687@@ -1597,7 +1594,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
40688 static int proc_do_entropy(struct ctl_table *table, int write,
40689 void __user *buffer, size_t *lenp, loff_t *ppos)
40690 {
40691- struct ctl_table fake_table;
40692+ ctl_table_no_const fake_table;
40693 int entropy_count;
40694
40695 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
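
In random.c, dropping the pool-size constraint comment means (pool_size - entropy_count) * anfrac * 3 is no longer guaranteed to fit in 32 bits, so anfrac is widened to u64 and C's usual arithmetic conversions promote the whole product. A condensed sketch of the promotion (names follow the hunk, the function itself is hypothetical):

#include <linux/kernel.h>
#include <linux/types.h>

static unsigned int credit_example(unsigned int pool_size,
                                   unsigned int entropy_count,
                                   unsigned int pnfrac, int s)
{
    u64 anfrac = min_t(u64, pnfrac, pool_size / 2);

    /* one u64 operand promotes the whole product to 64 bits,
     * so the shift sees the full result, not a truncated one */
    return ((pool_size - entropy_count) * anfrac * 3) >> s;
}
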
40696diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
40697index 7cc1fe22..b602d6b 100644
40698--- a/drivers/char/sonypi.c
40699+++ b/drivers/char/sonypi.c
40700@@ -54,6 +54,7 @@
40701
40702 #include <asm/uaccess.h>
40703 #include <asm/io.h>
40704+#include <asm/local.h>
40705
40706 #include <linux/sonypi.h>
40707
40708@@ -490,7 +491,7 @@ static struct sonypi_device {
40709 spinlock_t fifo_lock;
40710 wait_queue_head_t fifo_proc_list;
40711 struct fasync_struct *fifo_async;
40712- int open_count;
40713+ local_t open_count;
40714 int model;
40715 struct input_dev *input_jog_dev;
40716 struct input_dev *input_key_dev;
40717@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
40718 static int sonypi_misc_release(struct inode *inode, struct file *file)
40719 {
40720 mutex_lock(&sonypi_device.lock);
40721- sonypi_device.open_count--;
40722+ local_dec(&sonypi_device.open_count);
40723 mutex_unlock(&sonypi_device.lock);
40724 return 0;
40725 }
40726@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
40727 {
40728 mutex_lock(&sonypi_device.lock);
40729 /* Flush input queue on first open */
40730- if (!sonypi_device.open_count)
40731+ if (!local_read(&sonypi_device.open_count))
40732 kfifo_reset(&sonypi_device.fifo);
40733- sonypi_device.open_count++;
40734+ local_inc(&sonypi_device.open_count);
40735 mutex_unlock(&sonypi_device.lock);
40736
40737 return 0;
40738diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
40739index 565a947..dcdc06e 100644
40740--- a/drivers/char/tpm/tpm_acpi.c
40741+++ b/drivers/char/tpm/tpm_acpi.c
40742@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
40743 virt = acpi_os_map_iomem(start, len);
40744 if (!virt) {
40745 kfree(log->bios_event_log);
40746+ log->bios_event_log = NULL;
40747 printk("%s: ERROR - Unable to map memory\n", __func__);
40748 return -EIO;
40749 }
40750
40751- memcpy_fromio(log->bios_event_log, virt, len);
40752+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
40753
40754 acpi_os_unmap_iomem(virt, len);
40755 return 0;
40756diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
40757index 59f7cb2..bac8b6d 100644
40758--- a/drivers/char/tpm/tpm_eventlog.c
40759+++ b/drivers/char/tpm/tpm_eventlog.c
40760@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
40761 event = addr;
40762
40763 if ((event->event_type == 0 && event->event_size == 0) ||
40764- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
40765+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
40766 return NULL;
40767
40768 return addr;
40769@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
40770 return NULL;
40771
40772 if ((event->event_type == 0 && event->event_size == 0) ||
40773- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
40774+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
40775 return NULL;
40776
40777 (*pos)++;
40778@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
40779 int i;
40780
40781 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
40782- seq_putc(m, data[i]);
40783+ if (!seq_putc(m, data[i]))
40784+ return -EFAULT;
40785
40786 return 0;
40787 }
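
The tpm_eventlog hunks rearrange the bounds test so no overflowable addition is performed: addr + sizeof(hdr) + size can wrap for a huge attacker-supplied event_size and slip under limit, whereas comparing the size against the remaining room is wrap-free. A sketch with a hypothetical header type:

/* Callers guarantee addr <= limit, so the subtraction cannot wrap. */
#include <linux/types.h>

struct tcpa_event_hdr_example {
    u32 pcr_index;
    u32 event_type;
    u32 event_size;
};

static bool event_in_bounds(const char *addr, const char *limit, u32 size)
{
    size_t room = limit - addr;

    if (room < sizeof(struct tcpa_event_hdr_example))
        return false;
    /* wrap-free equivalent of 'addr + sizeof(hdr) + size < limit' */
    return size < room - sizeof(struct tcpa_event_hdr_example);
}
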
40788diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
40789index 60aafb8..10c08e0 100644
40790--- a/drivers/char/virtio_console.c
40791+++ b/drivers/char/virtio_console.c
40792@@ -684,7 +684,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
40793 if (to_user) {
40794 ssize_t ret;
40795
40796- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
40797+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
40798 if (ret)
40799 return -EFAULT;
40800 } else {
40801@@ -787,7 +787,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
40802 if (!port_has_data(port) && !port->host_connected)
40803 return 0;
40804
40805- return fill_readbuf(port, ubuf, count, true);
40806+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
40807 }
40808
40809 static int wait_port_writable(struct port *port, bool nonblock)
40810diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
40811index 57a078e..c17cde8 100644
40812--- a/drivers/clk/clk-composite.c
40813+++ b/drivers/clk/clk-composite.c
40814@@ -146,7 +146,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
40815 struct clk *clk;
40816 struct clk_init_data init;
40817 struct clk_composite *composite;
40818- struct clk_ops *clk_composite_ops;
40819+ clk_ops_no_const *clk_composite_ops;
40820
40821 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
40822 if (!composite) {
40823diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
40824index dd3a78c..386d49c 100644
40825--- a/drivers/clk/socfpga/clk-gate.c
40826+++ b/drivers/clk/socfpga/clk-gate.c
40827@@ -22,6 +22,7 @@
40828 #include <linux/mfd/syscon.h>
40829 #include <linux/of.h>
40830 #include <linux/regmap.h>
40831+#include <asm/pgtable.h>
40832
40833 #include "clk.h"
40834
40835@@ -174,7 +175,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
40836 return 0;
40837 }
40838
40839-static struct clk_ops gateclk_ops = {
40840+static clk_ops_no_const gateclk_ops __read_only = {
40841 .prepare = socfpga_clk_prepare,
40842 .recalc_rate = socfpga_clk_recalc_rate,
40843 .get_parent = socfpga_clk_get_parent,
40844@@ -208,8 +209,10 @@ static void __init __socfpga_gate_init(struct device_node *node,
40845 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
40846 socfpga_clk->hw.bit_idx = clk_gate[1];
40847
40848- gateclk_ops.enable = clk_gate_ops.enable;
40849- gateclk_ops.disable = clk_gate_ops.disable;
40850+ pax_open_kernel();
40851+ *(void **)&gateclk_ops.enable = clk_gate_ops.enable;
40852+ *(void **)&gateclk_ops.disable = clk_gate_ops.disable;
40853+ pax_close_kernel();
40854 }
40855
40856 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
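
The socfpga clk hunks (gate and PLL alike) move the ops tables into read-only memory and bracket the one legitimate late assignment with pax_open_kernel()/pax_close_kernel(), writing through a const-stripping cast for exactly that window. The same pattern recurs throughout this patch (cpufreq drivers, EDAC sysfs, EFI ops, gpiolib below). A minimal sketch — clk_ops_no_const, __read_only and the pax_*_kernel() helpers exist only with this patch applied:

#include <linux/clk-provider.h>

static clk_ops_no_const example_ops __read_only = {
    /* .enable filled in once at init, below */
};

static void example_bind_enable(int (*enable)(struct clk_hw *hw))
{
    pax_open_kernel();                       /* lift write protection */
    *(void **)&example_ops.enable = enable;  /* const-stripping write */
    pax_close_kernel();                      /* restore protection */
}
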
40857diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
40858index de6da95..c98278b 100644
40859--- a/drivers/clk/socfpga/clk-pll.c
40860+++ b/drivers/clk/socfpga/clk-pll.c
40861@@ -21,6 +21,7 @@
40862 #include <linux/io.h>
40863 #include <linux/of.h>
40864 #include <linux/of_address.h>
40865+#include <asm/pgtable.h>
40866
40867 #include "clk.h"
40868
40869@@ -76,7 +77,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
40870 CLK_MGR_PLL_CLK_SRC_MASK;
40871 }
40872
40873-static struct clk_ops clk_pll_ops = {
40874+static clk_ops_no_const clk_pll_ops __read_only = {
40875 .recalc_rate = clk_pll_recalc_rate,
40876 .get_parent = clk_pll_get_parent,
40877 };
40878@@ -120,8 +121,10 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
40879 pll_clk->hw.hw.init = &init;
40880
40881 pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
40882- clk_pll_ops.enable = clk_gate_ops.enable;
40883- clk_pll_ops.disable = clk_gate_ops.disable;
40884+ pax_open_kernel();
40885+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
40886+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
40887+ pax_close_kernel();
40888
40889 clk = clk_register(NULL, &pll_clk->hw.hw);
40890 if (WARN_ON(IS_ERR(clk))) {
40891diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
40892index b0c18ed..1713a80 100644
40893--- a/drivers/cpufreq/acpi-cpufreq.c
40894+++ b/drivers/cpufreq/acpi-cpufreq.c
40895@@ -675,8 +675,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
40896 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
40897 per_cpu(acfreq_data, cpu) = data;
40898
40899- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
40900- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
40901+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
40902+ pax_open_kernel();
40903+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
40904+ pax_close_kernel();
40905+ }
40906
40907 result = acpi_processor_register_performance(data->acpi_data, cpu);
40908 if (result)
40909@@ -809,7 +812,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
40910 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
40911 break;
40912 case ACPI_ADR_SPACE_FIXED_HARDWARE:
40913- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
40914+ pax_open_kernel();
40915+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
40916+ pax_close_kernel();
40917 break;
40918 default:
40919 break;
40920@@ -903,8 +908,10 @@ static void __init acpi_cpufreq_boost_init(void)
40921 if (!msrs)
40922 return;
40923
40924- acpi_cpufreq_driver.boost_supported = true;
40925- acpi_cpufreq_driver.boost_enabled = boost_state(0);
40926+ pax_open_kernel();
40927+ *(bool *)&acpi_cpufreq_driver.boost_supported = true;
40928+ *(bool *)&acpi_cpufreq_driver.boost_enabled = boost_state(0);
40929+ pax_close_kernel();
40930
40931 cpu_notifier_register_begin();
40932
40933diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
40934index 6f02485..13684ae 100644
40935--- a/drivers/cpufreq/cpufreq.c
40936+++ b/drivers/cpufreq/cpufreq.c
40937@@ -2100,7 +2100,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
40938 }
40939
40940 mutex_lock(&cpufreq_governor_mutex);
40941- list_del(&governor->governor_list);
40942+ pax_list_del(&governor->governor_list);
40943 mutex_unlock(&cpufreq_governor_mutex);
40944 return;
40945 }
40946@@ -2316,7 +2316,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
40947 return NOTIFY_OK;
40948 }
40949
40950-static struct notifier_block __refdata cpufreq_cpu_notifier = {
40951+static struct notifier_block cpufreq_cpu_notifier = {
40952 .notifier_call = cpufreq_cpu_callback,
40953 };
40954
40955@@ -2356,13 +2356,17 @@ int cpufreq_boost_trigger_state(int state)
40956 return 0;
40957
40958 write_lock_irqsave(&cpufreq_driver_lock, flags);
40959- cpufreq_driver->boost_enabled = state;
40960+ pax_open_kernel();
40961+ *(bool *)&cpufreq_driver->boost_enabled = state;
40962+ pax_close_kernel();
40963 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
40964
40965 ret = cpufreq_driver->set_boost(state);
40966 if (ret) {
40967 write_lock_irqsave(&cpufreq_driver_lock, flags);
40968- cpufreq_driver->boost_enabled = !state;
40969+ pax_open_kernel();
40970+ *(bool *)&cpufreq_driver->boost_enabled = !state;
40971+ pax_close_kernel();
40972 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
40973
40974 pr_err("%s: Cannot %s BOOST\n",
40975@@ -2419,8 +2423,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
40976
40977 pr_debug("trying to register driver %s\n", driver_data->name);
40978
40979- if (driver_data->setpolicy)
40980- driver_data->flags |= CPUFREQ_CONST_LOOPS;
40981+ if (driver_data->setpolicy) {
40982+ pax_open_kernel();
40983+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
40984+ pax_close_kernel();
40985+ }
40986
40987 write_lock_irqsave(&cpufreq_driver_lock, flags);
40988 if (cpufreq_driver) {
40989@@ -2435,8 +2442,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
40990 * Check if driver provides function to enable boost -
40991 * if not, use cpufreq_boost_set_sw as default
40992 */
40993- if (!cpufreq_driver->set_boost)
40994- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
40995+ if (!cpufreq_driver->set_boost) {
40996+ pax_open_kernel();
40997+ *(void **)&cpufreq_driver->set_boost = cpufreq_boost_set_sw;
40998+ pax_close_kernel();
40999+ }
41000
41001 ret = cpufreq_sysfs_create_file(&boost.attr);
41002 if (ret) {
41003diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
41004index 1b44496..b80ff5e 100644
41005--- a/drivers/cpufreq/cpufreq_governor.c
41006+++ b/drivers/cpufreq/cpufreq_governor.c
41007@@ -245,7 +245,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
41008 struct dbs_data *dbs_data;
41009 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
41010 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
41011- struct od_ops *od_ops = NULL;
41012+ const struct od_ops *od_ops = NULL;
41013 struct od_dbs_tuners *od_tuners = NULL;
41014 struct cs_dbs_tuners *cs_tuners = NULL;
41015 struct cpu_dbs_common_info *cpu_cdbs;
41016@@ -311,7 +311,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
41017
41018 if ((cdata->governor == GOV_CONSERVATIVE) &&
41019 (!policy->governor->initialized)) {
41020- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
41021+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
41022
41023 cpufreq_register_notifier(cs_ops->notifier_block,
41024 CPUFREQ_TRANSITION_NOTIFIER);
41025@@ -331,7 +331,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
41026
41027 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
41028 (policy->governor->initialized == 1)) {
41029- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
41030+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
41031
41032 cpufreq_unregister_notifier(cs_ops->notifier_block,
41033 CPUFREQ_TRANSITION_NOTIFIER);
41034diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
41035index cc401d1..8197340 100644
41036--- a/drivers/cpufreq/cpufreq_governor.h
41037+++ b/drivers/cpufreq/cpufreq_governor.h
41038@@ -212,7 +212,7 @@ struct common_dbs_data {
41039 void (*exit)(struct dbs_data *dbs_data);
41040
41041 /* Governor specific ops, see below */
41042- void *gov_ops;
41043+ const void *gov_ops;
41044 };
41045
41046 /* Governor Per policy data */
41047@@ -232,7 +232,7 @@ struct od_ops {
41048 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
41049 unsigned int freq_next, unsigned int relation);
41050 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
41051-};
41052+} __no_const;
41053
41054 struct cs_ops {
41055 struct notifier_block *notifier_block;
41056diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
41057index 18d4091..434be15 100644
41058--- a/drivers/cpufreq/cpufreq_ondemand.c
41059+++ b/drivers/cpufreq/cpufreq_ondemand.c
41060@@ -521,7 +521,7 @@ static void od_exit(struct dbs_data *dbs_data)
41061
41062 define_get_cpu_dbs_routines(od_cpu_dbs_info);
41063
41064-static struct od_ops od_ops = {
41065+static struct od_ops od_ops __read_only = {
41066 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
41067 .powersave_bias_target = generic_powersave_bias_target,
41068 .freq_increase = dbs_freq_increase,
41069@@ -576,14 +576,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
41070 (struct cpufreq_policy *, unsigned int, unsigned int),
41071 unsigned int powersave_bias)
41072 {
41073- od_ops.powersave_bias_target = f;
41074+ pax_open_kernel();
41075+ *(void **)&od_ops.powersave_bias_target = f;
41076+ pax_close_kernel();
41077 od_set_powersave_bias(powersave_bias);
41078 }
41079 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
41080
41081 void od_unregister_powersave_bias_handler(void)
41082 {
41083- od_ops.powersave_bias_target = generic_powersave_bias_target;
41084+ pax_open_kernel();
41085+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
41086+ pax_close_kernel();
41087 od_set_powersave_bias(0);
41088 }
41089 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
41090diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
41091index 86631cb..c34ec78 100644
41092--- a/drivers/cpufreq/intel_pstate.c
41093+++ b/drivers/cpufreq/intel_pstate.c
41094@@ -121,10 +121,10 @@ struct pstate_funcs {
41095 struct cpu_defaults {
41096 struct pstate_adjust_policy pid_policy;
41097 struct pstate_funcs funcs;
41098-};
41099+} __do_const;
41100
41101 static struct pstate_adjust_policy pid_params;
41102-static struct pstate_funcs pstate_funcs;
41103+static struct pstate_funcs *pstate_funcs;
41104
41105 struct perf_limits {
41106 int no_turbo;
41107@@ -526,7 +526,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
41108
41109 cpu->pstate.current_pstate = pstate;
41110
41111- pstate_funcs.set(cpu, pstate);
41112+ pstate_funcs->set(cpu, pstate);
41113 }
41114
41115 static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
41116@@ -546,12 +546,12 @@ static inline void intel_pstate_pstate_decrease(struct cpudata *cpu, int steps)
41117
41118 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
41119 {
41120- cpu->pstate.min_pstate = pstate_funcs.get_min();
41121- cpu->pstate.max_pstate = pstate_funcs.get_max();
41122- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
41123+ cpu->pstate.min_pstate = pstate_funcs->get_min();
41124+ cpu->pstate.max_pstate = pstate_funcs->get_max();
41125+ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
41126
41127- if (pstate_funcs.get_vid)
41128- pstate_funcs.get_vid(cpu);
41129+ if (pstate_funcs->get_vid)
41130+ pstate_funcs->get_vid(cpu);
41131 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
41132 }
41133
41134@@ -838,9 +838,9 @@ static int intel_pstate_msrs_not_valid(void)
41135 rdmsrl(MSR_IA32_APERF, aperf);
41136 rdmsrl(MSR_IA32_MPERF, mperf);
41137
41138- if (!pstate_funcs.get_max() ||
41139- !pstate_funcs.get_min() ||
41140- !pstate_funcs.get_turbo())
41141+ if (!pstate_funcs->get_max() ||
41142+ !pstate_funcs->get_min() ||
41143+ !pstate_funcs->get_turbo())
41144 return -ENODEV;
41145
41146 rdmsrl(MSR_IA32_APERF, tmp);
41147@@ -854,7 +854,7 @@ static int intel_pstate_msrs_not_valid(void)
41148 return 0;
41149 }
41150
41151-static void copy_pid_params(struct pstate_adjust_policy *policy)
41152+static void copy_pid_params(const struct pstate_adjust_policy *policy)
41153 {
41154 pid_params.sample_rate_ms = policy->sample_rate_ms;
41155 pid_params.p_gain_pct = policy->p_gain_pct;
41156@@ -866,11 +866,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
41157
41158 static void copy_cpu_funcs(struct pstate_funcs *funcs)
41159 {
41160- pstate_funcs.get_max = funcs->get_max;
41161- pstate_funcs.get_min = funcs->get_min;
41162- pstate_funcs.get_turbo = funcs->get_turbo;
41163- pstate_funcs.set = funcs->set;
41164- pstate_funcs.get_vid = funcs->get_vid;
41165+ pstate_funcs = funcs;
41166 }
41167
41168 #if IS_ENABLED(CONFIG_ACPI)
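
intel_pstate previously copied five callbacks field by field into a writable static struct; the hunk replaces that with a single pointer to the table chosen at init, removing a set of writable function-pointer slots. A condensed sketch of the indirection swap, with hypothetical names:

struct pstate_funcs_example {
    int (*get_max)(void);
    int (*get_min)(void);
};

static const struct pstate_funcs_example *pfuncs_example;

static void select_funcs(const struct pstate_funcs_example *funcs)
{
    pfuncs_example = funcs;   /* replaces five field-by-field copies */
}

static int current_max_example(void)
{
    return pfuncs_example->get_max();  /* call through the indirection */
}
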
41169diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
41170index 529cfd9..0e28fff 100644
41171--- a/drivers/cpufreq/p4-clockmod.c
41172+++ b/drivers/cpufreq/p4-clockmod.c
41173@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
41174 case 0x0F: /* Core Duo */
41175 case 0x16: /* Celeron Core */
41176 case 0x1C: /* Atom */
41177- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41178+ pax_open_kernel();
41179+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41180+ pax_close_kernel();
41181 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
41182 case 0x0D: /* Pentium M (Dothan) */
41183- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41184+ pax_open_kernel();
41185+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41186+ pax_close_kernel();
41187 /* fall through */
41188 case 0x09: /* Pentium M (Banias) */
41189 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
41190@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
41191
41192 /* on P-4s, the TSC runs with constant frequency independent whether
41193 * throttling is active or not. */
41194- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41195+ pax_open_kernel();
41196+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41197+ pax_close_kernel();
41198
41199 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
41200 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
41201diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
41202index 9bb42ba..b01b4a2 100644
41203--- a/drivers/cpufreq/sparc-us3-cpufreq.c
41204+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
41205@@ -18,14 +18,12 @@
41206 #include <asm/head.h>
41207 #include <asm/timer.h>
41208
41209-static struct cpufreq_driver *cpufreq_us3_driver;
41210-
41211 struct us3_freq_percpu_info {
41212 struct cpufreq_frequency_table table[4];
41213 };
41214
41215 /* Indexed by cpu number. */
41216-static struct us3_freq_percpu_info *us3_freq_table;
41217+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
41218
41219 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
41220 * in the Safari config register.
41221@@ -156,16 +154,27 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
41222
41223 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
41224 {
41225- if (cpufreq_us3_driver)
41226- us3_freq_target(policy, 0);
41227+ us3_freq_target(policy, 0);
41228
41229 return 0;
41230 }
41231
41232+static int __init us3_freq_init(void);
41233+static void __exit us3_freq_exit(void);
41234+
41235+static struct cpufreq_driver cpufreq_us3_driver = {
41236+ .init = us3_freq_cpu_init,
41237+ .verify = cpufreq_generic_frequency_table_verify,
41238+ .target_index = us3_freq_target,
41239+ .get = us3_freq_get,
41240+ .exit = us3_freq_cpu_exit,
41241+ .name = "UltraSPARC-III",
41242+
41243+};
41244+
41245 static int __init us3_freq_init(void)
41246 {
41247 unsigned long manuf, impl, ver;
41248- int ret;
41249
41250 if (tlb_type != cheetah && tlb_type != cheetah_plus)
41251 return -ENODEV;
41252@@ -178,55 +187,15 @@ static int __init us3_freq_init(void)
41253 (impl == CHEETAH_IMPL ||
41254 impl == CHEETAH_PLUS_IMPL ||
41255 impl == JAGUAR_IMPL ||
41256- impl == PANTHER_IMPL)) {
41257- struct cpufreq_driver *driver;
41258-
41259- ret = -ENOMEM;
41260- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
41261- if (!driver)
41262- goto err_out;
41263-
41264- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
41265- GFP_KERNEL);
41266- if (!us3_freq_table)
41267- goto err_out;
41268-
41269- driver->init = us3_freq_cpu_init;
41270- driver->verify = cpufreq_generic_frequency_table_verify;
41271- driver->target_index = us3_freq_target;
41272- driver->get = us3_freq_get;
41273- driver->exit = us3_freq_cpu_exit;
41274- strcpy(driver->name, "UltraSPARC-III");
41275-
41276- cpufreq_us3_driver = driver;
41277- ret = cpufreq_register_driver(driver);
41278- if (ret)
41279- goto err_out;
41280-
41281- return 0;
41282-
41283-err_out:
41284- if (driver) {
41285- kfree(driver);
41286- cpufreq_us3_driver = NULL;
41287- }
41288- kfree(us3_freq_table);
41289- us3_freq_table = NULL;
41290- return ret;
41291- }
41292+ impl == PANTHER_IMPL))
41293+ return cpufreq_register_driver(&cpufreq_us3_driver);
41294
41295 return -ENODEV;
41296 }
41297
41298 static void __exit us3_freq_exit(void)
41299 {
41300- if (cpufreq_us3_driver) {
41301- cpufreq_unregister_driver(cpufreq_us3_driver);
41302- kfree(cpufreq_us3_driver);
41303- cpufreq_us3_driver = NULL;
41304- kfree(us3_freq_table);
41305- us3_freq_table = NULL;
41306- }
41307+ cpufreq_unregister_driver(&cpufreq_us3_driver);
41308 }
41309
41310 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
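
The sparc-us3 rewrite replaces a kzalloc'd, runtime-populated cpufreq_driver with a fully static one: callbacks bind at compile time, the allocation error paths disappear, and the function pointers never live in writable heap memory; the per-CPU table becomes a fixed NR_CPUS array for the same reason. Condensed from the hunk itself, the resulting registration shape is simply:

/* Compile-time binding, trivial init/exit; the callback names are the
 * ones the patch itself installs. */
#include <linux/cpufreq.h>

static struct cpufreq_driver example_us3_driver = {
    .init         = us3_freq_cpu_init,
    .verify       = cpufreq_generic_frequency_table_verify,
    .target_index = us3_freq_target,
    .get          = us3_freq_get,
    .exit         = us3_freq_cpu_exit,
    .name         = "UltraSPARC-III",
};

static int __init example_init(void)
{
    return cpufreq_register_driver(&example_us3_driver);
}

static void __exit example_exit(void)
{
    cpufreq_unregister_driver(&example_us3_driver);
}
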
41311diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
41312index 7d4a315..21bb886 100644
41313--- a/drivers/cpufreq/speedstep-centrino.c
41314+++ b/drivers/cpufreq/speedstep-centrino.c
41315@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
41316 !cpu_has(cpu, X86_FEATURE_EST))
41317 return -ENODEV;
41318
41319- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
41320- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
41321+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
41322+ pax_open_kernel();
41323+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
41324+ pax_close_kernel();
41325+ }
41326
41327 if (policy->cpu != 0)
41328 return -ENODEV;
41329diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
41330index 9634f20..e1499c7 100644
41331--- a/drivers/cpuidle/driver.c
41332+++ b/drivers/cpuidle/driver.c
41333@@ -205,7 +205,7 @@ static int poll_idle(struct cpuidle_device *dev,
41334
41335 static void poll_idle_init(struct cpuidle_driver *drv)
41336 {
41337- struct cpuidle_state *state = &drv->states[0];
41338+ cpuidle_state_no_const *state = &drv->states[0];
41339
41340 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
41341 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
41342diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
41343index ca89412..a7b9c49 100644
41344--- a/drivers/cpuidle/governor.c
41345+++ b/drivers/cpuidle/governor.c
41346@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
41347 mutex_lock(&cpuidle_lock);
41348 if (__cpuidle_find_governor(gov->name) == NULL) {
41349 ret = 0;
41350- list_add_tail(&gov->governor_list, &cpuidle_governors);
41351+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
41352 if (!cpuidle_curr_governor ||
41353 cpuidle_curr_governor->rating < gov->rating)
41354 cpuidle_switch_governor(gov);
41355diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
41356index efe2f17..b8124f9 100644
41357--- a/drivers/cpuidle/sysfs.c
41358+++ b/drivers/cpuidle/sysfs.c
41359@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
41360 NULL
41361 };
41362
41363-static struct attribute_group cpuidle_attr_group = {
41364+static attribute_group_no_const cpuidle_attr_group = {
41365 .attrs = cpuidle_default_attrs,
41366 .name = "cpuidle",
41367 };
41368diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
41369index 12fea3e..1e28f47 100644
41370--- a/drivers/crypto/hifn_795x.c
41371+++ b/drivers/crypto/hifn_795x.c
41372@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
41373 MODULE_PARM_DESC(hifn_pll_ref,
41374 "PLL reference clock (pci[freq] or ext[freq], default ext)");
41375
41376-static atomic_t hifn_dev_number;
41377+static atomic_unchecked_t hifn_dev_number;
41378
41379 #define ACRYPTO_OP_DECRYPT 0
41380 #define ACRYPTO_OP_ENCRYPT 1
41381@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
41382 goto err_out_disable_pci_device;
41383
41384 snprintf(name, sizeof(name), "hifn%d",
41385- atomic_inc_return(&hifn_dev_number)-1);
41386+ atomic_inc_return_unchecked(&hifn_dev_number)-1);
41387
41388 err = pci_request_regions(pdev, name);
41389 if (err)
41390diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
41391index 9f90369..bfcacdb 100644
41392--- a/drivers/devfreq/devfreq.c
41393+++ b/drivers/devfreq/devfreq.c
41394@@ -673,7 +673,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
41395 goto err_out;
41396 }
41397
41398- list_add(&governor->node, &devfreq_governor_list);
41399+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
41400
41401 list_for_each_entry(devfreq, &devfreq_list, node) {
41402 int ret = 0;
41403@@ -761,7 +761,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
41404 }
41405 }
41406
41407- list_del(&governor->node);
41408+ pax_list_del((struct list_head *)&governor->node);
41409 err_out:
41410 mutex_unlock(&devfreq_list_lock);
41411
41412diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
41413index b35007e..55ad549 100644
41414--- a/drivers/dma/sh/shdma-base.c
41415+++ b/drivers/dma/sh/shdma-base.c
41416@@ -267,8 +267,8 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
41417 schan->slave_id = -EINVAL;
41418 }
41419
41420- schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
41421- sdev->desc_size, GFP_KERNEL);
41422+ schan->desc = kcalloc(sdev->desc_size,
41423+ NR_DESCS_PER_CHANNEL, GFP_KERNEL);
41424 if (!schan->desc) {
41425 ret = -ENOMEM;
41426 goto edescalloc;
41427diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
41428index 146d5df..3c14970 100644
41429--- a/drivers/dma/sh/shdmac.c
41430+++ b/drivers/dma/sh/shdmac.c
41431@@ -514,7 +514,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
41432 return ret;
41433 }
41434
41435-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
41436+static struct notifier_block sh_dmae_nmi_notifier = {
41437 .notifier_call = sh_dmae_nmi_handler,
41438
41439 /* Run before NMI debug handler and KGDB */
41440diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
41441index 592af5f..bb1d583 100644
41442--- a/drivers/edac/edac_device.c
41443+++ b/drivers/edac/edac_device.c
41444@@ -477,9 +477,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
41445 */
41446 int edac_device_alloc_index(void)
41447 {
41448- static atomic_t device_indexes = ATOMIC_INIT(0);
41449+ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
41450
41451- return atomic_inc_return(&device_indexes) - 1;
41452+ return atomic_inc_return_unchecked(&device_indexes) - 1;
41453 }
41454 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
41455
41456diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
41457index 01fae82..1dd8289 100644
41458--- a/drivers/edac/edac_mc_sysfs.c
41459+++ b/drivers/edac/edac_mc_sysfs.c
41460@@ -152,7 +152,7 @@ static const char * const edac_caps[] = {
41461 struct dev_ch_attribute {
41462 struct device_attribute attr;
41463 int channel;
41464-};
41465+} __do_const;
41466
41467 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
41468 struct dev_ch_attribute dev_attr_legacy_##_name = \
41469@@ -1009,14 +1009,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
41470 }
41471
41472 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
41473+ pax_open_kernel();
41474 if (mci->get_sdram_scrub_rate) {
41475- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
41476- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
41477+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
41478+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
41479 }
41480 if (mci->set_sdram_scrub_rate) {
41481- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
41482- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
41483+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
41484+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
41485 }
41486+ pax_close_kernel();
41487 err = device_create_file(&mci->dev,
41488 &dev_attr_sdram_scrub_rate);
41489 if (err) {
41490diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
41491index 2cf44b4d..6dd2dc7 100644
41492--- a/drivers/edac/edac_pci.c
41493+++ b/drivers/edac/edac_pci.c
41494@@ -29,7 +29,7 @@
41495
41496 static DEFINE_MUTEX(edac_pci_ctls_mutex);
41497 static LIST_HEAD(edac_pci_list);
41498-static atomic_t pci_indexes = ATOMIC_INIT(0);
41499+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
41500
41501 /*
41502 * edac_pci_alloc_ctl_info
41503@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
41504 */
41505 int edac_pci_alloc_index(void)
41506 {
41507- return atomic_inc_return(&pci_indexes) - 1;
41508+ return atomic_inc_return_unchecked(&pci_indexes) - 1;
41509 }
41510 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
41511
41512diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
41513index e8658e4..22746d6 100644
41514--- a/drivers/edac/edac_pci_sysfs.c
41515+++ b/drivers/edac/edac_pci_sysfs.c
41516@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
41517 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
41518 static int edac_pci_poll_msec = 1000; /* one second workq period */
41519
41520-static atomic_t pci_parity_count = ATOMIC_INIT(0);
41521-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
41522+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
41523+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
41524
41525 static struct kobject *edac_pci_top_main_kobj;
41526 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
41527@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
41528 void *value;
41529 ssize_t(*show) (void *, char *);
41530 ssize_t(*store) (void *, const char *, size_t);
41531-};
41532+} __do_const;
41533
41534 /* Set of show/store abstract level functions for PCI Parity object */
41535 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
41536@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41537 edac_printk(KERN_CRIT, EDAC_PCI,
41538 "Signaled System Error on %s\n",
41539 pci_name(dev));
41540- atomic_inc(&pci_nonparity_count);
41541+ atomic_inc_unchecked(&pci_nonparity_count);
41542 }
41543
41544 if (status & (PCI_STATUS_PARITY)) {
41545@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41546 "Master Data Parity Error on %s\n",
41547 pci_name(dev));
41548
41549- atomic_inc(&pci_parity_count);
41550+ atomic_inc_unchecked(&pci_parity_count);
41551 }
41552
41553 if (status & (PCI_STATUS_DETECTED_PARITY)) {
41554@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41555 "Detected Parity Error on %s\n",
41556 pci_name(dev));
41557
41558- atomic_inc(&pci_parity_count);
41559+ atomic_inc_unchecked(&pci_parity_count);
41560 }
41561 }
41562
41563@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41564 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
41565 "Signaled System Error on %s\n",
41566 pci_name(dev));
41567- atomic_inc(&pci_nonparity_count);
41568+ atomic_inc_unchecked(&pci_nonparity_count);
41569 }
41570
41571 if (status & (PCI_STATUS_PARITY)) {
41572@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41573 "Master Data Parity Error on "
41574 "%s\n", pci_name(dev));
41575
41576- atomic_inc(&pci_parity_count);
41577+ atomic_inc_unchecked(&pci_parity_count);
41578 }
41579
41580 if (status & (PCI_STATUS_DETECTED_PARITY)) {
41581@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41582 "Detected Parity Error on %s\n",
41583 pci_name(dev));
41584
41585- atomic_inc(&pci_parity_count);
41586+ atomic_inc_unchecked(&pci_parity_count);
41587 }
41588 }
41589 }
41590@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
41591 if (!check_pci_errors)
41592 return;
41593
41594- before_count = atomic_read(&pci_parity_count);
41595+ before_count = atomic_read_unchecked(&pci_parity_count);
41596
41597 /* scan all PCI devices looking for a Parity Error on devices and
41598 * bridges.
41599@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
41600 /* Only if operator has selected panic on PCI Error */
41601 if (edac_pci_get_panic_on_pe()) {
41602 /* If the count is different 'after' from 'before' */
41603- if (before_count != atomic_read(&pci_parity_count))
41604+ if (before_count != atomic_read_unchecked(&pci_parity_count))
41605 panic("EDAC: PCI Parity Error");
41606 }
41607 }
41608diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
41609index 51b7e3a..aa8a3e8 100644
41610--- a/drivers/edac/mce_amd.h
41611+++ b/drivers/edac/mce_amd.h
41612@@ -77,7 +77,7 @@ struct amd_decoder_ops {
41613 bool (*mc0_mce)(u16, u8);
41614 bool (*mc1_mce)(u16, u8);
41615 bool (*mc2_mce)(u16, u8);
41616-};
41617+} __no_const;
41618
41619 void amd_report_gart_errors(bool);
41620 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
41621diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
41622index 57ea7f4..af06b76 100644
41623--- a/drivers/firewire/core-card.c
41624+++ b/drivers/firewire/core-card.c
41625@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
41626 const struct fw_card_driver *driver,
41627 struct device *device)
41628 {
41629- static atomic_t index = ATOMIC_INIT(-1);
41630+ static atomic_unchecked_t index = ATOMIC_INIT(-1);
41631
41632- card->index = atomic_inc_return(&index);
41633+ card->index = atomic_inc_return_unchecked(&index);
41634 card->driver = driver;
41635 card->device = device;
41636 card->current_tlabel = 0;
41637@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
41638
41639 void fw_core_remove_card(struct fw_card *card)
41640 {
41641- struct fw_card_driver dummy_driver = dummy_driver_template;
41642+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
41643
41644 card->driver->update_phy_reg(card, 4,
41645 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
41646diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
41647index 2c6d5e1..a2cca6b 100644
41648--- a/drivers/firewire/core-device.c
41649+++ b/drivers/firewire/core-device.c
41650@@ -253,7 +253,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
41651 struct config_rom_attribute {
41652 struct device_attribute attr;
41653 u32 key;
41654-};
41655+} __do_const;
41656
41657 static ssize_t show_immediate(struct device *dev,
41658 struct device_attribute *dattr, char *buf)
41659diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
41660index eb6935c..3cc2bfa 100644
41661--- a/drivers/firewire/core-transaction.c
41662+++ b/drivers/firewire/core-transaction.c
41663@@ -38,6 +38,7 @@
41664 #include <linux/timer.h>
41665 #include <linux/types.h>
41666 #include <linux/workqueue.h>
41667+#include <linux/sched.h>
41668
41669 #include <asm/byteorder.h>
41670
41671diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
41672index e1480ff6..1a429bd 100644
41673--- a/drivers/firewire/core.h
41674+++ b/drivers/firewire/core.h
41675@@ -111,6 +111,7 @@ struct fw_card_driver {
41676
41677 int (*stop_iso)(struct fw_iso_context *ctx);
41678 };
41679+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
41680
41681 void fw_card_initialize(struct fw_card *card,
41682 const struct fw_card_driver *driver, struct device *device);
41683diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
41684index a66a321..f6caf20 100644
41685--- a/drivers/firewire/ohci.c
41686+++ b/drivers/firewire/ohci.c
41687@@ -2056,10 +2056,12 @@ static void bus_reset_work(struct work_struct *work)
41688 be32_to_cpu(ohci->next_header));
41689 }
41690
41691+#ifndef CONFIG_GRKERNSEC
41692 if (param_remote_dma) {
41693 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
41694 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
41695 }
41696+#endif
41697
41698 spin_unlock_irq(&ohci->lock);
41699
41700@@ -2591,8 +2593,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
41701 unsigned long flags;
41702 int n, ret = 0;
41703
41704+#ifndef CONFIG_GRKERNSEC
41705 if (param_remote_dma)
41706 return 0;
41707+#endif
41708
41709 /*
41710 * FIXME: Make sure this bitmask is cleared when we clear the busReset
41711diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
41712index 94a58a0..f5eba42 100644
41713--- a/drivers/firmware/dmi-id.c
41714+++ b/drivers/firmware/dmi-id.c
41715@@ -16,7 +16,7 @@
41716 struct dmi_device_attribute{
41717 struct device_attribute dev_attr;
41718 int field;
41719-};
41720+} __do_const;
41721 #define to_dmi_dev_attr(_dev_attr) \
41722 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
41723
41724diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
41725index 17afc51..0ef90cd 100644
41726--- a/drivers/firmware/dmi_scan.c
41727+++ b/drivers/firmware/dmi_scan.c
41728@@ -835,7 +835,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
41729 if (buf == NULL)
41730 return -1;
41731
41732- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
41733+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
41734
41735 dmi_unmap(buf);
41736 return 0;
41737diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
41738index 1491dd4..aa910db 100644
41739--- a/drivers/firmware/efi/cper.c
41740+++ b/drivers/firmware/efi/cper.c
41741@@ -41,12 +41,12 @@
41742 */
41743 u64 cper_next_record_id(void)
41744 {
41745- static atomic64_t seq;
41746+ static atomic64_unchecked_t seq;
41747
41748- if (!atomic64_read(&seq))
41749- atomic64_set(&seq, ((u64)get_seconds()) << 32);
41750+ if (!atomic64_read_unchecked(&seq))
41751+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
41752
41753- return atomic64_inc_return(&seq);
41754+ return atomic64_inc_return_unchecked(&seq);
41755 }
41756 EXPORT_SYMBOL_GPL(cper_next_record_id);
41757
41758diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
41759index dc79346..b39bd69 100644
41760--- a/drivers/firmware/efi/efi.c
41761+++ b/drivers/firmware/efi/efi.c
41762@@ -122,14 +122,16 @@ static struct attribute_group efi_subsys_attr_group = {
41763 };
41764
41765 static struct efivars generic_efivars;
41766-static struct efivar_operations generic_ops;
41767+static efivar_operations_no_const generic_ops __read_only;
41768
41769 static int generic_ops_register(void)
41770 {
41771- generic_ops.get_variable = efi.get_variable;
41772- generic_ops.set_variable = efi.set_variable;
41773- generic_ops.get_next_variable = efi.get_next_variable;
41774- generic_ops.query_variable_store = efi_query_variable_store;
41775+ pax_open_kernel();
41776+ *(void **)&generic_ops.get_variable = efi.get_variable;
41777+ *(void **)&generic_ops.set_variable = efi.set_variable;
41778+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
41779+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
41780+ pax_close_kernel();
41781
41782 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
41783 }
41784diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
41785index 463c565..02a5640 100644
41786--- a/drivers/firmware/efi/efivars.c
41787+++ b/drivers/firmware/efi/efivars.c
41788@@ -588,7 +588,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
41789 static int
41790 create_efivars_bin_attributes(void)
41791 {
41792- struct bin_attribute *attr;
41793+ bin_attribute_no_const *attr;
41794 int error;
41795
41796 /* new_var */
41797diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
41798index 2f569aa..c95f4fb 100644
41799--- a/drivers/firmware/google/memconsole.c
41800+++ b/drivers/firmware/google/memconsole.c
41801@@ -155,7 +155,10 @@ static int __init memconsole_init(void)
41802 if (!found_memconsole())
41803 return -ENODEV;
41804
41805- memconsole_bin_attr.size = memconsole_length;
41806+ pax_open_kernel();
41807+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
41808+ pax_close_kernel();
41809+
41810 return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
41811 }
41812
41813diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
41814index cde3605..8b69df7 100644
41815--- a/drivers/gpio/gpio-em.c
41816+++ b/drivers/gpio/gpio-em.c
41817@@ -278,7 +278,7 @@ static int em_gio_probe(struct platform_device *pdev)
41818 struct em_gio_priv *p;
41819 struct resource *io[2], *irq[2];
41820 struct gpio_chip *gpio_chip;
41821- struct irq_chip *irq_chip;
41822+ irq_chip_no_const *irq_chip;
41823 const char *name = dev_name(&pdev->dev);
41824 int ret;
41825
41826diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
41827index 7030422..42a3fe9 100644
41828--- a/drivers/gpio/gpio-ich.c
41829+++ b/drivers/gpio/gpio-ich.c
41830@@ -94,7 +94,7 @@ struct ichx_desc {
41831 * this option allows driver caching written output values
41832 */
41833 bool use_outlvl_cache;
41834-};
41835+} __do_const;
41836
41837 static struct {
41838 spinlock_t lock;
41839diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
41840index b6ae89e..ac7349c 100644
41841--- a/drivers/gpio/gpio-rcar.c
41842+++ b/drivers/gpio/gpio-rcar.c
41843@@ -357,7 +357,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
41844 struct gpio_rcar_priv *p;
41845 struct resource *io, *irq;
41846 struct gpio_chip *gpio_chip;
41847- struct irq_chip *irq_chip;
41848+ irq_chip_no_const *irq_chip;
41849 struct device *dev = &pdev->dev;
41850 const char *name = dev_name(dev);
41851 int ret;
41852diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
41853index 66cbcc1..0c5e622 100644
41854--- a/drivers/gpio/gpio-vr41xx.c
41855+++ b/drivers/gpio/gpio-vr41xx.c
41856@@ -224,7 +224,7 @@ static int giu_get_irq(unsigned int irq)
41857 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
41858 maskl, pendl, maskh, pendh);
41859
41860- atomic_inc(&irq_err_count);
41861+ atomic_inc_unchecked(&irq_err_count);
41862
41863 return -EINVAL;
41864 }
41865diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
41866index 2ebc907..01bdd6e 100644
41867--- a/drivers/gpio/gpiolib.c
41868+++ b/drivers/gpio/gpiolib.c
41869@@ -1482,8 +1482,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
41870 }
41871
41872 if (gpiochip->irqchip) {
41873- gpiochip->irqchip->irq_request_resources = NULL;
41874- gpiochip->irqchip->irq_release_resources = NULL;
41875+ pax_open_kernel();
41876+ *(void **)&gpiochip->irqchip->irq_request_resources = NULL;
41877+ *(void **)&gpiochip->irqchip->irq_release_resources = NULL;
41878+ pax_close_kernel();
41879 gpiochip->irqchip = NULL;
41880 }
41881 }
41882@@ -1549,8 +1551,11 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
41883 gpiochip->irqchip = NULL;
41884 return -EINVAL;
41885 }
41886- irqchip->irq_request_resources = gpiochip_irq_reqres;
41887- irqchip->irq_release_resources = gpiochip_irq_relres;
41888+
41889+ pax_open_kernel();
41890+ *(void **)&irqchip->irq_request_resources = gpiochip_irq_reqres;
41891+ *(void **)&irqchip->irq_release_resources = gpiochip_irq_relres;
41892+ pax_close_kernel();
41893
41894 /*
41895 * Prepare the mapping since the irqchip shall be orthogonal to
41896diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
41897index fe94cc1..5e697b3 100644
41898--- a/drivers/gpu/drm/drm_crtc.c
41899+++ b/drivers/gpu/drm/drm_crtc.c
41900@@ -3584,7 +3584,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
41901 goto done;
41902 }
41903
41904- if (copy_to_user(&enum_ptr[copied].name,
41905+ if (copy_to_user(enum_ptr[copied].name,
41906 &prop_enum->name, DRM_PROP_NAME_LEN)) {
41907 ret = -EFAULT;
41908 goto done;
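[Note] The drm_crtc change swaps &enum_ptr[copied].name for enum_ptr[copied].name. For an array member the two denote the same address but different types: char (*)[N] versus the decayed char *; the decayed form appears to be preferred here so the pointer types sparse sees in the __user-annotated call stay consistent. The type split in plain userspace C (the struct is a stand-in):

    struct prop_enum { char name[32]; };

    static void demo(struct prop_enum *e)
    {
            char (*whole)[32] = &e->name; /* pointer to the array object   */
            char *first = e->name;        /* pointer to its first element  */
            (void)whole; (void)first;     /* same address, distinct types  */
    }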
41909diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
41910index 8218078..9960928a 100644
41911--- a/drivers/gpu/drm/drm_drv.c
41912+++ b/drivers/gpu/drm/drm_drv.c
41913@@ -233,7 +233,7 @@ module_exit(drm_core_exit);
41914 /**
41915 * Copy and IOCTL return string to user space
41916 */
41917-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
41918+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
41919 {
41920 int len;
41921
41922@@ -342,7 +342,7 @@ long drm_ioctl(struct file *filp,
41923 struct drm_file *file_priv = filp->private_data;
41924 struct drm_device *dev;
41925 const struct drm_ioctl_desc *ioctl = NULL;
41926- drm_ioctl_t *func;
41927+ drm_ioctl_no_const_t func;
41928 unsigned int nr = DRM_IOCTL_NR(cmd);
41929 int retcode = -EINVAL;
41930 char stack_kdata[128];
41931diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
41932index 021fe5d..abc9ce6 100644
41933--- a/drivers/gpu/drm/drm_fops.c
41934+++ b/drivers/gpu/drm/drm_fops.c
41935@@ -88,7 +88,7 @@ int drm_open(struct inode *inode, struct file *filp)
41936 return PTR_ERR(minor);
41937
41938 dev = minor->dev;
41939- if (!dev->open_count++)
41940+ if (local_inc_return(&dev->open_count) == 1)
41941 need_setup = 1;
41942
41943 /* share address_space across all char-devs of a single device */
41944@@ -105,7 +105,7 @@ int drm_open(struct inode *inode, struct file *filp)
41945 return 0;
41946
41947 err_undo:
41948- dev->open_count--;
41949+ local_dec(&dev->open_count);
41950 drm_minor_release(minor);
41951 return retcode;
41952 }
41953@@ -427,7 +427,7 @@ int drm_release(struct inode *inode, struct file *filp)
41954
41955 mutex_lock(&drm_global_mutex);
41956
41957- DRM_DEBUG("open_count = %d\n", dev->open_count);
41958+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
41959
41960 if (dev->driver->preclose)
41961 dev->driver->preclose(dev, file_priv);
41962@@ -436,10 +436,10 @@ int drm_release(struct inode *inode, struct file *filp)
41963 * Begin inline drm_release
41964 */
41965
41966- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
41967+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
41968 task_pid_nr(current),
41969 (long)old_encode_dev(file_priv->minor->kdev->devt),
41970- dev->open_count);
41971+ local_read(&dev->open_count));
41972
41973 /* Release any auth tokens that might point to this file_priv,
41974 (do that under the drm_global_mutex) */
41975@@ -540,7 +540,7 @@ int drm_release(struct inode *inode, struct file *filp)
41976 * End inline drm_release
41977 */
41978
41979- if (!--dev->open_count) {
41980+ if (local_dec_and_test(&dev->open_count)) {
41981 retcode = drm_lastclose(dev);
41982 if (drm_device_is_unplugged(dev))
41983 drm_put_dev(dev);
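[Note] dev->open_count moves from a bare int to a local_t across drm_fops.c, drm_stub.c and the switcheroo can_switch callbacks below, so the ++/-- pairs become atomic read-modify-write operations and the debug format strings widen to %ld. The open/release shape, as a userspace stand-in using C11 atomics (local_t itself is the kernel's per-CPU-safe type from asm/local.h):

    #include <stdatomic.h>

    static atomic_long open_count;

    static void on_open(void)
    {
            if (atomic_fetch_add(&open_count, 1) + 1 == 1) {
                    /* first opener: one-time setup */
            }
    }

    static void on_release(void)
    {
            if (atomic_fetch_sub(&open_count, 1) - 1 == 0) {
                    /* last closer: tear down */
            }
    }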
41984diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
41985index 3d2e91c..d31c4c9 100644
41986--- a/drivers/gpu/drm/drm_global.c
41987+++ b/drivers/gpu/drm/drm_global.c
41988@@ -36,7 +36,7 @@
41989 struct drm_global_item {
41990 struct mutex mutex;
41991 void *object;
41992- int refcount;
41993+ atomic_t refcount;
41994 };
41995
41996 static struct drm_global_item glob[DRM_GLOBAL_NUM];
41997@@ -49,7 +49,7 @@ void drm_global_init(void)
41998 struct drm_global_item *item = &glob[i];
41999 mutex_init(&item->mutex);
42000 item->object = NULL;
42001- item->refcount = 0;
42002+ atomic_set(&item->refcount, 0);
42003 }
42004 }
42005
42006@@ -59,7 +59,7 @@ void drm_global_release(void)
42007 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
42008 struct drm_global_item *item = &glob[i];
42009 BUG_ON(item->object != NULL);
42010- BUG_ON(item->refcount != 0);
42011+ BUG_ON(atomic_read(&item->refcount) != 0);
42012 }
42013 }
42014
42015@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
42016 struct drm_global_item *item = &glob[ref->global_type];
42017
42018 mutex_lock(&item->mutex);
42019- if (item->refcount == 0) {
42020+ if (atomic_read(&item->refcount) == 0) {
42021 item->object = kzalloc(ref->size, GFP_KERNEL);
42022 if (unlikely(item->object == NULL)) {
42023 ret = -ENOMEM;
42024@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
42025 goto out_err;
42026
42027 }
42028- ++item->refcount;
42029+ atomic_inc(&item->refcount);
42030 ref->object = item->object;
42031 mutex_unlock(&item->mutex);
42032 return 0;
42033@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
42034 struct drm_global_item *item = &glob[ref->global_type];
42035
42036 mutex_lock(&item->mutex);
42037- BUG_ON(item->refcount == 0);
42038+ BUG_ON(atomic_read(&item->refcount) == 0);
42039 BUG_ON(ref->object != item->object);
42040- if (--item->refcount == 0) {
42041+ if (atomic_dec_and_test(&item->refcount)) {
42042 ref->release(ref);
42043 item->object = NULL;
42044 }
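[Note] drm_global's refcount was a plain int guarded by the per-item mutex; converting it to atomic_t hardens the count independently of the lock. The canonical release-on-last-reference shape, in a compilable stand-in:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct item { atomic_int refcount; void *object; };

    static void item_unref(struct item *it)
    {
            /* exactly one caller observes the 1 -> 0 transition */
            if (atomic_fetch_sub(&it->refcount, 1) == 1) {
                    free(it->object);
                    it->object = NULL;
            }
    }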
42045diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
42046index 86feedd..cba70f5 100644
42047--- a/drivers/gpu/drm/drm_info.c
42048+++ b/drivers/gpu/drm/drm_info.c
42049@@ -73,10 +73,13 @@ int drm_vm_info(struct seq_file *m, void *data)
42050 struct drm_local_map *map;
42051 struct drm_map_list *r_list;
42052
42053- /* Hardcoded from _DRM_FRAME_BUFFER,
42054- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
42055- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
42056- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
42057+ static const char * const types[] = {
42058+ [_DRM_FRAME_BUFFER] = "FB",
42059+ [_DRM_REGISTERS] = "REG",
42060+ [_DRM_SHM] = "SHM",
42061+ [_DRM_AGP] = "AGP",
42062+ [_DRM_SCATTER_GATHER] = "SG",
42063+ [_DRM_CONSISTENT] = "PCI"};
42064 const char *type;
42065 int i;
42066
42067@@ -87,7 +90,7 @@ int drm_vm_info(struct seq_file *m, void *data)
42068 map = r_list->map;
42069 if (!map)
42070 continue;
42071- if (map->type < 0 || map->type > 5)
42072+ if (map->type >= ARRAY_SIZE(types))
42073 type = "??";
42074 else
42075 type = types[map->type];
42076@@ -259,7 +262,11 @@ int drm_vma_info(struct seq_file *m, void *data)
42077 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
42078 vma->vm_flags & VM_LOCKED ? 'l' : '-',
42079 vma->vm_flags & VM_IO ? 'i' : '-',
42080+#ifdef CONFIG_GRKERNSEC_HIDESYM
42081+ 0);
42082+#else
42083 vma->vm_pgoff);
42084+#endif
42085
42086 #if defined(__i386__)
42087 pgprot = pgprot_val(vma->vm_page_prot);
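[Note] Two small fixes in drm_info.c: the positional string table becomes a designated-initializer table keyed by the _DRM_* map-type constants, and the hand-written 0..5 range check becomes an ARRAY_SIZE() bound that tracks the table automatically. Stand-alone version of the idiom:

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    enum map_type { FB, REG, SHM, AGP, SG, PCI };

    static const char * const types[] = {
            [FB]  = "FB",  [REG] = "REG", [SHM] = "SHM",
            [AGP] = "AGP", [SG]  = "SG",  [PCI] = "PCI",
    };

    static const char *type_name(unsigned int t)
    {
            /* no magic "5" to forget when the enum grows */
            return t < ARRAY_SIZE(types) ? types[t] : "??";
    }

(The GRKERNSEC_HIDESYM hunk in the same file separately zeroes vm_pgoff in the proc output so mapping offsets are not disclosed.)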
42088diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
42089index 2f4c4343..dd12cd2 100644
42090--- a/drivers/gpu/drm/drm_ioc32.c
42091+++ b/drivers/gpu/drm/drm_ioc32.c
42092@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
42093 request = compat_alloc_user_space(nbytes);
42094 if (!access_ok(VERIFY_WRITE, request, nbytes))
42095 return -EFAULT;
42096- list = (struct drm_buf_desc *) (request + 1);
42097+ list = (struct drm_buf_desc __user *) (request + 1);
42098
42099 if (__put_user(count, &request->count)
42100 || __put_user(list, &request->list))
42101@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
42102 request = compat_alloc_user_space(nbytes);
42103 if (!access_ok(VERIFY_WRITE, request, nbytes))
42104 return -EFAULT;
42105- list = (struct drm_buf_pub *) (request + 1);
42106+ list = (struct drm_buf_pub __user *) (request + 1);
42107
42108 if (__put_user(count, &request->count)
42109 || __put_user(list, &request->list))
42110@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
42111 return 0;
42112 }
42113
42114-drm_ioctl_compat_t *drm_compat_ioctls[] = {
42115+drm_ioctl_compat_t drm_compat_ioctls[] = {
42116 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
42117 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
42118 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
42119@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
42120 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42121 {
42122 unsigned int nr = DRM_IOCTL_NR(cmd);
42123- drm_ioctl_compat_t *fn;
42124 int ret;
42125
42126 /* Assume that ioctls without an explicit compat routine will just
42127@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42128 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
42129 return drm_ioctl(filp, cmd, arg);
42130
42131- fn = drm_compat_ioctls[nr];
42132-
42133- if (fn != NULL)
42134- ret = (*fn) (filp, cmd, arg);
42135+ if (drm_compat_ioctls[nr] != NULL)
42136+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
42137 else
42138 ret = drm_ioctl(filp, cmd, arg);
42139
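[Note] drm_compat_ioctls (and the matching per-driver tables below) change element type from drm_ioctl_compat_t * to drm_ioctl_compat_t. At the C level both declare arrays of function pointers; the typedef itself is redefined elsewhere in this patch from a function type to a pointer type, apparently so the tables take a form the constify plugin can make read-only, and the temporaries holding a looked-up entry change type to match. The two equivalent shapes, side by side:

    /* function-type typedef: elements are declared as pointers to it */
    typedef long compat_fn(unsigned int cmd, unsigned long arg);
    static compat_fn *table_a[16];

    /* pointer-type typedef: the element type is the pointer itself */
    typedef long (*compat_ptr_t)(unsigned int cmd, unsigned long arg);
    static const compat_ptr_t table_b[16] = { 0 };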
42140diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
42141index 14d1646..99f9d49 100644
42142--- a/drivers/gpu/drm/drm_stub.c
42143+++ b/drivers/gpu/drm/drm_stub.c
42144@@ -455,7 +455,7 @@ void drm_unplug_dev(struct drm_device *dev)
42145
42146 drm_device_set_unplugged(dev);
42147
42148- if (dev->open_count == 0) {
42149+ if (local_read(&dev->open_count) == 0) {
42150 drm_put_dev(dev);
42151 }
42152 mutex_unlock(&drm_global_mutex);
42153diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
42154index 369b262..09ea3ab 100644
42155--- a/drivers/gpu/drm/drm_sysfs.c
42156+++ b/drivers/gpu/drm/drm_sysfs.c
42157@@ -505,7 +505,7 @@ static void drm_sysfs_release(struct device *dev)
42158 */
42159 int drm_sysfs_device_add(struct drm_minor *minor)
42160 {
42161- char *minor_str;
42162+ const char *minor_str;
42163 int r;
42164
42165 if (minor->type == DRM_MINOR_CONTROL)
42166diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
42167index d4d16ed..8fb0b51 100644
42168--- a/drivers/gpu/drm/i810/i810_drv.h
42169+++ b/drivers/gpu/drm/i810/i810_drv.h
42170@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
42171 int page_flipping;
42172
42173 wait_queue_head_t irq_queue;
42174- atomic_t irq_received;
42175- atomic_t irq_emitted;
42176+ atomic_unchecked_t irq_received;
42177+ atomic_unchecked_t irq_emitted;
42178
42179 int front_offset;
42180 } drm_i810_private_t;
42181diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
42182index d443441..ab091dd 100644
42183--- a/drivers/gpu/drm/i915/i915_dma.c
42184+++ b/drivers/gpu/drm/i915/i915_dma.c
42185@@ -1290,7 +1290,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
42186 * locking inversion with the driver load path. And the access here is
42187 * completely racy anyway. So don't bother with locking for now.
42188 */
42189- return dev->open_count == 0;
42190+ return local_read(&dev->open_count) == 0;
42191 }
42192
42193 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
42194diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
42195index 3a30133..ef4a743 100644
42196--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
42197+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
42198@@ -891,9 +891,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
42199
42200 static int
42201 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
42202- int count)
42203+ unsigned int count)
42204 {
42205- int i;
42206+ unsigned int i;
42207 unsigned relocs_total = 0;
42208 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
42209
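[Note] validate_exec_list's count parameter, derived from a userspace-supplied buffer count, goes from int to unsigned int, and the loop index with it. With a signed count, a large 32-bit value interpreted as negative makes every i < count loop body execute zero times, so validation silently passes. Minimal illustration:

    static int validate_signed(int count)
    {
            for (int i = 0; i < count; i++) {
                    /* never reached when count wrapped negative */
            }
            return 0; /* reports success anyway */
    }

    static int validate_unsigned(unsigned int count)
    {
            for (unsigned int i = 0; i < count; i++) {
                    /* always covers all count entries */
            }
            return 0;
    }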
42210diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
42211index 2e0613e..a8b94d9 100644
42212--- a/drivers/gpu/drm/i915/i915_ioc32.c
42213+++ b/drivers/gpu/drm/i915/i915_ioc32.c
42214@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
42215 (unsigned long)request);
42216 }
42217
42218-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
42219+static drm_ioctl_compat_t i915_compat_ioctls[] = {
42220 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
42221 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
42222 [DRM_I915_GETPARAM] = compat_i915_getparam,
42223@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
42224 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42225 {
42226 unsigned int nr = DRM_IOCTL_NR(cmd);
42227- drm_ioctl_compat_t *fn = NULL;
42228 int ret;
42229
42230 if (nr < DRM_COMMAND_BASE)
42231 return drm_compat_ioctl(filp, cmd, arg);
42232
42233- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
42234- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
42235-
42236- if (fn != NULL)
42237+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls)) {
42238+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
42239 ret = (*fn) (filp, cmd, arg);
42240- else
42241+ } else
42242 ret = drm_ioctl(filp, cmd, arg);
42243
42244 return ret;
42245diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
42246index f0be855..94e82d9 100644
42247--- a/drivers/gpu/drm/i915/intel_display.c
42248+++ b/drivers/gpu/drm/i915/intel_display.c
42249@@ -11604,13 +11604,13 @@ struct intel_quirk {
42250 int subsystem_vendor;
42251 int subsystem_device;
42252 void (*hook)(struct drm_device *dev);
42253-};
42254+} __do_const;
42255
42256 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
42257 struct intel_dmi_quirk {
42258 void (*hook)(struct drm_device *dev);
42259 const struct dmi_system_id (*dmi_id_list)[];
42260-};
42261+} __do_const;
42262
42263 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
42264 {
42265@@ -11618,18 +11618,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
42266 return 1;
42267 }
42268
42269-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
42270+static const struct dmi_system_id intel_dmi_quirks_table[] = {
42271 {
42272- .dmi_id_list = &(const struct dmi_system_id[]) {
42273- {
42274- .callback = intel_dmi_reverse_brightness,
42275- .ident = "NCR Corporation",
42276- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
42277- DMI_MATCH(DMI_PRODUCT_NAME, ""),
42278- },
42279- },
42280- { } /* terminating entry */
42281+ .callback = intel_dmi_reverse_brightness,
42282+ .ident = "NCR Corporation",
42283+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
42284+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
42285 },
42286+ },
42287+ { } /* terminating entry */
42288+};
42289+
42290+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
42291+ {
42292+ .dmi_id_list = &intel_dmi_quirks_table,
42293 .hook = quirk_invert_brightness,
42294 },
42295 };
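[Note] intel_dmi_quirks previously took the address of an anonymous compound literal inside its initializer; the patch hoists that DMI match list into a named file-scope array. A named static const object is something the constify machinery can place in read-only memory, which the inline literal form defeats. Shape of the transformation (the struct names here are stand-ins):

    struct dmi_id { const char *vendor; };
    struct quirk  { const struct dmi_id (*list)[2]; };

    static const struct dmi_id quirk_table[2] = {
            { .vendor = "NCR Corporation" },
            { 0 },  /* terminating entry */
    };

    static const struct quirk quirks[] = {
            { .list = &quirk_table },
    };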
42296diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
42297index fe45321..836fdca 100644
42298--- a/drivers/gpu/drm/mga/mga_drv.h
42299+++ b/drivers/gpu/drm/mga/mga_drv.h
42300@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
42301 u32 clear_cmd;
42302 u32 maccess;
42303
42304- atomic_t vbl_received; /**< Number of vblanks received. */
42305+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
42306 wait_queue_head_t fence_queue;
42307- atomic_t last_fence_retired;
42308+ atomic_unchecked_t last_fence_retired;
42309 u32 next_fence_to_post;
42310
42311 unsigned int fb_cpp;
42312diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
42313index 729bfd5..ead8823 100644
42314--- a/drivers/gpu/drm/mga/mga_ioc32.c
42315+++ b/drivers/gpu/drm/mga/mga_ioc32.c
42316@@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
42317 return 0;
42318 }
42319
42320-drm_ioctl_compat_t *mga_compat_ioctls[] = {
42321+drm_ioctl_compat_t mga_compat_ioctls[] = {
42322 [DRM_MGA_INIT] = compat_mga_init,
42323 [DRM_MGA_GETPARAM] = compat_mga_getparam,
42324 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
42325@@ -208,18 +208,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
42326 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42327 {
42328 unsigned int nr = DRM_IOCTL_NR(cmd);
42329- drm_ioctl_compat_t *fn = NULL;
42330 int ret;
42331
42332 if (nr < DRM_COMMAND_BASE)
42333 return drm_compat_ioctl(filp, cmd, arg);
42334
42335- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
42336- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
42337-
42338- if (fn != NULL)
42339+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls)) {
42340+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
42341 ret = (*fn) (filp, cmd, arg);
42342- else
42343+ } else
42344 ret = drm_ioctl(filp, cmd, arg);
42345
42346 return ret;
42347diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
42348index 1b071b8..de8601a 100644
42349--- a/drivers/gpu/drm/mga/mga_irq.c
42350+++ b/drivers/gpu/drm/mga/mga_irq.c
42351@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
42352 if (crtc != 0)
42353 return 0;
42354
42355- return atomic_read(&dev_priv->vbl_received);
42356+ return atomic_read_unchecked(&dev_priv->vbl_received);
42357 }
42358
42359
42360@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
42361 /* VBLANK interrupt */
42362 if (status & MGA_VLINEPEN) {
42363 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
42364- atomic_inc(&dev_priv->vbl_received);
42365+ atomic_inc_unchecked(&dev_priv->vbl_received);
42366 drm_handle_vblank(dev, 0);
42367 handled = 1;
42368 }
42369@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
42370 if ((prim_start & ~0x03) != (prim_end & ~0x03))
42371 MGA_WRITE(MGA_PRIMEND, prim_end);
42372
42373- atomic_inc(&dev_priv->last_fence_retired);
42374+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
42375 wake_up(&dev_priv->fence_queue);
42376 handled = 1;
42377 }
42378@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
42379 * using fences.
42380 */
42381 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
42382- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
42383+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
42384 - *sequence) <= (1 << 23)));
42385
42386 *sequence = cur_fence;
42387diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
42388index 8268a4c..5105708 100644
42389--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
42390+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
42391@@ -965,7 +965,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
42392 struct bit_table {
42393 const char id;
42394 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
42395-};
42396+} __no_const;
42397
42398 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
42399
42400diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
42401index 7efbafa..19f8087 100644
42402--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
42403+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
42404@@ -97,7 +97,6 @@ struct nouveau_drm {
42405 struct drm_global_reference mem_global_ref;
42406 struct ttm_bo_global_ref bo_global_ref;
42407 struct ttm_bo_device bdev;
42408- atomic_t validate_sequence;
42409 int (*move)(struct nouveau_channel *,
42410 struct ttm_buffer_object *,
42411 struct ttm_mem_reg *, struct ttm_mem_reg *);
42412diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
42413index 462679a..88e32a7 100644
42414--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
42415+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
42416@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
42417 unsigned long arg)
42418 {
42419 unsigned int nr = DRM_IOCTL_NR(cmd);
42420- drm_ioctl_compat_t *fn = NULL;
42421+ drm_ioctl_compat_t fn = NULL;
42422 int ret;
42423
42424 if (nr < DRM_COMMAND_BASE)
42425diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
42426index ab0228f..20b756b 100644
42427--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
42428+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
42429@@ -130,11 +130,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
42430 }
42431
42432 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
42433- nouveau_vram_manager_init,
42434- nouveau_vram_manager_fini,
42435- nouveau_vram_manager_new,
42436- nouveau_vram_manager_del,
42437- nouveau_vram_manager_debug
42438+ .init = nouveau_vram_manager_init,
42439+ .takedown = nouveau_vram_manager_fini,
42440+ .get_node = nouveau_vram_manager_new,
42441+ .put_node = nouveau_vram_manager_del,
42442+ .debug = nouveau_vram_manager_debug
42443 };
42444
42445 static int
42446@@ -199,11 +199,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
42447 }
42448
42449 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
42450- nouveau_gart_manager_init,
42451- nouveau_gart_manager_fini,
42452- nouveau_gart_manager_new,
42453- nouveau_gart_manager_del,
42454- nouveau_gart_manager_debug
42455+ .init = nouveau_gart_manager_init,
42456+ .takedown = nouveau_gart_manager_fini,
42457+ .get_node = nouveau_gart_manager_new,
42458+ .put_node = nouveau_gart_manager_del,
42459+ .debug = nouveau_gart_manager_debug
42460 };
42461
42462 #include <core/subdev/vm/nv04.h>
42463@@ -271,11 +271,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
42464 }
42465
42466 const struct ttm_mem_type_manager_func nv04_gart_manager = {
42467- nv04_gart_manager_init,
42468- nv04_gart_manager_fini,
42469- nv04_gart_manager_new,
42470- nv04_gart_manager_del,
42471- nv04_gart_manager_debug
42472+ .init = nv04_gart_manager_init,
42473+ .takedown = nv04_gart_manager_fini,
42474+ .get_node = nv04_gart_manager_new,
42475+ .put_node = nv04_gart_manager_del,
42476+ .debug = nv04_gart_manager_debug
42477 };
42478
42479 int
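[Note] The three nouveau manager tables (and ttm_bo_manager_func / vmw_gmrid_manager_func later in this patch) switch from positional to designated initializers. The content is identical; the designated form survives field reordering or insertion in ttm_mem_type_manager_func and makes the hook/function pairing explicit, which matters when such tables are also being made const. The idiom in isolation:

    struct mgr_funcs {
            int  (*init)(void);
            void (*takedown)(void);
    };

    static int  my_init(void)     { return 0; }
    static void my_takedown(void) { }

    static const struct mgr_funcs mgr = {
            .init     = my_init,      /* pairing is explicit,    */
            .takedown = my_takedown,  /* ordering is irrelevant  */
    };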
42480diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
42481index 4f4c3fe..2cce716 100644
42482--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
42483+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
42484@@ -70,7 +70,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
42485 * locking inversion with the driver load path. And the access here is
42486 * completely racy anyway. So don't bother with locking for now.
42487 */
42488- return dev->open_count == 0;
42489+ return local_read(&dev->open_count) == 0;
42490 }
42491
42492 static const struct vga_switcheroo_client_ops
42493diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
42494index eb89653..613cf71 100644
42495--- a/drivers/gpu/drm/qxl/qxl_cmd.c
42496+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
42497@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
42498 int ret;
42499
42500 mutex_lock(&qdev->async_io_mutex);
42501- irq_num = atomic_read(&qdev->irq_received_io_cmd);
42502+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
42503 if (qdev->last_sent_io_cmd > irq_num) {
42504 if (intr)
42505 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
42506- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42507+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42508 else
42509 ret = wait_event_timeout(qdev->io_cmd_event,
42510- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42511+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42512 /* 0 is timeout, just bail the "hw" has gone away */
42513 if (ret <= 0)
42514 goto out;
42515- irq_num = atomic_read(&qdev->irq_received_io_cmd);
42516+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
42517 }
42518 outb(val, addr);
42519 qdev->last_sent_io_cmd = irq_num + 1;
42520 if (intr)
42521 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
42522- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42523+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42524 else
42525 ret = wait_event_timeout(qdev->io_cmd_event,
42526- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42527+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42528 out:
42529 if (ret > 0)
42530 ret = 0;
42531diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
42532index c3c2bbd..bc3c0fb 100644
42533--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
42534+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
42535@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
42536 struct drm_info_node *node = (struct drm_info_node *) m->private;
42537 struct qxl_device *qdev = node->minor->dev->dev_private;
42538
42539- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
42540- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
42541- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
42542- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
42543+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
42544+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
42545+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
42546+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
42547 seq_printf(m, "%d\n", qdev->irq_received_error);
42548 return 0;
42549 }
42550diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
42551index 36ed40b..0397633 100644
42552--- a/drivers/gpu/drm/qxl/qxl_drv.h
42553+++ b/drivers/gpu/drm/qxl/qxl_drv.h
42554@@ -290,10 +290,10 @@ struct qxl_device {
42555 unsigned int last_sent_io_cmd;
42556
42557 /* interrupt handling */
42558- atomic_t irq_received;
42559- atomic_t irq_received_display;
42560- atomic_t irq_received_cursor;
42561- atomic_t irq_received_io_cmd;
42562+ atomic_unchecked_t irq_received;
42563+ atomic_unchecked_t irq_received_display;
42564+ atomic_unchecked_t irq_received_cursor;
42565+ atomic_unchecked_t irq_received_io_cmd;
42566 unsigned irq_received_error;
42567 wait_queue_head_t display_event;
42568 wait_queue_head_t cursor_event;
42569diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
42570index b110883..dd06418 100644
42571--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
42572+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
42573@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
42574
42575 /* TODO copy slow path code from i915 */
42576 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
42577- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
42578+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
42579
42580 {
42581 struct qxl_drawable *draw = fb_cmd;
42582@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
42583 struct drm_qxl_reloc reloc;
42584
42585 if (copy_from_user(&reloc,
42586- &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
42587+ &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
42588 sizeof(reloc))) {
42589 ret = -EFAULT;
42590 goto out_free_bos;
42591@@ -294,10 +294,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
42592
42593 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
42594
42595- struct drm_qxl_command *commands =
42596- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
42597+ struct drm_qxl_command __user *commands =
42598+ (struct drm_qxl_command __user *)(uintptr_t)execbuffer->commands;
42599
42600- if (copy_from_user(&user_cmd, &commands[cmd_num],
42601+ if (copy_from_user(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
42602 sizeof(user_cmd)))
42603 return -EFAULT;
42604
42605diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
42606index 0bf1e20..42a7310 100644
42607--- a/drivers/gpu/drm/qxl/qxl_irq.c
42608+++ b/drivers/gpu/drm/qxl/qxl_irq.c
42609@@ -36,19 +36,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
42610 if (!pending)
42611 return IRQ_NONE;
42612
42613- atomic_inc(&qdev->irq_received);
42614+ atomic_inc_unchecked(&qdev->irq_received);
42615
42616 if (pending & QXL_INTERRUPT_DISPLAY) {
42617- atomic_inc(&qdev->irq_received_display);
42618+ atomic_inc_unchecked(&qdev->irq_received_display);
42619 wake_up_all(&qdev->display_event);
42620 qxl_queue_garbage_collect(qdev, false);
42621 }
42622 if (pending & QXL_INTERRUPT_CURSOR) {
42623- atomic_inc(&qdev->irq_received_cursor);
42624+ atomic_inc_unchecked(&qdev->irq_received_cursor);
42625 wake_up_all(&qdev->cursor_event);
42626 }
42627 if (pending & QXL_INTERRUPT_IO_CMD) {
42628- atomic_inc(&qdev->irq_received_io_cmd);
42629+ atomic_inc_unchecked(&qdev->irq_received_io_cmd);
42630 wake_up_all(&qdev->io_cmd_event);
42631 }
42632 if (pending & QXL_INTERRUPT_ERROR) {
42633@@ -85,10 +85,10 @@ int qxl_irq_init(struct qxl_device *qdev)
42634 init_waitqueue_head(&qdev->io_cmd_event);
42635 INIT_WORK(&qdev->client_monitors_config_work,
42636 qxl_client_monitors_config_work_func);
42637- atomic_set(&qdev->irq_received, 0);
42638- atomic_set(&qdev->irq_received_display, 0);
42639- atomic_set(&qdev->irq_received_cursor, 0);
42640- atomic_set(&qdev->irq_received_io_cmd, 0);
42641+ atomic_set_unchecked(&qdev->irq_received, 0);
42642+ atomic_set_unchecked(&qdev->irq_received_display, 0);
42643+ atomic_set_unchecked(&qdev->irq_received_cursor, 0);
42644+ atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
42645 qdev->irq_received_error = 0;
42646 ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
42647 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
42648diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
42649index 71a1bae..cb1f103 100644
42650--- a/drivers/gpu/drm/qxl/qxl_ttm.c
42651+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
42652@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
42653 }
42654 }
42655
42656-static struct vm_operations_struct qxl_ttm_vm_ops;
42657+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
42658 static const struct vm_operations_struct *ttm_vm_ops;
42659
42660 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42661@@ -145,8 +145,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
42662 return r;
42663 if (unlikely(ttm_vm_ops == NULL)) {
42664 ttm_vm_ops = vma->vm_ops;
42665+ pax_open_kernel();
42666 qxl_ttm_vm_ops = *ttm_vm_ops;
42667 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
42668+ pax_close_kernel();
42669 }
42670 vma->vm_ops = &qxl_ttm_vm_ops;
42671 return 0;
42672@@ -555,25 +557,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
42673 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
42674 {
42675 #if defined(CONFIG_DEBUG_FS)
42676- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
42677- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
42678- unsigned i;
42679+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
42680+ {
42681+ .name = "qxl_mem_mm",
42682+ .show = &qxl_mm_dump_table,
42683+ },
42684+ {
42685+ .name = "qxl_surf_mm",
42686+ .show = &qxl_mm_dump_table,
42687+ }
42688+ };
42689
42690- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
42691- if (i == 0)
42692- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
42693- else
42694- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
42695- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
42696- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
42697- qxl_mem_types_list[i].driver_features = 0;
42698- if (i == 0)
42699- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
42700- else
42701- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
42702+ pax_open_kernel();
42703+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
42704+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
42705+ pax_close_kernel();
42706
42707- }
42708- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
42709+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
42710 #else
42711 return 0;
42712 #endif
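[Note] qxl_ttm_debugfs_init is reworked so the drm_info_list table is fully static: the names and .show hooks that were sprintf'd and assigned at runtime become initializers, leaving only the two runtime-dependent .data pointers to be patched inside the pax window. Reduced sketch of the effect (the types are stand-ins):

    struct info_entry { const char *name; void *data; };

    static struct info_entry mem_types[2] = {
            { .name = "qxl_mem_mm"  },
            { .name = "qxl_surf_mm" },
    };

    static void fill(void *vram_priv, void *surf_priv)
    {
            /* pax_open_kernel() in the real code */
            *(void **)&mem_types[0].data = vram_priv;
            *(void **)&mem_types[1].data = surf_priv;
            /* pax_close_kernel() */
    }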
42713diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
42714index 59459fe..be26b31 100644
42715--- a/drivers/gpu/drm/r128/r128_cce.c
42716+++ b/drivers/gpu/drm/r128/r128_cce.c
42717@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
42718
42719 /* GH: Simple idle check.
42720 */
42721- atomic_set(&dev_priv->idle_count, 0);
42722+ atomic_set_unchecked(&dev_priv->idle_count, 0);
42723
42724 /* We don't support anything other than bus-mastering ring mode,
42725 * but the ring can be in either AGP or PCI space for the ring
42726diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
42727index 5bf3f5f..7000661 100644
42728--- a/drivers/gpu/drm/r128/r128_drv.h
42729+++ b/drivers/gpu/drm/r128/r128_drv.h
42730@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
42731 int is_pci;
42732 unsigned long cce_buffers_offset;
42733
42734- atomic_t idle_count;
42735+ atomic_unchecked_t idle_count;
42736
42737 int page_flipping;
42738 int current_page;
42739 u32 crtc_offset;
42740 u32 crtc_offset_cntl;
42741
42742- atomic_t vbl_received;
42743+ atomic_unchecked_t vbl_received;
42744
42745 u32 color_fmt;
42746 unsigned int front_offset;
42747diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
42748index 663f38c..c689495 100644
42749--- a/drivers/gpu/drm/r128/r128_ioc32.c
42750+++ b/drivers/gpu/drm/r128/r128_ioc32.c
42751@@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
42752 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
42753 }
42754
42755-drm_ioctl_compat_t *r128_compat_ioctls[] = {
42756+drm_ioctl_compat_t r128_compat_ioctls[] = {
42757 [DRM_R128_INIT] = compat_r128_init,
42758 [DRM_R128_DEPTH] = compat_r128_depth,
42759 [DRM_R128_STIPPLE] = compat_r128_stipple,
42760@@ -197,18 +197,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
42761 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42762 {
42763 unsigned int nr = DRM_IOCTL_NR(cmd);
42764- drm_ioctl_compat_t *fn = NULL;
42765 int ret;
42766
42767 if (nr < DRM_COMMAND_BASE)
42768 return drm_compat_ioctl(filp, cmd, arg);
42769
42770- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls))
42771- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
42772-
42773- if (fn != NULL)
42774+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls)) {
42775+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
42776 ret = (*fn) (filp, cmd, arg);
42777- else
42778+ } else
42779 ret = drm_ioctl(filp, cmd, arg);
42780
42781 return ret;
42782diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
42783index c2ae496..30b5993 100644
42784--- a/drivers/gpu/drm/r128/r128_irq.c
42785+++ b/drivers/gpu/drm/r128/r128_irq.c
42786@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
42787 if (crtc != 0)
42788 return 0;
42789
42790- return atomic_read(&dev_priv->vbl_received);
42791+ return atomic_read_unchecked(&dev_priv->vbl_received);
42792 }
42793
42794 irqreturn_t r128_driver_irq_handler(int irq, void *arg)
42795@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
42796 /* VBLANK interrupt */
42797 if (status & R128_CRTC_VBLANK_INT) {
42798 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
42799- atomic_inc(&dev_priv->vbl_received);
42800+ atomic_inc_unchecked(&dev_priv->vbl_received);
42801 drm_handle_vblank(dev, 0);
42802 return IRQ_HANDLED;
42803 }
42804diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
42805index 575e986..66e62ca 100644
42806--- a/drivers/gpu/drm/r128/r128_state.c
42807+++ b/drivers/gpu/drm/r128/r128_state.c
42808@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
42809
42810 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
42811 {
42812- if (atomic_read(&dev_priv->idle_count) == 0)
42813+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
42814 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
42815 else
42816- atomic_set(&dev_priv->idle_count, 0);
42817+ atomic_set_unchecked(&dev_priv->idle_count, 0);
42818 }
42819
42820 #endif
42821diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
42822index 4a85bb6..aaea819 100644
42823--- a/drivers/gpu/drm/radeon/mkregtable.c
42824+++ b/drivers/gpu/drm/radeon/mkregtable.c
42825@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
42826 regex_t mask_rex;
42827 regmatch_t match[4];
42828 char buf[1024];
42829- size_t end;
42830+ long end;
42831 int len;
42832 int done = 0;
42833 int r;
42834 unsigned o;
42835 struct offset *offset;
42836 char last_reg_s[10];
42837- int last_reg;
42838+ unsigned long last_reg;
42839
42840 if (regcomp
42841 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
42842diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
42843index 697add2..9860f5b 100644
42844--- a/drivers/gpu/drm/radeon/radeon_device.c
42845+++ b/drivers/gpu/drm/radeon/radeon_device.c
42846@@ -1169,7 +1169,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
42847 * locking inversion with the driver load path. And the access here is
42848 * completely racy anyway. So don't bother with locking for now.
42849 */
42850- return dev->open_count == 0;
42851+ return local_read(&dev->open_count) == 0;
42852 }
42853
42854 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
42855diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
42856index dafd812..1bf20c7 100644
42857--- a/drivers/gpu/drm/radeon/radeon_drv.h
42858+++ b/drivers/gpu/drm/radeon/radeon_drv.h
42859@@ -262,7 +262,7 @@ typedef struct drm_radeon_private {
42860
42861 /* SW interrupt */
42862 wait_queue_head_t swi_queue;
42863- atomic_t swi_emitted;
42864+ atomic_unchecked_t swi_emitted;
42865 int vblank_crtc;
42866 uint32_t irq_enable_reg;
42867 uint32_t r500_disp_irq_reg;
42868diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
42869index 0b98ea1..0881827 100644
42870--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
42871+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
42872@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
42873 request = compat_alloc_user_space(sizeof(*request));
42874 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
42875 || __put_user(req32.param, &request->param)
42876- || __put_user((void __user *)(unsigned long)req32.value,
42877+ || __put_user((unsigned long)req32.value,
42878 &request->value))
42879 return -EFAULT;
42880
42881@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
42882 #define compat_radeon_cp_setparam NULL
42883 #endif /* X86_64 || IA64 */
42884
42885-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
42886+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
42887 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
42888 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
42889 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
42890@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
42891 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42892 {
42893 unsigned int nr = DRM_IOCTL_NR(cmd);
42894- drm_ioctl_compat_t *fn = NULL;
42895 int ret;
42896
42897 if (nr < DRM_COMMAND_BASE)
42898 return drm_compat_ioctl(filp, cmd, arg);
42899
42900- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
42901- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
42902-
42903- if (fn != NULL)
42904+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls)) {
42905+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
42906 ret = (*fn) (filp, cmd, arg);
42907- else
42908+ } else
42909 ret = drm_ioctl(filp, cmd, arg);
42910
42911 return ret;
42912diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
42913index 244b19b..c19226d 100644
42914--- a/drivers/gpu/drm/radeon/radeon_irq.c
42915+++ b/drivers/gpu/drm/radeon/radeon_irq.c
42916@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
42917 unsigned int ret;
42918 RING_LOCALS;
42919
42920- atomic_inc(&dev_priv->swi_emitted);
42921- ret = atomic_read(&dev_priv->swi_emitted);
42922+ atomic_inc_unchecked(&dev_priv->swi_emitted);
42923+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
42924
42925 BEGIN_RING(4);
42926 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
42927@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
42928 drm_radeon_private_t *dev_priv =
42929 (drm_radeon_private_t *) dev->dev_private;
42930
42931- atomic_set(&dev_priv->swi_emitted, 0);
42932+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
42933 init_waitqueue_head(&dev_priv->swi_queue);
42934
42935 dev->max_vblank_count = 0x001fffff;
42936diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
42937index 23bb64f..69d7234 100644
42938--- a/drivers/gpu/drm/radeon/radeon_state.c
42939+++ b/drivers/gpu/drm/radeon/radeon_state.c
42940@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
42941 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
42942 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
42943
42944- if (copy_from_user(&depth_boxes, clear->depth_boxes,
42945+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || copy_from_user(&depth_boxes, clear->depth_boxes,
42946 sarea_priv->nbox * sizeof(depth_boxes[0])))
42947 return -EFAULT;
42948
42949@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
42950 {
42951 drm_radeon_private_t *dev_priv = dev->dev_private;
42952 drm_radeon_getparam_t *param = data;
42953- int value;
42954+ int value = 0;
42955
42956 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
42957
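[Note] radeon_state.c carries two classic userspace-interface fixes. First, sarea_priv->nbox lives in the SAREA, memory userspace can also write, so the clamp two lines earlier and the later use as a copy length are separate fetches an attacker can race; folding the bound back into the copy expression closes that double-fetch. Second, getparam's value is now zero-initialized, so an unhandled request cannot copy uninitialized stack back to userspace. The snapshot variant of the same double-fetch defense, as a compilable stand-in:

    #include <string.h>

    #define MAX_BOXES 12
    struct box { int x1, y1, x2, y2; };

    static int fetch_boxes(const volatile unsigned int *shared_nbox,
                           struct box *dst, const struct box *src)
    {
            unsigned int n = *shared_nbox;      /* one snapshot ...            */
            if (n > MAX_BOXES)
                    return -1;
            memcpy(dst, src, n * sizeof(*dst)); /* ... checks and sizes copy   */
            return 0;
    }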
42958diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
42959index c8a8a51..219dacc 100644
42960--- a/drivers/gpu/drm/radeon/radeon_ttm.c
42961+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
42962@@ -797,7 +797,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
42963 man->size = size >> PAGE_SHIFT;
42964 }
42965
42966-static struct vm_operations_struct radeon_ttm_vm_ops;
42967+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
42968 static const struct vm_operations_struct *ttm_vm_ops = NULL;
42969
42970 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42971@@ -838,8 +838,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
42972 }
42973 if (unlikely(ttm_vm_ops == NULL)) {
42974 ttm_vm_ops = vma->vm_ops;
42975+ pax_open_kernel();
42976 radeon_ttm_vm_ops = *ttm_vm_ops;
42977 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
42978+ pax_close_kernel();
42979 }
42980 vma->vm_ops = &radeon_ttm_vm_ops;
42981 return 0;
42982diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
42983index ef40381..347463e 100644
42984--- a/drivers/gpu/drm/tegra/dc.c
42985+++ b/drivers/gpu/drm/tegra/dc.c
42986@@ -1173,7 +1173,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
42987 }
42988
42989 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
42990- dc->debugfs_files[i].data = dc;
42991+ *(void **)&dc->debugfs_files[i].data = dc;
42992
42993 err = drm_debugfs_create_files(dc->debugfs_files,
42994 ARRAY_SIZE(debugfs_files),
42995diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
42996index bd56f2a..255af4b 100644
42997--- a/drivers/gpu/drm/tegra/dsi.c
42998+++ b/drivers/gpu/drm/tegra/dsi.c
42999@@ -41,7 +41,7 @@ struct tegra_dsi {
43000 struct clk *clk_lp;
43001 struct clk *clk;
43002
43003- struct drm_info_list *debugfs_files;
43004+ drm_info_list_no_const *debugfs_files;
43005 struct drm_minor *minor;
43006 struct dentry *debugfs;
43007
43008diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
43009index ba067bb..23afbbd 100644
43010--- a/drivers/gpu/drm/tegra/hdmi.c
43011+++ b/drivers/gpu/drm/tegra/hdmi.c
43012@@ -60,7 +60,7 @@ struct tegra_hdmi {
43013 bool stereo;
43014 bool dvi;
43015
43016- struct drm_info_list *debugfs_files;
43017+ drm_info_list_no_const *debugfs_files;
43018 struct drm_minor *minor;
43019 struct dentry *debugfs;
43020 };
43021diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
43022index bd850c9..d9f3573 100644
43023--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
43024+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
43025@@ -146,10 +146,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
43026 }
43027
43028 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
43029- ttm_bo_man_init,
43030- ttm_bo_man_takedown,
43031- ttm_bo_man_get_node,
43032- ttm_bo_man_put_node,
43033- ttm_bo_man_debug
43034+ .init = ttm_bo_man_init,
43035+ .takedown = ttm_bo_man_takedown,
43036+ .get_node = ttm_bo_man_get_node,
43037+ .put_node = ttm_bo_man_put_node,
43038+ .debug = ttm_bo_man_debug
43039 };
43040 EXPORT_SYMBOL(ttm_bo_manager_func);
43041diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
43042index dbc2def..0a9f710 100644
43043--- a/drivers/gpu/drm/ttm/ttm_memory.c
43044+++ b/drivers/gpu/drm/ttm/ttm_memory.c
43045@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
43046 zone->glob = glob;
43047 glob->zone_kernel = zone;
43048 ret = kobject_init_and_add(
43049- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
43050+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
43051 if (unlikely(ret != 0)) {
43052 kobject_put(&zone->kobj);
43053 return ret;
43054@@ -347,7 +347,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
43055 zone->glob = glob;
43056 glob->zone_dma32 = zone;
43057 ret = kobject_init_and_add(
43058- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
43059+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
43060 if (unlikely(ret != 0)) {
43061 kobject_put(&zone->kobj);
43062 return ret;
43063diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
43064index 863bef9..cba15cf 100644
43065--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
43066+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
43067@@ -391,9 +391,9 @@ out:
43068 static unsigned long
43069 ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
43070 {
43071- static atomic_t start_pool = ATOMIC_INIT(0);
43072+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
43073 unsigned i;
43074- unsigned pool_offset = atomic_add_return(1, &start_pool);
43075+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
43076 struct ttm_page_pool *pool;
43077 int shrink_pages = sc->nr_to_scan;
43078 unsigned long freed = 0;
43079diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
43080index 3771763..883f206 100644
43081--- a/drivers/gpu/drm/udl/udl_fb.c
43082+++ b/drivers/gpu/drm/udl/udl_fb.c
43083@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
43084 fb_deferred_io_cleanup(info);
43085 kfree(info->fbdefio);
43086 info->fbdefio = NULL;
43087- info->fbops->fb_mmap = udl_fb_mmap;
43088 }
43089
43090 pr_warn("released /dev/fb%d user=%d count=%d\n",
43091diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
43092index ad02732..144f5ed 100644
43093--- a/drivers/gpu/drm/via/via_drv.h
43094+++ b/drivers/gpu/drm/via/via_drv.h
43095@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
43096 typedef uint32_t maskarray_t[5];
43097
43098 typedef struct drm_via_irq {
43099- atomic_t irq_received;
43100+ atomic_unchecked_t irq_received;
43101 uint32_t pending_mask;
43102 uint32_t enable_mask;
43103 wait_queue_head_t irq_queue;
43104@@ -75,7 +75,7 @@ typedef struct drm_via_private {
43105 struct timeval last_vblank;
43106 int last_vblank_valid;
43107 unsigned usec_per_vblank;
43108- atomic_t vbl_received;
43109+ atomic_unchecked_t vbl_received;
43110 drm_via_state_t hc_state;
43111 char pci_buf[VIA_PCI_BUF_SIZE];
43112 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
43113diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
43114index 1319433..a993b0c 100644
43115--- a/drivers/gpu/drm/via/via_irq.c
43116+++ b/drivers/gpu/drm/via/via_irq.c
43117@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
43118 if (crtc != 0)
43119 return 0;
43120
43121- return atomic_read(&dev_priv->vbl_received);
43122+ return atomic_read_unchecked(&dev_priv->vbl_received);
43123 }
43124
43125 irqreturn_t via_driver_irq_handler(int irq, void *arg)
43126@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
43127
43128 status = VIA_READ(VIA_REG_INTERRUPT);
43129 if (status & VIA_IRQ_VBLANK_PENDING) {
43130- atomic_inc(&dev_priv->vbl_received);
43131- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
43132+ atomic_inc_unchecked(&dev_priv->vbl_received);
43133+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
43134 do_gettimeofday(&cur_vblank);
43135 if (dev_priv->last_vblank_valid) {
43136 dev_priv->usec_per_vblank =
43137@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
43138 dev_priv->last_vblank = cur_vblank;
43139 dev_priv->last_vblank_valid = 1;
43140 }
43141- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
43142+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
43143 DRM_DEBUG("US per vblank is: %u\n",
43144 dev_priv->usec_per_vblank);
43145 }
43146@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
43147
43148 for (i = 0; i < dev_priv->num_irqs; ++i) {
43149 if (status & cur_irq->pending_mask) {
43150- atomic_inc(&cur_irq->irq_received);
43151+ atomic_inc_unchecked(&cur_irq->irq_received);
43152 wake_up(&cur_irq->irq_queue);
43153 handled = 1;
43154 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
43155@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
43156 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
43157 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
43158 masks[irq][4]));
43159- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
43160+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
43161 } else {
43162 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
43163 (((cur_irq_sequence =
43164- atomic_read(&cur_irq->irq_received)) -
43165+ atomic_read_unchecked(&cur_irq->irq_received)) -
43166 *sequence) <= (1 << 23)));
43167 }
43168 *sequence = cur_irq_sequence;
43169@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
43170 }
43171
43172 for (i = 0; i < dev_priv->num_irqs; ++i) {
43173- atomic_set(&cur_irq->irq_received, 0);
43174+ atomic_set_unchecked(&cur_irq->irq_received, 0);
43175 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
43176 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
43177 init_waitqueue_head(&cur_irq->irq_queue);
43178@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
43179 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
43180 case VIA_IRQ_RELATIVE:
43181 irqwait->request.sequence +=
43182- atomic_read(&cur_irq->irq_received);
43183+ atomic_read_unchecked(&cur_irq->irq_received);
43184 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
43185 case VIA_IRQ_ABSOLUTE:
43186 break;
43187diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
43188index 6b252a8..5975dfe 100644
43189--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
43190+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
43191@@ -437,7 +437,7 @@ struct vmw_private {
43192 * Fencing and IRQs.
43193 */
43194
43195- atomic_t marker_seq;
43196+ atomic_unchecked_t marker_seq;
43197 wait_queue_head_t fence_queue;
43198 wait_queue_head_t fifo_queue;
43199 int fence_queue_waiters; /* Protected by hw_mutex */
43200diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
43201index 6ccd993..618d592 100644
43202--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
43203+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
43204@@ -154,7 +154,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
43205 (unsigned int) min,
43206 (unsigned int) fifo->capabilities);
43207
43208- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
43209+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
43210 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
43211 vmw_marker_queue_init(&fifo->marker_queue);
43212 return vmw_fifo_send_fence(dev_priv, &dummy);
43213@@ -372,7 +372,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
43214 if (reserveable)
43215 iowrite32(bytes, fifo_mem +
43216 SVGA_FIFO_RESERVED);
43217- return fifo_mem + (next_cmd >> 2);
43218+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
43219 } else {
43220 need_bounce = true;
43221 }
43222@@ -492,7 +492,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
43223
43224 fm = vmw_fifo_reserve(dev_priv, bytes);
43225 if (unlikely(fm == NULL)) {
43226- *seqno = atomic_read(&dev_priv->marker_seq);
43227+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
43228 ret = -ENOMEM;
43229 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
43230 false, 3*HZ);
43231@@ -500,7 +500,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
43232 }
43233
43234 do {
43235- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
43236+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
43237 } while (*seqno == 0);
43238
43239 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
43240diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
43241index b1273e8..9c274fd 100644
43242--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
43243+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
43244@@ -164,9 +164,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
43245 }
43246
43247 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
43248- vmw_gmrid_man_init,
43249- vmw_gmrid_man_takedown,
43250- vmw_gmrid_man_get_node,
43251- vmw_gmrid_man_put_node,
43252- vmw_gmrid_man_debug
43253+ .init = vmw_gmrid_man_init,
43254+ .takedown = vmw_gmrid_man_takedown,
43255+ .get_node = vmw_gmrid_man_get_node,
43256+ .put_node = vmw_gmrid_man_put_node,
43257+ .debug = vmw_gmrid_man_debug
43258 };
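
The hunk above is representative of many in this patch: positional initializers for function-pointer tables are rewritten in designated form. Designated initializers stay correct if members are ever reordered, which matters once grsecurity's structure-layout randomization may shuffle members, and they also make const ops tables easier to audit. A minimal stand-alone sketch of the same conversion, using made-up names rather than the kernel's ttm_mem_type_manager_func:

#include <stdio.h>

struct demo_ops {
    int  (*init)(void);
    void (*takedown)(void);
    void (*debug)(const char *msg);
};

static int demo_init(void) { return 0; }
static void demo_takedown(void) { }
static void demo_debug(const char *msg) { printf("debug: %s\n", msg); }

/*
 * Positional form breaks silently if the struct is reordered;
 * the designated form below is what the patch converts to.
 */
static const struct demo_ops demo_table = {
    .init     = demo_init,
    .takedown = demo_takedown,
    .debug    = demo_debug,
};

int main(void)
{
    if (demo_table.init() == 0)
        demo_table.debug("initialized");
    demo_table.takedown();
    return 0;
}
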
43259diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
43260index 37881ec..319065d 100644
43261--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
43262+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
43263@@ -235,7 +235,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
43264 int ret;
43265
43266 num_clips = arg->num_clips;
43267- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
43268+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
43269
43270 if (unlikely(num_clips == 0))
43271 return 0;
43272@@ -318,7 +318,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
43273 int ret;
43274
43275 num_clips = arg->num_clips;
43276- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
43277+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
43278
43279 if (unlikely(num_clips == 0))
43280 return 0;
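
The two vmwgfx_ioctl.c hunks above only add __user to casts of userspace-supplied handles; that is an annotation for the sparse static checker, not a behavioural change. A self-contained sketch of the idiom, with hypothetical names (demo_rect, rect_ptr_from_handle) standing in for the vmwgfx types, assuming the usual kernel-style definition of the attribute:

#ifdef __CHECKER__
#define __user __attribute__((noderef, address_space(1)))
#else
#define __user
#endif

struct demo_rect { int x, y, w, h; };

static struct demo_rect __user *rect_ptr_from_handle(unsigned long long handle)
{
    /* widen to unsigned long first, exactly as the hunks above do */
    return (struct demo_rect __user *)(unsigned long)handle;
}

int main(void)
{
    struct demo_rect r = { 0, 0, 64, 64 };
    struct demo_rect __user *p =
        rect_ptr_from_handle((unsigned long long)(unsigned long)&r);
    (void)p;    /* a real driver would go through copy_from_user() */
    return 0;
}

With __CHECKER__ defined, sparse reports any direct dereference of the annotated pointer; plain gcc sees the macro expand to nothing.
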
43281diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
43282index 0c42376..6febe77 100644
43283--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
43284+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
43285@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
43286 * emitted. Then the fence is stale and signaled.
43287 */
43288
43289- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
43290+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
43291 > VMW_FENCE_WRAP);
43292
43293 return ret;
43294@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
43295
43296 if (fifo_idle)
43297 down_read(&fifo_state->rwsem);
43298- signal_seq = atomic_read(&dev_priv->marker_seq);
43299+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
43300 ret = 0;
43301
43302 for (;;) {
43303diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
43304index 8a8725c2..afed796 100644
43305--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
43306+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
43307@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
43308 while (!vmw_lag_lt(queue, us)) {
43309 spin_lock(&queue->lock);
43310 if (list_empty(&queue->head))
43311- seqno = atomic_read(&dev_priv->marker_seq);
43312+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
43313 else {
43314 marker = list_first_entry(&queue->head,
43315 struct vmw_marker, head);
43316diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
43317index 6866448..2ad2b34 100644
43318--- a/drivers/gpu/vga/vga_switcheroo.c
43319+++ b/drivers/gpu/vga/vga_switcheroo.c
43320@@ -644,7 +644,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
43321
43322 /* this version is for the case where the power switch is separate
43323 to the device being powered down. */
43324-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
43325+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
43326 {
43327 /* copy over all the bus versions */
43328 if (dev->bus && dev->bus->pm) {
43329@@ -689,7 +689,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
43330 return ret;
43331 }
43332
43333-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
43334+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
43335 {
43336 /* copy over all the bus versions */
43337 if (dev->bus && dev->bus->pm) {
43338diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
43339index 8ed66fd..38ff772 100644
43340--- a/drivers/hid/hid-core.c
43341+++ b/drivers/hid/hid-core.c
43342@@ -2488,7 +2488,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
43343
43344 int hid_add_device(struct hid_device *hdev)
43345 {
43346- static atomic_t id = ATOMIC_INIT(0);
43347+ static atomic_unchecked_t id = ATOMIC_INIT(0);
43348 int ret;
43349
43350 if (WARN_ON(hdev->status & HID_STAT_ADDED))
43351@@ -2530,7 +2530,7 @@ int hid_add_device(struct hid_device *hdev)
43352 /* XXX hack, any other cleaner solution after the driver core
43353 * is converted to allow more than 20 bytes as the device name? */
43354 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
43355- hdev->vendor, hdev->product, atomic_inc_return(&id));
43356+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
43357
43358 hid_debug_register(hdev, dev_name(&hdev->dev));
43359 ret = device_add(&hdev->dev);
43360diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
43361index ecc2cbf..29a74c1 100644
43362--- a/drivers/hid/hid-magicmouse.c
43363+++ b/drivers/hid/hid-magicmouse.c
43364@@ -290,6 +290,11 @@ static int magicmouse_raw_event(struct hid_device *hdev,
43365 if (size < 4 || ((size - 4) % 9) != 0)
43366 return 0;
43367 npoints = (size - 4) / 9;
43368+ if (npoints > 15) {
43369+ hid_warn(hdev, "invalid size value (%d) for TRACKPAD_REPORT_ID\n",
43370+ size);
43371+ return 0;
43372+ }
43373 msc->ntouches = 0;
43374 for (ii = 0; ii < npoints; ii++)
43375 magicmouse_emit_touch(msc, ii, data + ii * 9 + 4);
43376@@ -307,6 +312,11 @@ static int magicmouse_raw_event(struct hid_device *hdev,
43377 if (size < 6 || ((size - 6) % 8) != 0)
43378 return 0;
43379 npoints = (size - 6) / 8;
43380+ if (npoints > 15) {
43381+ hid_warn(hdev, "invalid size value (%d) for MOUSE_REPORT_ID\n",
43382+ size);
43383+ return 0;
43384+ }
43385 msc->ntouches = 0;
43386 for (ii = 0; ii < npoints; ii++)
43387 magicmouse_emit_touch(msc, ii, data + ii * 8 + 6);
43388diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
43389index acbb0210..020df3c 100644
43390--- a/drivers/hid/hid-picolcd_core.c
43391+++ b/drivers/hid/hid-picolcd_core.c
43392@@ -350,6 +350,12 @@ static int picolcd_raw_event(struct hid_device *hdev,
43393 if (!data)
43394 return 1;
43395
43396+ if (size > 64) {
43397+ hid_warn(hdev, "invalid size value (%d) for picolcd raw event\n",
43398+ size);
43399+ return 0;
43400+ }
43401+
43402 if (report->id == REPORT_KEY_STATE) {
43403 if (data->input_keys)
43404 ret = picolcd_raw_keypad(data, report, raw_data+1, size-1);
43405diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
43406index c13fb5b..55a3802 100644
43407--- a/drivers/hid/hid-wiimote-debug.c
43408+++ b/drivers/hid/hid-wiimote-debug.c
43409@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
43410 else if (size == 0)
43411 return -EIO;
43412
43413- if (copy_to_user(u, buf, size))
43414+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
43415 return -EFAULT;
43416
43417 *off += size;
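
The three HID hunks above (magicmouse, picolcd, wiimote-debug) share one theme: sizes that originate from the device are validated before they are used to index or copy out of fixed-size buffers. A user-space sketch of the magicmouse-style check; MAX_POINTS and the 4-byte header / 9-byte stride mirror the trackpad hunk, but the function itself is hypothetical:

#include <stdio.h>
#include <stddef.h>

#define MAX_POINTS 15

static int handle_trackpad_report(const unsigned char *data, size_t size)
{
    size_t npoints, i;

    /* 4-byte header, 9 bytes per touch record */
    if (size < 4 || ((size - 4) % 9) != 0)
        return 0;
    npoints = (size - 4) / 9;

    /* the added check: never trust a device-supplied count */
    if (npoints > MAX_POINTS) {
        fprintf(stderr, "invalid size value (%zu)\n", size);
        return 0;
    }

    for (i = 0; i < npoints; i++) {
        const unsigned char *touch = data + 4 + i * 9;
        printf("touch %zu starts with 0x%02x\n", i, touch[0]);
    }
    return 1;
}

int main(void)
{
    unsigned char report[4 + 2 * 9] = { 0x28 };
    handle_trackpad_report(report, sizeof(report));
    return 0;
}
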
43418diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
43419index 0cb92e3..c7d453d 100644
43420--- a/drivers/hid/uhid.c
43421+++ b/drivers/hid/uhid.c
43422@@ -47,7 +47,7 @@ struct uhid_device {
43423 struct mutex report_lock;
43424 wait_queue_head_t report_wait;
43425 atomic_t report_done;
43426- atomic_t report_id;
43427+ atomic_unchecked_t report_id;
43428 struct uhid_event report_buf;
43429 };
43430
43431@@ -163,7 +163,7 @@ static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum,
43432
43433 spin_lock_irqsave(&uhid->qlock, flags);
43434 ev->type = UHID_FEATURE;
43435- ev->u.feature.id = atomic_inc_return(&uhid->report_id);
43436+ ev->u.feature.id = atomic_inc_return_unchecked(&uhid->report_id);
43437 ev->u.feature.rnum = rnum;
43438 ev->u.feature.rtype = report_type;
43439
43440@@ -538,7 +538,7 @@ static int uhid_dev_feature_answer(struct uhid_device *uhid,
43441 spin_lock_irqsave(&uhid->qlock, flags);
43442
43443 /* id for old report; drop it silently */
43444- if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id)
43445+ if (atomic_read_unchecked(&uhid->report_id) != ev->u.feature_answer.id)
43446 goto unlock;
43447 if (atomic_read(&uhid->report_done))
43448 goto unlock;
43449diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
43450index 284cf66..084c627 100644
43451--- a/drivers/hv/channel.c
43452+++ b/drivers/hv/channel.c
43453@@ -365,8 +365,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
43454 int ret = 0;
43455 int t;
43456
43457- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
43458- atomic_inc(&vmbus_connection.next_gpadl_handle);
43459+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
43460+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
43461
43462 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
43463 if (ret)
43464diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
43465index edfc848..d83e195 100644
43466--- a/drivers/hv/hv.c
43467+++ b/drivers/hv/hv.c
43468@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
43469 u64 output_address = (output) ? virt_to_phys(output) : 0;
43470 u32 output_address_hi = output_address >> 32;
43471 u32 output_address_lo = output_address & 0xFFFFFFFF;
43472- void *hypercall_page = hv_context.hypercall_page;
43473+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
43474
43475 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
43476 "=a"(hv_status_lo) : "d" (control_hi),
43477@@ -154,7 +154,7 @@ int hv_init(void)
43478 /* See if the hypercall page is already set */
43479 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
43480
43481- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
43482+ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
43483
43484 if (!virtaddr)
43485 goto cleanup;
43486diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
43487index 5e90c5d..d8fcefb 100644
43488--- a/drivers/hv/hv_balloon.c
43489+++ b/drivers/hv/hv_balloon.c
43490@@ -470,7 +470,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
43491
43492 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
43493 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
43494-static atomic_t trans_id = ATOMIC_INIT(0);
43495+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
43496
43497 static int dm_ring_size = (5 * PAGE_SIZE);
43498
43499@@ -893,7 +893,7 @@ static void hot_add_req(struct work_struct *dummy)
43500 pr_info("Memory hot add failed\n");
43501
43502 dm->state = DM_INITIALIZED;
43503- resp.hdr.trans_id = atomic_inc_return(&trans_id);
43504+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43505 vmbus_sendpacket(dm->dev->channel, &resp,
43506 sizeof(struct dm_hot_add_response),
43507 (unsigned long)NULL,
43508@@ -973,7 +973,7 @@ static void post_status(struct hv_dynmem_device *dm)
43509 memset(&status, 0, sizeof(struct dm_status));
43510 status.hdr.type = DM_STATUS_REPORT;
43511 status.hdr.size = sizeof(struct dm_status);
43512- status.hdr.trans_id = atomic_inc_return(&trans_id);
43513+ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43514
43515 /*
43516 * The host expects the guest to report free memory.
43517@@ -993,7 +993,7 @@ static void post_status(struct hv_dynmem_device *dm)
43518 * send the status. This can happen if we were interrupted
43519 * after we picked our transaction ID.
43520 */
43521- if (status.hdr.trans_id != atomic_read(&trans_id))
43522+ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
43523 return;
43524
43525 /*
43526@@ -1129,7 +1129,7 @@ static void balloon_up(struct work_struct *dummy)
43527 */
43528
43529 do {
43530- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
43531+ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43532 ret = vmbus_sendpacket(dm_device.dev->channel,
43533 bl_resp,
43534 bl_resp->hdr.size,
43535@@ -1175,7 +1175,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
43536
43537 memset(&resp, 0, sizeof(struct dm_unballoon_response));
43538 resp.hdr.type = DM_UNBALLOON_RESPONSE;
43539- resp.hdr.trans_id = atomic_inc_return(&trans_id);
43540+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43541 resp.hdr.size = sizeof(struct dm_unballoon_response);
43542
43543 vmbus_sendpacket(dm_device.dev->channel, &resp,
43544@@ -1239,7 +1239,7 @@ static void version_resp(struct hv_dynmem_device *dm,
43545 memset(&version_req, 0, sizeof(struct dm_version_request));
43546 version_req.hdr.type = DM_VERSION_REQUEST;
43547 version_req.hdr.size = sizeof(struct dm_version_request);
43548- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
43549+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43550 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
43551 version_req.is_last_attempt = 1;
43552
43553@@ -1409,7 +1409,7 @@ static int balloon_probe(struct hv_device *dev,
43554 memset(&version_req, 0, sizeof(struct dm_version_request));
43555 version_req.hdr.type = DM_VERSION_REQUEST;
43556 version_req.hdr.size = sizeof(struct dm_version_request);
43557- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
43558+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43559 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
43560 version_req.is_last_attempt = 0;
43561
43562@@ -1440,7 +1440,7 @@ static int balloon_probe(struct hv_device *dev,
43563 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
43564 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
43565 cap_msg.hdr.size = sizeof(struct dm_capabilities);
43566- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
43567+ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43568
43569 cap_msg.caps.cap_bits.balloon = 1;
43570 cap_msg.caps.cap_bits.hot_add = 1;
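
Most atomic_t to atomic_unchecked_t conversions in this patch, the trans_id above included, follow the same rule: PaX's REFCOUNT hardening traps when a reference count overflows, so counters whose wrap-around is harmless by design (transaction IDs, sequence numbers, statistics) are moved to the unchecked variants to opt out. A user-space sketch of the distinction, with illustrative types and helpers rather than the kernel API:

#include <limits.h>
#include <stdio.h>

typedef struct { int v; } checked_counter_t;
typedef struct { int v; } unchecked_counter_t;

static int checked_inc(checked_counter_t *c)
{
    if (c->v == INT_MAX)    /* PaX would trap; here we just refuse */
        return -1;
    c->v++;
    return 0;
}

static int unchecked_inc_return(unchecked_counter_t *c)
{
    return ++c->v;          /* wrap-around is acceptable by design */
}

int main(void)
{
    checked_counter_t refcount = { INT_MAX };
    unchecked_counter_t trans_id = { 0 };

    if (checked_inc(&refcount) < 0)
        puts("refcount overflow caught");
    printf("next trans_id: %d\n", unchecked_inc_return(&trans_id));
    return 0;
}
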
43571diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
43572index 22b7507..fc2fc47 100644
43573--- a/drivers/hv/hyperv_vmbus.h
43574+++ b/drivers/hv/hyperv_vmbus.h
43575@@ -607,7 +607,7 @@ enum vmbus_connect_state {
43576 struct vmbus_connection {
43577 enum vmbus_connect_state conn_state;
43578
43579- atomic_t next_gpadl_handle;
43580+ atomic_unchecked_t next_gpadl_handle;
43581
43582 /*
43583 * Represents channel interrupts. Each bit position represents a
43584diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
43585index 4d6b269..2e23b86 100644
43586--- a/drivers/hv/vmbus_drv.c
43587+++ b/drivers/hv/vmbus_drv.c
43588@@ -807,10 +807,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
43589 {
43590 int ret = 0;
43591
43592- static atomic_t device_num = ATOMIC_INIT(0);
43593+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
43594
43595 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
43596- atomic_inc_return(&device_num));
43597+ atomic_inc_return_unchecked(&device_num));
43598
43599 child_device_obj->device.bus = &hv_bus;
43600 child_device_obj->device.parent = &hv_acpi_dev->dev;
43601diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
43602index 579bdf9..75118b5 100644
43603--- a/drivers/hwmon/acpi_power_meter.c
43604+++ b/drivers/hwmon/acpi_power_meter.c
43605@@ -116,7 +116,7 @@ struct sensor_template {
43606 struct device_attribute *devattr,
43607 const char *buf, size_t count);
43608 int index;
43609-};
43610+} __do_const;
43611
43612 /* Averaging interval */
43613 static int update_avg_interval(struct acpi_power_meter_resource *resource)
43614@@ -631,7 +631,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
43615 struct sensor_template *attrs)
43616 {
43617 struct device *dev = &resource->acpi_dev->dev;
43618- struct sensor_device_attribute *sensors =
43619+ sensor_device_attribute_no_const *sensors =
43620 &resource->sensors[resource->num_sensors];
43621 int res = 0;
43622
43623diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
43624index 3288f13..71cfb4e 100644
43625--- a/drivers/hwmon/applesmc.c
43626+++ b/drivers/hwmon/applesmc.c
43627@@ -1106,7 +1106,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
43628 {
43629 struct applesmc_node_group *grp;
43630 struct applesmc_dev_attr *node;
43631- struct attribute *attr;
43632+ attribute_no_const *attr;
43633 int ret, i;
43634
43635 for (grp = groups; grp->format; grp++) {
43636diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
43637index ae208f6..48b6c5b 100644
43638--- a/drivers/hwmon/asus_atk0110.c
43639+++ b/drivers/hwmon/asus_atk0110.c
43640@@ -147,10 +147,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
43641 struct atk_sensor_data {
43642 struct list_head list;
43643 struct atk_data *data;
43644- struct device_attribute label_attr;
43645- struct device_attribute input_attr;
43646- struct device_attribute limit1_attr;
43647- struct device_attribute limit2_attr;
43648+ device_attribute_no_const label_attr;
43649+ device_attribute_no_const input_attr;
43650+ device_attribute_no_const limit1_attr;
43651+ device_attribute_no_const limit2_attr;
43652 char label_attr_name[ATTR_NAME_SIZE];
43653 char input_attr_name[ATTR_NAME_SIZE];
43654 char limit1_attr_name[ATTR_NAME_SIZE];
43655@@ -270,7 +270,7 @@ static ssize_t atk_name_show(struct device *dev,
43656 static struct device_attribute atk_name_attr =
43657 __ATTR(name, 0444, atk_name_show, NULL);
43658
43659-static void atk_init_attribute(struct device_attribute *attr, char *name,
43660+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
43661 sysfs_show_func show)
43662 {
43663 sysfs_attr_init(&attr->attr);
43664diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
43665index d76f0b7..55ae976 100644
43666--- a/drivers/hwmon/coretemp.c
43667+++ b/drivers/hwmon/coretemp.c
43668@@ -784,7 +784,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
43669 return NOTIFY_OK;
43670 }
43671
43672-static struct notifier_block coretemp_cpu_notifier __refdata = {
43673+static struct notifier_block coretemp_cpu_notifier = {
43674 .notifier_call = coretemp_cpu_callback,
43675 };
43676
43677diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
43678index 632f1dc..57e6a58 100644
43679--- a/drivers/hwmon/ibmaem.c
43680+++ b/drivers/hwmon/ibmaem.c
43681@@ -926,7 +926,7 @@ static int aem_register_sensors(struct aem_data *data,
43682 struct aem_rw_sensor_template *rw)
43683 {
43684 struct device *dev = &data->pdev->dev;
43685- struct sensor_device_attribute *sensors = data->sensors;
43686+ sensor_device_attribute_no_const *sensors = data->sensors;
43687 int err;
43688
43689 /* Set up read-only sensors */
43690diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
43691index 14c82da..09b25d7 100644
43692--- a/drivers/hwmon/iio_hwmon.c
43693+++ b/drivers/hwmon/iio_hwmon.c
43694@@ -61,7 +61,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
43695 {
43696 struct device *dev = &pdev->dev;
43697 struct iio_hwmon_state *st;
43698- struct sensor_device_attribute *a;
43699+ sensor_device_attribute_no_const *a;
43700 int ret, i;
43701 int in_i = 1, temp_i = 1, curr_i = 1;
43702 enum iio_chan_type type;
43703diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
43704index 7710f46..427a28d 100644
43705--- a/drivers/hwmon/nct6683.c
43706+++ b/drivers/hwmon/nct6683.c
43707@@ -397,11 +397,11 @@ static struct attribute_group *
43708 nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
43709 int repeat)
43710 {
43711- struct sensor_device_attribute_2 *a2;
43712- struct sensor_device_attribute *a;
43713+ sensor_device_attribute_2_no_const *a2;
43714+ sensor_device_attribute_no_const *a;
43715 struct sensor_device_template **t;
43716 struct sensor_device_attr_u *su;
43717- struct attribute_group *group;
43718+ attribute_group_no_const *group;
43719 struct attribute **attrs;
43720 int i, j, count;
43721
43722diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
43723index 59d9a3f..2298fa4 100644
43724--- a/drivers/hwmon/nct6775.c
43725+++ b/drivers/hwmon/nct6775.c
43726@@ -944,10 +944,10 @@ static struct attribute_group *
43727 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
43728 int repeat)
43729 {
43730- struct attribute_group *group;
43731+ attribute_group_no_const *group;
43732 struct sensor_device_attr_u *su;
43733- struct sensor_device_attribute *a;
43734- struct sensor_device_attribute_2 *a2;
43735+ sensor_device_attribute_no_const *a;
43736+ sensor_device_attribute_2_no_const *a2;
43737 struct attribute **attrs;
43738 struct sensor_device_template **t;
43739 int i, count;
43740diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
43741index 291d11f..3f0dbbd 100644
43742--- a/drivers/hwmon/pmbus/pmbus_core.c
43743+++ b/drivers/hwmon/pmbus/pmbus_core.c
43744@@ -783,7 +783,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
43745 return 0;
43746 }
43747
43748-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
43749+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
43750 const char *name,
43751 umode_t mode,
43752 ssize_t (*show)(struct device *dev,
43753@@ -800,7 +800,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
43754 dev_attr->store = store;
43755 }
43756
43757-static void pmbus_attr_init(struct sensor_device_attribute *a,
43758+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
43759 const char *name,
43760 umode_t mode,
43761 ssize_t (*show)(struct device *dev,
43762@@ -822,7 +822,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
43763 u16 reg, u8 mask)
43764 {
43765 struct pmbus_boolean *boolean;
43766- struct sensor_device_attribute *a;
43767+ sensor_device_attribute_no_const *a;
43768
43769 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
43770 if (!boolean)
43771@@ -847,7 +847,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
43772 bool update, bool readonly)
43773 {
43774 struct pmbus_sensor *sensor;
43775- struct device_attribute *a;
43776+ device_attribute_no_const *a;
43777
43778 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
43779 if (!sensor)
43780@@ -878,7 +878,7 @@ static int pmbus_add_label(struct pmbus_data *data,
43781 const char *lstring, int index)
43782 {
43783 struct pmbus_label *label;
43784- struct device_attribute *a;
43785+ device_attribute_no_const *a;
43786
43787 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
43788 if (!label)
43789diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
43790index 97cd45a..ac54d8b 100644
43791--- a/drivers/hwmon/sht15.c
43792+++ b/drivers/hwmon/sht15.c
43793@@ -169,7 +169,7 @@ struct sht15_data {
43794 int supply_uv;
43795 bool supply_uv_valid;
43796 struct work_struct update_supply_work;
43797- atomic_t interrupt_handled;
43798+ atomic_unchecked_t interrupt_handled;
43799 };
43800
43801 /**
43802@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
43803 ret = gpio_direction_input(data->pdata->gpio_data);
43804 if (ret)
43805 return ret;
43806- atomic_set(&data->interrupt_handled, 0);
43807+ atomic_set_unchecked(&data->interrupt_handled, 0);
43808
43809 enable_irq(gpio_to_irq(data->pdata->gpio_data));
43810 if (gpio_get_value(data->pdata->gpio_data) == 0) {
43811 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
43812 /* Only relevant if the interrupt hasn't occurred. */
43813- if (!atomic_read(&data->interrupt_handled))
43814+ if (!atomic_read_unchecked(&data->interrupt_handled))
43815 schedule_work(&data->read_work);
43816 }
43817 ret = wait_event_timeout(data->wait_queue,
43818@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
43819
43820 /* First disable the interrupt */
43821 disable_irq_nosync(irq);
43822- atomic_inc(&data->interrupt_handled);
43823+ atomic_inc_unchecked(&data->interrupt_handled);
43824 /* Then schedule a reading work struct */
43825 if (data->state != SHT15_READING_NOTHING)
43826 schedule_work(&data->read_work);
43827@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
43828 * If not, then start the interrupt again - care here as could
43829 * have gone low in meantime so verify it hasn't!
43830 */
43831- atomic_set(&data->interrupt_handled, 0);
43832+ atomic_set_unchecked(&data->interrupt_handled, 0);
43833 enable_irq(gpio_to_irq(data->pdata->gpio_data));
43834 /* If still not occurred or another handler was scheduled */
43835 if (gpio_get_value(data->pdata->gpio_data)
43836- || atomic_read(&data->interrupt_handled))
43837+ || atomic_read_unchecked(&data->interrupt_handled))
43838 return;
43839 }
43840
43841diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
43842index 8df43c5..b07b91d 100644
43843--- a/drivers/hwmon/via-cputemp.c
43844+++ b/drivers/hwmon/via-cputemp.c
43845@@ -296,7 +296,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
43846 return NOTIFY_OK;
43847 }
43848
43849-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
43850+static struct notifier_block via_cputemp_cpu_notifier = {
43851 .notifier_call = via_cputemp_cpu_callback,
43852 };
43853
43854diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
43855index 41fc683..a39cfea 100644
43856--- a/drivers/i2c/busses/i2c-amd756-s4882.c
43857+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
43858@@ -43,7 +43,7 @@
43859 extern struct i2c_adapter amd756_smbus;
43860
43861 static struct i2c_adapter *s4882_adapter;
43862-static struct i2c_algorithm *s4882_algo;
43863+static i2c_algorithm_no_const *s4882_algo;
43864
43865 /* Wrapper access functions for multiplexed SMBus */
43866 static DEFINE_MUTEX(amd756_lock);
43867diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
43868index b19a310..d6eece0 100644
43869--- a/drivers/i2c/busses/i2c-diolan-u2c.c
43870+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
43871@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
43872 /* usb layer */
43873
43874 /* Send command to device, and get response. */
43875-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
43876+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
43877 {
43878 int ret = 0;
43879 int actual;
43880diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
43881index b170bdf..3c76427 100644
43882--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
43883+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
43884@@ -41,7 +41,7 @@
43885 extern struct i2c_adapter *nforce2_smbus;
43886
43887 static struct i2c_adapter *s4985_adapter;
43888-static struct i2c_algorithm *s4985_algo;
43889+static i2c_algorithm_no_const *s4985_algo;
43890
43891 /* Wrapper access functions for multiplexed SMBus */
43892 static DEFINE_MUTEX(nforce2_lock);
43893diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
43894index 80b47e8..1a6040d9 100644
43895--- a/drivers/i2c/i2c-dev.c
43896+++ b/drivers/i2c/i2c-dev.c
43897@@ -277,7 +277,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
43898 break;
43899 }
43900
43901- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
43902+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
43903 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
43904 if (IS_ERR(rdwr_pa[i].buf)) {
43905 res = PTR_ERR(rdwr_pa[i].buf);
43906diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
43907index 0b510ba..4fbb5085 100644
43908--- a/drivers/ide/ide-cd.c
43909+++ b/drivers/ide/ide-cd.c
43910@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
43911 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
43912 if ((unsigned long)buf & alignment
43913 || blk_rq_bytes(rq) & q->dma_pad_mask
43914- || object_is_on_stack(buf))
43915+ || object_starts_on_stack(buf))
43916 drive->dma = 0;
43917 }
43918 }
43919diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
43920index 4b1f375..770b95f 100644
43921--- a/drivers/iio/industrialio-core.c
43922+++ b/drivers/iio/industrialio-core.c
43923@@ -551,7 +551,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
43924 }
43925
43926 static
43927-int __iio_device_attr_init(struct device_attribute *dev_attr,
43928+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
43929 const char *postfix,
43930 struct iio_chan_spec const *chan,
43931 ssize_t (*readfunc)(struct device *dev,
43932diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
43933index c323917..6ddea8b 100644
43934--- a/drivers/infiniband/core/cm.c
43935+++ b/drivers/infiniband/core/cm.c
43936@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
43937
43938 struct cm_counter_group {
43939 struct kobject obj;
43940- atomic_long_t counter[CM_ATTR_COUNT];
43941+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
43942 };
43943
43944 struct cm_counter_attribute {
43945@@ -1398,7 +1398,7 @@ static void cm_dup_req_handler(struct cm_work *work,
43946 struct ib_mad_send_buf *msg = NULL;
43947 int ret;
43948
43949- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43950+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43951 counter[CM_REQ_COUNTER]);
43952
43953 /* Quick state check to discard duplicate REQs. */
43954@@ -1785,7 +1785,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
43955 if (!cm_id_priv)
43956 return;
43957
43958- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43959+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43960 counter[CM_REP_COUNTER]);
43961 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
43962 if (ret)
43963@@ -1952,7 +1952,7 @@ static int cm_rtu_handler(struct cm_work *work)
43964 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
43965 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
43966 spin_unlock_irq(&cm_id_priv->lock);
43967- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43968+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43969 counter[CM_RTU_COUNTER]);
43970 goto out;
43971 }
43972@@ -2135,7 +2135,7 @@ static int cm_dreq_handler(struct cm_work *work)
43973 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
43974 dreq_msg->local_comm_id);
43975 if (!cm_id_priv) {
43976- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43977+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43978 counter[CM_DREQ_COUNTER]);
43979 cm_issue_drep(work->port, work->mad_recv_wc);
43980 return -EINVAL;
43981@@ -2160,7 +2160,7 @@ static int cm_dreq_handler(struct cm_work *work)
43982 case IB_CM_MRA_REP_RCVD:
43983 break;
43984 case IB_CM_TIMEWAIT:
43985- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43986+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43987 counter[CM_DREQ_COUNTER]);
43988 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
43989 goto unlock;
43990@@ -2174,7 +2174,7 @@ static int cm_dreq_handler(struct cm_work *work)
43991 cm_free_msg(msg);
43992 goto deref;
43993 case IB_CM_DREQ_RCVD:
43994- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43995+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43996 counter[CM_DREQ_COUNTER]);
43997 goto unlock;
43998 default:
43999@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work)
44000 ib_modify_mad(cm_id_priv->av.port->mad_agent,
44001 cm_id_priv->msg, timeout)) {
44002 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
44003- atomic_long_inc(&work->port->
44004+ atomic_long_inc_unchecked(&work->port->
44005 counter_group[CM_RECV_DUPLICATES].
44006 counter[CM_MRA_COUNTER]);
44007 goto out;
44008@@ -2550,7 +2550,7 @@ static int cm_mra_handler(struct cm_work *work)
44009 break;
44010 case IB_CM_MRA_REQ_RCVD:
44011 case IB_CM_MRA_REP_RCVD:
44012- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
44013+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
44014 counter[CM_MRA_COUNTER]);
44015 /* fall through */
44016 default:
44017@@ -2712,7 +2712,7 @@ static int cm_lap_handler(struct cm_work *work)
44018 case IB_CM_LAP_IDLE:
44019 break;
44020 case IB_CM_MRA_LAP_SENT:
44021- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
44022+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
44023 counter[CM_LAP_COUNTER]);
44024 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
44025 goto unlock;
44026@@ -2728,7 +2728,7 @@ static int cm_lap_handler(struct cm_work *work)
44027 cm_free_msg(msg);
44028 goto deref;
44029 case IB_CM_LAP_RCVD:
44030- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
44031+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
44032 counter[CM_LAP_COUNTER]);
44033 goto unlock;
44034 default:
44035@@ -3012,7 +3012,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
44036 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
44037 if (cur_cm_id_priv) {
44038 spin_unlock_irq(&cm.lock);
44039- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
44040+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
44041 counter[CM_SIDR_REQ_COUNTER]);
44042 goto out; /* Duplicate message. */
44043 }
44044@@ -3224,10 +3224,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
44045 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
44046 msg->retries = 1;
44047
44048- atomic_long_add(1 + msg->retries,
44049+ atomic_long_add_unchecked(1 + msg->retries,
44050 &port->counter_group[CM_XMIT].counter[attr_index]);
44051 if (msg->retries)
44052- atomic_long_add(msg->retries,
44053+ atomic_long_add_unchecked(msg->retries,
44054 &port->counter_group[CM_XMIT_RETRIES].
44055 counter[attr_index]);
44056
44057@@ -3437,7 +3437,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
44058 }
44059
44060 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
44061- atomic_long_inc(&port->counter_group[CM_RECV].
44062+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
44063 counter[attr_id - CM_ATTR_ID_OFFSET]);
44064
44065 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
44066@@ -3668,7 +3668,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
44067 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
44068
44069 return sprintf(buf, "%ld\n",
44070- atomic_long_read(&group->counter[cm_attr->index]));
44071+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
44072 }
44073
44074 static const struct sysfs_ops cm_counter_ops = {
44075diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
44076index 9f5ad7c..588cd84 100644
44077--- a/drivers/infiniband/core/fmr_pool.c
44078+++ b/drivers/infiniband/core/fmr_pool.c
44079@@ -98,8 +98,8 @@ struct ib_fmr_pool {
44080
44081 struct task_struct *thread;
44082
44083- atomic_t req_ser;
44084- atomic_t flush_ser;
44085+ atomic_unchecked_t req_ser;
44086+ atomic_unchecked_t flush_ser;
44087
44088 wait_queue_head_t force_wait;
44089 };
44090@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
44091 struct ib_fmr_pool *pool = pool_ptr;
44092
44093 do {
44094- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
44095+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
44096 ib_fmr_batch_release(pool);
44097
44098- atomic_inc(&pool->flush_ser);
44099+ atomic_inc_unchecked(&pool->flush_ser);
44100 wake_up_interruptible(&pool->force_wait);
44101
44102 if (pool->flush_function)
44103@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
44104 }
44105
44106 set_current_state(TASK_INTERRUPTIBLE);
44107- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
44108+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
44109 !kthread_should_stop())
44110 schedule();
44111 __set_current_state(TASK_RUNNING);
44112@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
44113 pool->dirty_watermark = params->dirty_watermark;
44114 pool->dirty_len = 0;
44115 spin_lock_init(&pool->pool_lock);
44116- atomic_set(&pool->req_ser, 0);
44117- atomic_set(&pool->flush_ser, 0);
44118+ atomic_set_unchecked(&pool->req_ser, 0);
44119+ atomic_set_unchecked(&pool->flush_ser, 0);
44120 init_waitqueue_head(&pool->force_wait);
44121
44122 pool->thread = kthread_run(ib_fmr_cleanup_thread,
44123@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
44124 }
44125 spin_unlock_irq(&pool->pool_lock);
44126
44127- serial = atomic_inc_return(&pool->req_ser);
44128+ serial = atomic_inc_return_unchecked(&pool->req_ser);
44129 wake_up_process(pool->thread);
44130
44131 if (wait_event_interruptible(pool->force_wait,
44132- atomic_read(&pool->flush_ser) - serial >= 0))
44133+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
44134 return -EINTR;
44135
44136 return 0;
44137@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
44138 } else {
44139 list_add_tail(&fmr->list, &pool->dirty_list);
44140 if (++pool->dirty_len >= pool->dirty_watermark) {
44141- atomic_inc(&pool->req_ser);
44142+ atomic_inc_unchecked(&pool->req_ser);
44143 wake_up_process(pool->thread);
44144 }
44145 }
44146diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
44147index ec7a298..8742e59 100644
44148--- a/drivers/infiniband/hw/cxgb4/mem.c
44149+++ b/drivers/infiniband/hw/cxgb4/mem.c
44150@@ -249,7 +249,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
44151 int err;
44152 struct fw_ri_tpte tpt;
44153 u32 stag_idx;
44154- static atomic_t key;
44155+ static atomic_unchecked_t key;
44156
44157 if (c4iw_fatal_error(rdev))
44158 return -EIO;
44159@@ -270,7 +270,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
44160 if (rdev->stats.stag.cur > rdev->stats.stag.max)
44161 rdev->stats.stag.max = rdev->stats.stag.cur;
44162 mutex_unlock(&rdev->stats.lock);
44163- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
44164+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
44165 }
44166 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
44167 __func__, stag_state, type, pdid, stag_idx);
44168diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
44169index 79b3dbc..96e5fcc 100644
44170--- a/drivers/infiniband/hw/ipath/ipath_rc.c
44171+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
44172@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
44173 struct ib_atomic_eth *ateth;
44174 struct ipath_ack_entry *e;
44175 u64 vaddr;
44176- atomic64_t *maddr;
44177+ atomic64_unchecked_t *maddr;
44178 u64 sdata;
44179 u32 rkey;
44180 u8 next;
44181@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
44182 IB_ACCESS_REMOTE_ATOMIC)))
44183 goto nack_acc_unlck;
44184 /* Perform atomic OP and save result. */
44185- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
44186+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
44187 sdata = be64_to_cpu(ateth->swap_data);
44188 e = &qp->s_ack_queue[qp->r_head_ack_queue];
44189 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
44190- (u64) atomic64_add_return(sdata, maddr) - sdata :
44191+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
44192 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
44193 be64_to_cpu(ateth->compare_data),
44194 sdata);
44195diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
44196index 1f95bba..9530f87 100644
44197--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
44198+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
44199@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
44200 unsigned long flags;
44201 struct ib_wc wc;
44202 u64 sdata;
44203- atomic64_t *maddr;
44204+ atomic64_unchecked_t *maddr;
44205 enum ib_wc_status send_status;
44206
44207 /*
44208@@ -382,11 +382,11 @@ again:
44209 IB_ACCESS_REMOTE_ATOMIC)))
44210 goto acc_err;
44211 /* Perform atomic OP and save result. */
44212- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
44213+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
44214 sdata = wqe->wr.wr.atomic.compare_add;
44215 *(u64 *) sqp->s_sge.sge.vaddr =
44216 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
44217- (u64) atomic64_add_return(sdata, maddr) - sdata :
44218+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
44219 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
44220 sdata, wqe->wr.wr.atomic.swap);
44221 goto send_comp;
44222diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
44223index 287ad05..5ae7b44d 100644
44224--- a/drivers/infiniband/hw/mlx4/mad.c
44225+++ b/drivers/infiniband/hw/mlx4/mad.c
44226@@ -98,7 +98,7 @@ __be64 mlx4_ib_gen_node_guid(void)
44227
44228 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
44229 {
44230- return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
44231+ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
44232 cpu_to_be64(0xff00000000000000LL);
44233 }
44234
44235diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
44236index ed327e6..ca1739e0 100644
44237--- a/drivers/infiniband/hw/mlx4/mcg.c
44238+++ b/drivers/infiniband/hw/mlx4/mcg.c
44239@@ -1041,7 +1041,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
44240 {
44241 char name[20];
44242
44243- atomic_set(&ctx->tid, 0);
44244+ atomic_set_unchecked(&ctx->tid, 0);
44245 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
44246 ctx->mcg_wq = create_singlethread_workqueue(name);
44247 if (!ctx->mcg_wq)
44248diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
44249index 369da3c..223e6e9 100644
44250--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
44251+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
44252@@ -426,7 +426,7 @@ struct mlx4_ib_demux_ctx {
44253 struct list_head mcg_mgid0_list;
44254 struct workqueue_struct *mcg_wq;
44255 struct mlx4_ib_demux_pv_ctx **tun;
44256- atomic_t tid;
44257+ atomic_unchecked_t tid;
44258 int flushing; /* flushing the work queue */
44259 };
44260
44261diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
44262index 9d3e5c1..6f166df 100644
44263--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
44264+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
44265@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
44266 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
44267 }
44268
44269-int mthca_QUERY_FW(struct mthca_dev *dev)
44270+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
44271 {
44272 struct mthca_mailbox *mailbox;
44273 u32 *outbox;
44274@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44275 CMD_TIME_CLASS_B);
44276 }
44277
44278-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44279+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44280 int num_mtt)
44281 {
44282 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
44283@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
44284 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
44285 }
44286
44287-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44288+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44289 int eq_num)
44290 {
44291 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
44292@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
44293 CMD_TIME_CLASS_B);
44294 }
44295
44296-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
44297+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
44298 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
44299 void *in_mad, void *response_mad)
44300 {
44301diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
44302index ded76c1..0cf0a08 100644
44303--- a/drivers/infiniband/hw/mthca/mthca_main.c
44304+++ b/drivers/infiniband/hw/mthca/mthca_main.c
44305@@ -692,7 +692,7 @@ err_close:
44306 return err;
44307 }
44308
44309-static int mthca_setup_hca(struct mthca_dev *dev)
44310+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
44311 {
44312 int err;
44313
44314diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
44315index ed9a989..6aa5dc2 100644
44316--- a/drivers/infiniband/hw/mthca/mthca_mr.c
44317+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
44318@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
44319 * through the bitmaps)
44320 */
44321
44322-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
44323+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
44324 {
44325 int o;
44326 int m;
44327@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
44328 return key;
44329 }
44330
44331-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
44332+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
44333 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
44334 {
44335 struct mthca_mailbox *mailbox;
44336@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
44337 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
44338 }
44339
44340-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
44341+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
44342 u64 *buffer_list, int buffer_size_shift,
44343 int list_len, u64 iova, u64 total_size,
44344 u32 access, struct mthca_mr *mr)
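
The __intentional_overflow(-1) annotations sprinkled over the mthca and cxgb4 hunks address grsecurity's size_overflow gcc plugin: arithmetic in the marked functions may wrap on purpose, so the plugin is told not to instrument or report it. A sketch of how such an annotation can be stubbed so annotated code still builds without the plugin; the macro name matches the patch, while pack_stag is a made-up stand-in for the stag packing seen in the cxgb4 hunk:

#include <stdio.h>

#ifndef __intentional_overflow
#define __intentional_overflow(...)    /* consumed by the gcc plugin */
#endif

/* arithmetic here may wrap by design, so the plugin is waved off */
static unsigned int __intentional_overflow(-1)
pack_stag(unsigned int idx, unsigned int key)
{
    return (idx << 8) | (key & 0xff);
}

int main(void)
{
    printf("stag: 0x%x\n", pack_stag(0x123456, 0x1ff));
    return 0;
}
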
44345diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
44346index 415f8e1..e34214e 100644
44347--- a/drivers/infiniband/hw/mthca/mthca_provider.c
44348+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
44349@@ -764,7 +764,7 @@ unlock:
44350 return 0;
44351 }
44352
44353-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
44354+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
44355 {
44356 struct mthca_dev *dev = to_mdev(ibcq->device);
44357 struct mthca_cq *cq = to_mcq(ibcq);
44358diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
44359index 3b2a6dc..bce26ff 100644
44360--- a/drivers/infiniband/hw/nes/nes.c
44361+++ b/drivers/infiniband/hw/nes/nes.c
44362@@ -97,7 +97,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
44363 LIST_HEAD(nes_adapter_list);
44364 static LIST_HEAD(nes_dev_list);
44365
44366-atomic_t qps_destroyed;
44367+atomic_unchecked_t qps_destroyed;
44368
44369 static unsigned int ee_flsh_adapter;
44370 static unsigned int sysfs_nonidx_addr;
44371@@ -278,7 +278,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
44372 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
44373 struct nes_adapter *nesadapter = nesdev->nesadapter;
44374
44375- atomic_inc(&qps_destroyed);
44376+ atomic_inc_unchecked(&qps_destroyed);
44377
44378 /* Free the control structures */
44379
44380diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
44381index bd9d132..70d84f4 100644
44382--- a/drivers/infiniband/hw/nes/nes.h
44383+++ b/drivers/infiniband/hw/nes/nes.h
44384@@ -180,17 +180,17 @@ extern unsigned int nes_debug_level;
44385 extern unsigned int wqm_quanta;
44386 extern struct list_head nes_adapter_list;
44387
44388-extern atomic_t cm_connects;
44389-extern atomic_t cm_accepts;
44390-extern atomic_t cm_disconnects;
44391-extern atomic_t cm_closes;
44392-extern atomic_t cm_connecteds;
44393-extern atomic_t cm_connect_reqs;
44394-extern atomic_t cm_rejects;
44395-extern atomic_t mod_qp_timouts;
44396-extern atomic_t qps_created;
44397-extern atomic_t qps_destroyed;
44398-extern atomic_t sw_qps_destroyed;
44399+extern atomic_unchecked_t cm_connects;
44400+extern atomic_unchecked_t cm_accepts;
44401+extern atomic_unchecked_t cm_disconnects;
44402+extern atomic_unchecked_t cm_closes;
44403+extern atomic_unchecked_t cm_connecteds;
44404+extern atomic_unchecked_t cm_connect_reqs;
44405+extern atomic_unchecked_t cm_rejects;
44406+extern atomic_unchecked_t mod_qp_timouts;
44407+extern atomic_unchecked_t qps_created;
44408+extern atomic_unchecked_t qps_destroyed;
44409+extern atomic_unchecked_t sw_qps_destroyed;
44410 extern u32 mh_detected;
44411 extern u32 mh_pauses_sent;
44412 extern u32 cm_packets_sent;
44413@@ -199,16 +199,16 @@ extern u32 cm_packets_created;
44414 extern u32 cm_packets_received;
44415 extern u32 cm_packets_dropped;
44416 extern u32 cm_packets_retrans;
44417-extern atomic_t cm_listens_created;
44418-extern atomic_t cm_listens_destroyed;
44419+extern atomic_unchecked_t cm_listens_created;
44420+extern atomic_unchecked_t cm_listens_destroyed;
44421 extern u32 cm_backlog_drops;
44422-extern atomic_t cm_loopbacks;
44423-extern atomic_t cm_nodes_created;
44424-extern atomic_t cm_nodes_destroyed;
44425-extern atomic_t cm_accel_dropped_pkts;
44426-extern atomic_t cm_resets_recvd;
44427-extern atomic_t pau_qps_created;
44428-extern atomic_t pau_qps_destroyed;
44429+extern atomic_unchecked_t cm_loopbacks;
44430+extern atomic_unchecked_t cm_nodes_created;
44431+extern atomic_unchecked_t cm_nodes_destroyed;
44432+extern atomic_unchecked_t cm_accel_dropped_pkts;
44433+extern atomic_unchecked_t cm_resets_recvd;
44434+extern atomic_unchecked_t pau_qps_created;
44435+extern atomic_unchecked_t pau_qps_destroyed;
44436
44437 extern u32 int_mod_timer_init;
44438 extern u32 int_mod_cq_depth_256;
44439diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
44440index 6f09a72..cf4399d 100644
44441--- a/drivers/infiniband/hw/nes/nes_cm.c
44442+++ b/drivers/infiniband/hw/nes/nes_cm.c
44443@@ -69,14 +69,14 @@ u32 cm_packets_dropped;
44444 u32 cm_packets_retrans;
44445 u32 cm_packets_created;
44446 u32 cm_packets_received;
44447-atomic_t cm_listens_created;
44448-atomic_t cm_listens_destroyed;
44449+atomic_unchecked_t cm_listens_created;
44450+atomic_unchecked_t cm_listens_destroyed;
44451 u32 cm_backlog_drops;
44452-atomic_t cm_loopbacks;
44453-atomic_t cm_nodes_created;
44454-atomic_t cm_nodes_destroyed;
44455-atomic_t cm_accel_dropped_pkts;
44456-atomic_t cm_resets_recvd;
44457+atomic_unchecked_t cm_loopbacks;
44458+atomic_unchecked_t cm_nodes_created;
44459+atomic_unchecked_t cm_nodes_destroyed;
44460+atomic_unchecked_t cm_accel_dropped_pkts;
44461+atomic_unchecked_t cm_resets_recvd;
44462
44463 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
44464 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
44465@@ -135,28 +135,28 @@ static void record_ird_ord(struct nes_cm_node *, u16, u16);
44466 /* instance of function pointers for client API */
44467 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
44468 static struct nes_cm_ops nes_cm_api = {
44469- mini_cm_accelerated,
44470- mini_cm_listen,
44471- mini_cm_del_listen,
44472- mini_cm_connect,
44473- mini_cm_close,
44474- mini_cm_accept,
44475- mini_cm_reject,
44476- mini_cm_recv_pkt,
44477- mini_cm_dealloc_core,
44478- mini_cm_get,
44479- mini_cm_set
44480+ .accelerated = mini_cm_accelerated,
44481+ .listen = mini_cm_listen,
44482+ .stop_listener = mini_cm_del_listen,
44483+ .connect = mini_cm_connect,
44484+ .close = mini_cm_close,
44485+ .accept = mini_cm_accept,
44486+ .reject = mini_cm_reject,
44487+ .recv_pkt = mini_cm_recv_pkt,
44488+ .destroy_cm_core = mini_cm_dealloc_core,
44489+ .get = mini_cm_get,
44490+ .set = mini_cm_set
44491 };
44492
44493 static struct nes_cm_core *g_cm_core;
44494
44495-atomic_t cm_connects;
44496-atomic_t cm_accepts;
44497-atomic_t cm_disconnects;
44498-atomic_t cm_closes;
44499-atomic_t cm_connecteds;
44500-atomic_t cm_connect_reqs;
44501-atomic_t cm_rejects;
44502+atomic_unchecked_t cm_connects;
44503+atomic_unchecked_t cm_accepts;
44504+atomic_unchecked_t cm_disconnects;
44505+atomic_unchecked_t cm_closes;
44506+atomic_unchecked_t cm_connecteds;
44507+atomic_unchecked_t cm_connect_reqs;
44508+atomic_unchecked_t cm_rejects;
44509
44510 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
44511 {
44512@@ -1436,7 +1436,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
44513 kfree(listener);
44514 listener = NULL;
44515 ret = 0;
44516- atomic_inc(&cm_listens_destroyed);
44517+ atomic_inc_unchecked(&cm_listens_destroyed);
44518 } else {
44519 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
44520 }
44521@@ -1637,7 +1637,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
44522 cm_node->rem_mac);
44523
44524 add_hte_node(cm_core, cm_node);
44525- atomic_inc(&cm_nodes_created);
44526+ atomic_inc_unchecked(&cm_nodes_created);
44527
44528 return cm_node;
44529 }
44530@@ -1698,7 +1698,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
44531 }
44532
44533 atomic_dec(&cm_core->node_cnt);
44534- atomic_inc(&cm_nodes_destroyed);
44535+ atomic_inc_unchecked(&cm_nodes_destroyed);
44536 nesqp = cm_node->nesqp;
44537 if (nesqp) {
44538 nesqp->cm_node = NULL;
44539@@ -1762,7 +1762,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
44540
44541 static void drop_packet(struct sk_buff *skb)
44542 {
44543- atomic_inc(&cm_accel_dropped_pkts);
44544+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
44545 dev_kfree_skb_any(skb);
44546 }
44547
44548@@ -1825,7 +1825,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
44549 {
44550
44551 int reset = 0; /* whether to send reset in case of err.. */
44552- atomic_inc(&cm_resets_recvd);
44553+ atomic_inc_unchecked(&cm_resets_recvd);
44554 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
44555 " refcnt=%d\n", cm_node, cm_node->state,
44556 atomic_read(&cm_node->ref_count));
44557@@ -2492,7 +2492,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
44558 rem_ref_cm_node(cm_node->cm_core, cm_node);
44559 return NULL;
44560 }
44561- atomic_inc(&cm_loopbacks);
44562+ atomic_inc_unchecked(&cm_loopbacks);
44563 loopbackremotenode->loopbackpartner = cm_node;
44564 loopbackremotenode->tcp_cntxt.rcv_wscale =
44565 NES_CM_DEFAULT_RCV_WND_SCALE;
44566@@ -2773,7 +2773,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
44567 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
44568 else {
44569 rem_ref_cm_node(cm_core, cm_node);
44570- atomic_inc(&cm_accel_dropped_pkts);
44571+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
44572 dev_kfree_skb_any(skb);
44573 }
44574 break;
44575@@ -3081,7 +3081,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
44576
44577 if ((cm_id) && (cm_id->event_handler)) {
44578 if (issue_disconn) {
44579- atomic_inc(&cm_disconnects);
44580+ atomic_inc_unchecked(&cm_disconnects);
44581 cm_event.event = IW_CM_EVENT_DISCONNECT;
44582 cm_event.status = disconn_status;
44583 cm_event.local_addr = cm_id->local_addr;
44584@@ -3103,7 +3103,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
44585 }
44586
44587 if (issue_close) {
44588- atomic_inc(&cm_closes);
44589+ atomic_inc_unchecked(&cm_closes);
44590 nes_disconnect(nesqp, 1);
44591
44592 cm_id->provider_data = nesqp;
44593@@ -3241,7 +3241,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
44594
44595 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
44596 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
44597- atomic_inc(&cm_accepts);
44598+ atomic_inc_unchecked(&cm_accepts);
44599
44600 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
44601 netdev_refcnt_read(nesvnic->netdev));
44602@@ -3439,7 +3439,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
44603 struct nes_cm_core *cm_core;
44604 u8 *start_buff;
44605
44606- atomic_inc(&cm_rejects);
44607+ atomic_inc_unchecked(&cm_rejects);
44608 cm_node = (struct nes_cm_node *)cm_id->provider_data;
44609 loopback = cm_node->loopbackpartner;
44610 cm_core = cm_node->cm_core;
44611@@ -3504,7 +3504,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
44612 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
44613 ntohs(laddr->sin_port));
44614
44615- atomic_inc(&cm_connects);
44616+ atomic_inc_unchecked(&cm_connects);
44617 nesqp->active_conn = 1;
44618
44619 /* cache the cm_id in the qp */
44620@@ -3649,7 +3649,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
44621 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
44622 return err;
44623 }
44624- atomic_inc(&cm_listens_created);
44625+ atomic_inc_unchecked(&cm_listens_created);
44626 }
44627
44628 cm_id->add_ref(cm_id);
44629@@ -3756,7 +3756,7 @@ static void cm_event_connected(struct nes_cm_event *event)
44630
44631 if (nesqp->destroyed)
44632 return;
44633- atomic_inc(&cm_connecteds);
44634+ atomic_inc_unchecked(&cm_connecteds);
44635 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
44636 " local port 0x%04X. jiffies = %lu.\n",
44637 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
44638@@ -3941,7 +3941,7 @@ static void cm_event_reset(struct nes_cm_event *event)
44639
44640 cm_id->add_ref(cm_id);
44641 ret = cm_id->event_handler(cm_id, &cm_event);
44642- atomic_inc(&cm_closes);
44643+ atomic_inc_unchecked(&cm_closes);
44644 cm_event.event = IW_CM_EVENT_CLOSE;
44645 cm_event.status = 0;
44646 cm_event.provider_data = cm_id->provider_data;
44647@@ -3981,7 +3981,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
44648 return;
44649 cm_id = cm_node->cm_id;
44650
44651- atomic_inc(&cm_connect_reqs);
44652+ atomic_inc_unchecked(&cm_connect_reqs);
44653 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
44654 cm_node, cm_id, jiffies);
44655
44656@@ -4030,7 +4030,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
44657 return;
44658 cm_id = cm_node->cm_id;
44659
44660- atomic_inc(&cm_connect_reqs);
44661+ atomic_inc_unchecked(&cm_connect_reqs);
44662 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
44663 cm_node, cm_id, jiffies);
44664
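
The nes hunks above convert counters that are pure statistics from atomic_t to atomic_unchecked_t, exempting them from PaX REFCOUNT overflow detection: their wraparound is harmless, and saturating them would only corrupt the ethtool numbers read back later in nes_nic.c. A minimal sketch of the pattern, assuming the fallback definitions a tree without PaX would need (the *_unchecked names exist only in this patch):

#include <linux/atomic.h>
#include <linux/skbuff.h>

#ifndef CONFIG_PAX_REFCOUNT
typedef atomic_t atomic_unchecked_t;
#define atomic_inc_unchecked(v)		atomic_inc(v)
#define atomic_read_unchecked(v)	atomic_read(v)
#endif

static atomic_unchecked_t pkts_dropped = ATOMIC_INIT(0);

static void drop_one(struct sk_buff *skb)
{
	/* pure statistic: wraparound is harmless, so the REFCOUNT
	   saturation logic is deliberately bypassed */
	atomic_inc_unchecked(&pkts_dropped);
	dev_kfree_skb_any(skb);
}
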
44665diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
44666index 4166452..fc952c3 100644
44667--- a/drivers/infiniband/hw/nes/nes_mgt.c
44668+++ b/drivers/infiniband/hw/nes/nes_mgt.c
44669@@ -40,8 +40,8 @@
44670 #include "nes.h"
44671 #include "nes_mgt.h"
44672
44673-atomic_t pau_qps_created;
44674-atomic_t pau_qps_destroyed;
44675+atomic_unchecked_t pau_qps_created;
44676+atomic_unchecked_t pau_qps_destroyed;
44677
44678 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
44679 {
44680@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
44681 {
44682 struct sk_buff *skb;
44683 unsigned long flags;
44684- atomic_inc(&pau_qps_destroyed);
44685+ atomic_inc_unchecked(&pau_qps_destroyed);
44686
44687 /* Free packets that have not yet been forwarded */
44688 /* Lock is acquired by skb_dequeue when removing the skb */
44689@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
44690 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
44691 skb_queue_head_init(&nesqp->pau_list);
44692 spin_lock_init(&nesqp->pau_lock);
44693- atomic_inc(&pau_qps_created);
44694+ atomic_inc_unchecked(&pau_qps_created);
44695 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
44696 }
44697
44698diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
44699index 49eb511..a774366 100644
44700--- a/drivers/infiniband/hw/nes/nes_nic.c
44701+++ b/drivers/infiniband/hw/nes/nes_nic.c
44702@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
44703 target_stat_values[++index] = mh_detected;
44704 target_stat_values[++index] = mh_pauses_sent;
44705 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
44706- target_stat_values[++index] = atomic_read(&cm_connects);
44707- target_stat_values[++index] = atomic_read(&cm_accepts);
44708- target_stat_values[++index] = atomic_read(&cm_disconnects);
44709- target_stat_values[++index] = atomic_read(&cm_connecteds);
44710- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
44711- target_stat_values[++index] = atomic_read(&cm_rejects);
44712- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
44713- target_stat_values[++index] = atomic_read(&qps_created);
44714- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
44715- target_stat_values[++index] = atomic_read(&qps_destroyed);
44716- target_stat_values[++index] = atomic_read(&cm_closes);
44717+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
44718+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
44719+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
44720+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
44721+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
44722+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
44723+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
44724+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
44725+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
44726+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
44727+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
44728 target_stat_values[++index] = cm_packets_sent;
44729 target_stat_values[++index] = cm_packets_bounced;
44730 target_stat_values[++index] = cm_packets_created;
44731 target_stat_values[++index] = cm_packets_received;
44732 target_stat_values[++index] = cm_packets_dropped;
44733 target_stat_values[++index] = cm_packets_retrans;
44734- target_stat_values[++index] = atomic_read(&cm_listens_created);
44735- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
44736+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
44737+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
44738 target_stat_values[++index] = cm_backlog_drops;
44739- target_stat_values[++index] = atomic_read(&cm_loopbacks);
44740- target_stat_values[++index] = atomic_read(&cm_nodes_created);
44741- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
44742- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
44743- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
44744+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
44745+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
44746+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
44747+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
44748+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
44749 target_stat_values[++index] = nesadapter->free_4kpbl;
44750 target_stat_values[++index] = nesadapter->free_256pbl;
44751 target_stat_values[++index] = int_mod_timer_init;
44752 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
44753 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
44754 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
44755- target_stat_values[++index] = atomic_read(&pau_qps_created);
44756- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
44757+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
44758+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
44759 }
44760
44761 /**
44762diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
44763index 218dd35..97ce31d 100644
44764--- a/drivers/infiniband/hw/nes/nes_verbs.c
44765+++ b/drivers/infiniband/hw/nes/nes_verbs.c
44766@@ -46,9 +46,9 @@
44767
44768 #include <rdma/ib_umem.h>
44769
44770-atomic_t mod_qp_timouts;
44771-atomic_t qps_created;
44772-atomic_t sw_qps_destroyed;
44773+atomic_unchecked_t mod_qp_timouts;
44774+atomic_unchecked_t qps_created;
44775+atomic_unchecked_t sw_qps_destroyed;
44776
44777 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
44778
44779@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
44780 if (init_attr->create_flags)
44781 return ERR_PTR(-EINVAL);
44782
44783- atomic_inc(&qps_created);
44784+ atomic_inc_unchecked(&qps_created);
44785 switch (init_attr->qp_type) {
44786 case IB_QPT_RC:
44787 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
44788@@ -1468,7 +1468,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
44789 struct iw_cm_event cm_event;
44790 int ret = 0;
44791
44792- atomic_inc(&sw_qps_destroyed);
44793+ atomic_inc_unchecked(&sw_qps_destroyed);
44794 nesqp->destroyed = 1;
44795
44796 /* Blow away the connection if it exists. */
44797diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
44798index c00ae09..04e91be 100644
44799--- a/drivers/infiniband/hw/qib/qib.h
44800+++ b/drivers/infiniband/hw/qib/qib.h
44801@@ -52,6 +52,7 @@
44802 #include <linux/kref.h>
44803 #include <linux/sched.h>
44804 #include <linux/kthread.h>
44805+#include <linux/slab.h>
44806
44807 #include "qib_common.h"
44808 #include "qib_verbs.h"
44809diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
44810index 24c41ba..102d71f 100644
44811--- a/drivers/input/gameport/gameport.c
44812+++ b/drivers/input/gameport/gameport.c
44813@@ -490,14 +490,14 @@ EXPORT_SYMBOL(gameport_set_phys);
44814 */
44815 static void gameport_init_port(struct gameport *gameport)
44816 {
44817- static atomic_t gameport_no = ATOMIC_INIT(0);
44818+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
44819
44820 __module_get(THIS_MODULE);
44821
44822 mutex_init(&gameport->drv_mutex);
44823 device_initialize(&gameport->dev);
44824 dev_set_name(&gameport->dev, "gameport%lu",
44825- (unsigned long)atomic_inc_return(&gameport_no) - 1);
44826+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
44827 gameport->dev.bus = &gameport_bus;
44828 gameport->dev.release = gameport_release_port;
44829 if (gameport->parent)
44830diff --git a/drivers/input/input.c b/drivers/input/input.c
44831index 29ca0bb..f4bc2e3 100644
44832--- a/drivers/input/input.c
44833+++ b/drivers/input/input.c
44834@@ -1774,7 +1774,7 @@ EXPORT_SYMBOL_GPL(input_class);
44835 */
44836 struct input_dev *input_allocate_device(void)
44837 {
44838- static atomic_t input_no = ATOMIC_INIT(0);
44839+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
44840 struct input_dev *dev;
44841
44842 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
44843@@ -1789,7 +1789,7 @@ struct input_dev *input_allocate_device(void)
44844 INIT_LIST_HEAD(&dev->node);
44845
44846 dev_set_name(&dev->dev, "input%ld",
44847- (unsigned long) atomic_inc_return(&input_no) - 1);
44848+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
44849
44850 __module_get(THIS_MODULE);
44851 }
44852diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
44853index 4a95b22..874c182 100644
44854--- a/drivers/input/joystick/sidewinder.c
44855+++ b/drivers/input/joystick/sidewinder.c
44856@@ -30,6 +30,7 @@
44857 #include <linux/kernel.h>
44858 #include <linux/module.h>
44859 #include <linux/slab.h>
44860+#include <linux/sched.h>
44861 #include <linux/input.h>
44862 #include <linux/gameport.h>
44863 #include <linux/jiffies.h>
44864diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
44865index 603fe0d..f63decc 100644
44866--- a/drivers/input/joystick/xpad.c
44867+++ b/drivers/input/joystick/xpad.c
44868@@ -737,7 +737,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
44869
44870 static int xpad_led_probe(struct usb_xpad *xpad)
44871 {
44872- static atomic_t led_seq = ATOMIC_INIT(0);
44873+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
44874 long led_no;
44875 struct xpad_led *led;
44876 struct led_classdev *led_cdev;
44877@@ -750,7 +750,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
44878 if (!led)
44879 return -ENOMEM;
44880
44881- led_no = (long)atomic_inc_return(&led_seq) - 1;
44882+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
44883
44884 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
44885 led->xpad = xpad;
44886diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
44887index 719410f..1896169 100644
44888--- a/drivers/input/misc/ims-pcu.c
44889+++ b/drivers/input/misc/ims-pcu.c
44890@@ -1851,7 +1851,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
44891
44892 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
44893 {
44894- static atomic_t device_no = ATOMIC_INIT(0);
44895+ static atomic_unchecked_t device_no = ATOMIC_INIT(0);
44896
44897 const struct ims_pcu_device_info *info;
44898 int error;
44899@@ -1882,7 +1882,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
44900 }
44901
44902 /* Device appears to be operable, complete initialization */
44903- pcu->device_no = atomic_inc_return(&device_no) - 1;
44904+ pcu->device_no = atomic_inc_return_unchecked(&device_no) - 1;
44905
44906 /*
44907 * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
44908diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
44909index 2f0b39d..7370f13 100644
44910--- a/drivers/input/mouse/psmouse.h
44911+++ b/drivers/input/mouse/psmouse.h
44912@@ -116,7 +116,7 @@ struct psmouse_attribute {
44913 ssize_t (*set)(struct psmouse *psmouse, void *data,
44914 const char *buf, size_t count);
44915 bool protect;
44916-};
44917+} __do_const;
44918 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
44919
44920 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
44921diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
44922index b604564..3f14ae4 100644
44923--- a/drivers/input/mousedev.c
44924+++ b/drivers/input/mousedev.c
44925@@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
44926
44927 spin_unlock_irq(&client->packet_lock);
44928
44929- if (copy_to_user(buffer, data, count))
44930+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
44931 return -EFAULT;
44932
44933 return count;
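
The mousedev hunk above adds an explicit bound on the user-supplied length before copy_to_user(); later hunks (avm/b1.c, icn.c) apply the same guard to copy_from_user(). A hedged sketch of the idiom, using a hypothetical fixed-size buffer:

#include <linux/types.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static ssize_t dev_read(char __user *ubuf, size_t count)
{
	u8 data[64];	/* hypothetical fixed-size kernel buffer */

	memset(data, 0, sizeof(data));

	/* bound the caller-supplied length before copying, instead of
	   trusting it to stay within the buffer */
	if (count > sizeof(data) || copy_to_user(ubuf, data, count))
		return -EFAULT;
	return count;
}
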
44934diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
44935index b29134d..394deb0 100644
44936--- a/drivers/input/serio/serio.c
44937+++ b/drivers/input/serio/serio.c
44938@@ -514,7 +514,7 @@ static void serio_release_port(struct device *dev)
44939 */
44940 static void serio_init_port(struct serio *serio)
44941 {
44942- static atomic_t serio_no = ATOMIC_INIT(0);
44943+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
44944
44945 __module_get(THIS_MODULE);
44946
44947@@ -525,7 +525,7 @@ static void serio_init_port(struct serio *serio)
44948 mutex_init(&serio->drv_mutex);
44949 device_initialize(&serio->dev);
44950 dev_set_name(&serio->dev, "serio%ld",
44951- (long)atomic_inc_return(&serio_no) - 1);
44952+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
44953 serio->dev.bus = &serio_bus;
44954 serio->dev.release = serio_release_port;
44955 serio->dev.groups = serio_device_attr_groups;
44956diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
44957index c9a02fe..0debc75 100644
44958--- a/drivers/input/serio/serio_raw.c
44959+++ b/drivers/input/serio/serio_raw.c
44960@@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
44961
44962 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
44963 {
44964- static atomic_t serio_raw_no = ATOMIC_INIT(0);
44965+ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(0);
44966 struct serio_raw *serio_raw;
44967 int err;
44968
44969@@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
44970 }
44971
44972 snprintf(serio_raw->name, sizeof(serio_raw->name),
44973- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no) - 1);
44974+ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no) - 1);
44975 kref_init(&serio_raw->kref);
44976 INIT_LIST_HEAD(&serio_raw->client_list);
44977 init_waitqueue_head(&serio_raw->wait);
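
gameport, input, xpad, ims-pcu, serio and serio_raw above all share one pattern: a static atomic_t used only to hand out monotonically increasing instance numbers becomes atomic_unchecked_t. The value is an ID, not a reference count, so saturating it on overflow (as the checked atomics would) is exactly wrong. Sketch, under the same fallback assumption as earlier:

#include <linux/atomic.h>
#include <linux/device.h>

#ifndef CONFIG_PAX_REFCOUNT
typedef atomic_t atomic_unchecked_t;
#define atomic_inc_return_unchecked(v)	atomic_inc_return(v)
#endif

static atomic_unchecked_t instance_no = ATOMIC_INIT(0);

static void name_port(struct device *dev)
{
	/* instance IDs only ever grow; saturating on overflow would make
	   every later device try to claim the same name */
	dev_set_name(dev, "port%lu",
		     (unsigned long)atomic_inc_return_unchecked(&instance_no) - 1);
}
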
44978diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
44979index e5555fc..937986d 100644
44980--- a/drivers/iommu/iommu.c
44981+++ b/drivers/iommu/iommu.c
44982@@ -588,7 +588,7 @@ static struct notifier_block iommu_bus_nb = {
44983 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
44984 {
44985 bus_register_notifier(bus, &iommu_bus_nb);
44986- bus_for_each_dev(bus, NULL, ops, add_iommu_group);
44987+ bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group);
44988 }
44989
44990 /**
44991diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
44992index 33c4395..e06447e 100644
44993--- a/drivers/iommu/irq_remapping.c
44994+++ b/drivers/iommu/irq_remapping.c
44995@@ -354,7 +354,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
44996 void panic_if_irq_remap(const char *msg)
44997 {
44998 if (irq_remapping_enabled)
44999- panic(msg);
45000+ panic("%s", msg);
45001 }
45002
45003 static void ir_ack_apic_edge(struct irq_data *data)
45004@@ -375,10 +375,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
45005
45006 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
45007 {
45008- chip->irq_print_chip = ir_print_prefix;
45009- chip->irq_ack = ir_ack_apic_edge;
45010- chip->irq_eoi = ir_ack_apic_level;
45011- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
45012+ pax_open_kernel();
45013+ *(void **)&chip->irq_print_chip = ir_print_prefix;
45014+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
45015+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
45016+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
45017+ pax_close_kernel();
45018 }
45019
45020 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
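
With KERNEXEC, structures full of function pointers are mapped read-only after init, so the hunk above rewrites the direct assignments into writes bracketed by pax_open_kernel()/pax_close_kernel(), which temporarily lift write protection (on x86, by toggling CR0.WP, as the switcher_32.S hunk later in this patch shows in assembly). A sketch under that assumption, with no-op fallbacks for trees without PaX:

#include <linux/irq.h>

#ifndef CONFIG_PAX_KERNEXEC
#define pax_open_kernel()	do { } while (0)
#define pax_close_kernel()	do { } while (0)
#endif

static void patch_chip(struct irq_chip *chip, void (*ack)(struct irq_data *))
{
	pax_open_kernel();	/* briefly allow writes to read-only data */
	/* write through void ** because constify makes the member const */
	*(void **)&chip->irq_ack = ack;
	pax_close_kernel();
}
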
45021diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
45022index 7c131cf..035129b 100644
45023--- a/drivers/irqchip/irq-gic.c
45024+++ b/drivers/irqchip/irq-gic.c
45025@@ -85,7 +85,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
45026 * Supported arch specific GIC irq extension.
45027 * Default make them NULL.
45028 */
45029-struct irq_chip gic_arch_extn = {
45030+irq_chip_no_const gic_arch_extn = {
45031 .irq_eoi = NULL,
45032 .irq_mask = NULL,
45033 .irq_unmask = NULL,
45034@@ -337,7 +337,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
45035 chained_irq_exit(chip, desc);
45036 }
45037
45038-static struct irq_chip gic_chip = {
45039+static irq_chip_no_const gic_chip __read_only = {
45040 .name = "GIC",
45041 .irq_mask = gic_mask_irq,
45042 .irq_unmask = gic_unmask_irq,
45043diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
45044index 8777065..a4a9967 100644
45045--- a/drivers/irqchip/irq-renesas-irqc.c
45046+++ b/drivers/irqchip/irq-renesas-irqc.c
45047@@ -151,7 +151,7 @@ static int irqc_probe(struct platform_device *pdev)
45048 struct irqc_priv *p;
45049 struct resource *io;
45050 struct resource *irq;
45051- struct irq_chip *irq_chip;
45052+ irq_chip_no_const *irq_chip;
45053 const char *name = dev_name(&pdev->dev);
45054 int ret;
45055 int k;
45056diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
45057index f9a87ed..3fdd854 100644
45058--- a/drivers/isdn/capi/capi.c
45059+++ b/drivers/isdn/capi/capi.c
45060@@ -81,8 +81,8 @@ struct capiminor {
45061
45062 struct capi20_appl *ap;
45063 u32 ncci;
45064- atomic_t datahandle;
45065- atomic_t msgid;
45066+ atomic_unchecked_t datahandle;
45067+ atomic_unchecked_t msgid;
45068
45069 struct tty_port port;
45070 int ttyinstop;
45071@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
45072 capimsg_setu16(s, 2, mp->ap->applid);
45073 capimsg_setu8 (s, 4, CAPI_DATA_B3);
45074 capimsg_setu8 (s, 5, CAPI_RESP);
45075- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
45076+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
45077 capimsg_setu32(s, 8, mp->ncci);
45078 capimsg_setu16(s, 12, datahandle);
45079 }
45080@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
45081 mp->outbytes -= len;
45082 spin_unlock_bh(&mp->outlock);
45083
45084- datahandle = atomic_inc_return(&mp->datahandle);
45085+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
45086 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
45087 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
45088 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
45089 capimsg_setu16(skb->data, 2, mp->ap->applid);
45090 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
45091 capimsg_setu8 (skb->data, 5, CAPI_REQ);
45092- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
45093+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
45094 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
45095 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
45096 capimsg_setu16(skb->data, 16, len); /* Data length */
45097diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
45098index b7ae0a0..04590fa 100644
45099--- a/drivers/isdn/gigaset/bas-gigaset.c
45100+++ b/drivers/isdn/gigaset/bas-gigaset.c
45101@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
45102
45103
45104 static const struct gigaset_ops gigops = {
45105- gigaset_write_cmd,
45106- gigaset_write_room,
45107- gigaset_chars_in_buffer,
45108- gigaset_brkchars,
45109- gigaset_init_bchannel,
45110- gigaset_close_bchannel,
45111- gigaset_initbcshw,
45112- gigaset_freebcshw,
45113- gigaset_reinitbcshw,
45114- gigaset_initcshw,
45115- gigaset_freecshw,
45116- gigaset_set_modem_ctrl,
45117- gigaset_baud_rate,
45118- gigaset_set_line_ctrl,
45119- gigaset_isoc_send_skb,
45120- gigaset_isoc_input,
45121+ .write_cmd = gigaset_write_cmd,
45122+ .write_room = gigaset_write_room,
45123+ .chars_in_buffer = gigaset_chars_in_buffer,
45124+ .brkchars = gigaset_brkchars,
45125+ .init_bchannel = gigaset_init_bchannel,
45126+ .close_bchannel = gigaset_close_bchannel,
45127+ .initbcshw = gigaset_initbcshw,
45128+ .freebcshw = gigaset_freebcshw,
45129+ .reinitbcshw = gigaset_reinitbcshw,
45130+ .initcshw = gigaset_initcshw,
45131+ .freecshw = gigaset_freecshw,
45132+ .set_modem_ctrl = gigaset_set_modem_ctrl,
45133+ .baud_rate = gigaset_baud_rate,
45134+ .set_line_ctrl = gigaset_set_line_ctrl,
45135+ .send_skb = gigaset_isoc_send_skb,
45136+ .handle_input = gigaset_isoc_input,
45137 };
45138
45139 /* bas_gigaset_init
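
The gigaset_ops conversions here and below replace positional initializers with designated ones. Beyond readability, this keeps each callback bound to the right slot if the structure ever gains or reorders members, which the constify/randomize-layout plugins in this patch can force. A self-contained illustration with a hypothetical ops struct:

struct ops {
	int  (*open)(void);
	void (*close)(void);
};

static int  my_open(void)  { return 0; }
static void my_close(void) { }

/* positional: silently mis-binds if 'struct ops' is ever reordered */
static const struct ops a = { my_open, my_close };

/* designated: each callback is tied to a named slot */
static const struct ops b = { .open = my_open, .close = my_close };
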
45140diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
45141index 600c79b..3752bab 100644
45142--- a/drivers/isdn/gigaset/interface.c
45143+++ b/drivers/isdn/gigaset/interface.c
45144@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
45145 }
45146 tty->driver_data = cs;
45147
45148- ++cs->port.count;
45149+ atomic_inc(&cs->port.count);
45150
45151- if (cs->port.count == 1) {
45152+ if (atomic_read(&cs->port.count) == 1) {
45153 tty_port_tty_set(&cs->port, tty);
45154 cs->port.low_latency = 1;
45155 }
45156@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
45157
45158 if (!cs->connected)
45159 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
45160- else if (!cs->port.count)
45161+ else if (!atomic_read(&cs->port.count))
45162 dev_warn(cs->dev, "%s: device not opened\n", __func__);
45163- else if (!--cs->port.count)
45164+ else if (!atomic_dec_return(&cs->port.count))
45165 tty_port_tty_set(&cs->port, NULL);
45166
45167 mutex_unlock(&cs->mutex);
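
Here, and again in isdn_tty.c below, the plain integer tty_port.count becomes atomic so that open/close paths racing on different CPUs cannot lose an update. The sketch below assumes the tty_port.count -> atomic_t conversion made elsewhere in this patch; vanilla kernels keep a plain int there:

#include <linux/tty.h>

static bool is_first_open(struct tty_port *port)
{
	/* first opener brings the hardware up */
	return atomic_inc_return(&port->count) == 1;
}

static bool is_last_close(struct tty_port *port)
{
	/* last closer shuts the hardware down */
	return atomic_dec_return(&port->count) == 0;
}
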
45168diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
45169index 8c91fd5..14f13ce 100644
45170--- a/drivers/isdn/gigaset/ser-gigaset.c
45171+++ b/drivers/isdn/gigaset/ser-gigaset.c
45172@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
45173 }
45174
45175 static const struct gigaset_ops ops = {
45176- gigaset_write_cmd,
45177- gigaset_write_room,
45178- gigaset_chars_in_buffer,
45179- gigaset_brkchars,
45180- gigaset_init_bchannel,
45181- gigaset_close_bchannel,
45182- gigaset_initbcshw,
45183- gigaset_freebcshw,
45184- gigaset_reinitbcshw,
45185- gigaset_initcshw,
45186- gigaset_freecshw,
45187- gigaset_set_modem_ctrl,
45188- gigaset_baud_rate,
45189- gigaset_set_line_ctrl,
45190- gigaset_m10x_send_skb, /* asyncdata.c */
45191- gigaset_m10x_input, /* asyncdata.c */
45192+ .write_cmd = gigaset_write_cmd,
45193+ .write_room = gigaset_write_room,
45194+ .chars_in_buffer = gigaset_chars_in_buffer,
45195+ .brkchars = gigaset_brkchars,
45196+ .init_bchannel = gigaset_init_bchannel,
45197+ .close_bchannel = gigaset_close_bchannel,
45198+ .initbcshw = gigaset_initbcshw,
45199+ .freebcshw = gigaset_freebcshw,
45200+ .reinitbcshw = gigaset_reinitbcshw,
45201+ .initcshw = gigaset_initcshw,
45202+ .freecshw = gigaset_freecshw,
45203+ .set_modem_ctrl = gigaset_set_modem_ctrl,
45204+ .baud_rate = gigaset_baud_rate,
45205+ .set_line_ctrl = gigaset_set_line_ctrl,
45206+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
45207+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
45208 };
45209
45210
45211diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
45212index d0a41cb..b953e50 100644
45213--- a/drivers/isdn/gigaset/usb-gigaset.c
45214+++ b/drivers/isdn/gigaset/usb-gigaset.c
45215@@ -547,7 +547,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
45216 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
45217 memcpy(cs->hw.usb->bchars, buf, 6);
45218 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
45219- 0, 0, &buf, 6, 2000);
45220+ 0, 0, buf, 6, 2000);
45221 }
45222
45223 static void gigaset_freebcshw(struct bc_state *bcs)
45224@@ -869,22 +869,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
45225 }
45226
45227 static const struct gigaset_ops ops = {
45228- gigaset_write_cmd,
45229- gigaset_write_room,
45230- gigaset_chars_in_buffer,
45231- gigaset_brkchars,
45232- gigaset_init_bchannel,
45233- gigaset_close_bchannel,
45234- gigaset_initbcshw,
45235- gigaset_freebcshw,
45236- gigaset_reinitbcshw,
45237- gigaset_initcshw,
45238- gigaset_freecshw,
45239- gigaset_set_modem_ctrl,
45240- gigaset_baud_rate,
45241- gigaset_set_line_ctrl,
45242- gigaset_m10x_send_skb,
45243- gigaset_m10x_input,
45244+ .write_cmd = gigaset_write_cmd,
45245+ .write_room = gigaset_write_room,
45246+ .chars_in_buffer = gigaset_chars_in_buffer,
45247+ .brkchars = gigaset_brkchars,
45248+ .init_bchannel = gigaset_init_bchannel,
45249+ .close_bchannel = gigaset_close_bchannel,
45250+ .initbcshw = gigaset_initbcshw,
45251+ .freebcshw = gigaset_freebcshw,
45252+ .reinitbcshw = gigaset_reinitbcshw,
45253+ .initcshw = gigaset_initcshw,
45254+ .freecshw = gigaset_freecshw,
45255+ .set_modem_ctrl = gigaset_set_modem_ctrl,
45256+ .baud_rate = gigaset_baud_rate,
45257+ .set_line_ctrl = gigaset_set_line_ctrl,
45258+ .send_skb = gigaset_m10x_send_skb,
45259+ .handle_input = gigaset_m10x_input,
45260 };
45261
45262 /*
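
The usb-gigaset hunk above also fixes a genuine bug: buf is already a pointer parameter, so passing &buf handed usb_control_msg() the address of the pointer variable itself rather than the six control bytes. Sketch of the corrected call (the surrounding driver guarantees the buffer is DMA-safe):

#include <linux/usb.h>

static int send_brkchars(struct usb_device *udev, unsigned char *buf)
{
	/* pass the buffer itself; '&buf' is the address of the pointer
	   variable, so the device would have received stack garbage */
	return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
			       0, 0, buf, 6, 2000);
}
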
45263diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
45264index 4d9b195..455075c 100644
45265--- a/drivers/isdn/hardware/avm/b1.c
45266+++ b/drivers/isdn/hardware/avm/b1.c
45267@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
45268 }
45269 if (left) {
45270 if (t4file->user) {
45271- if (copy_from_user(buf, dp, left))
45272+ if (left > sizeof buf || copy_from_user(buf, dp, left))
45273 return -EFAULT;
45274 } else {
45275 memcpy(buf, dp, left);
45276@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
45277 }
45278 if (left) {
45279 if (config->user) {
45280- if (copy_from_user(buf, dp, left))
45281+ if (left > sizeof buf || copy_from_user(buf, dp, left))
45282 return -EFAULT;
45283 } else {
45284 memcpy(buf, dp, left);
45285diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
45286index 9b856e1..fa03c92 100644
45287--- a/drivers/isdn/i4l/isdn_common.c
45288+++ b/drivers/isdn/i4l/isdn_common.c
45289@@ -1654,6 +1654,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
45290 } else
45291 return -EINVAL;
45292 case IIOCDBGVAR:
45293+ if (!capable(CAP_SYS_RAWIO))
45294+ return -EPERM;
45295 if (arg) {
45296 if (copy_to_user(argp, &dev, sizeof(ulong)))
45297 return -EFAULT;
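
IIOCDBGVAR copies the kernel address of the global dev structure to userland, which undermines kernel-pointer hiding; the hunk gates it behind CAP_SYS_RAWIO. The generic idiom for privileged ioctl branches, with a hypothetical command number:

#include <linux/capability.h>
#include <linux/errno.h>

static long dbg_ioctl(unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case 0x1234:	/* hypothetical debug command */
		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;	/* refuse before exposing kernel pointers */
		/* ... copy debugging state to (void __user *)arg ... */
		return 0;
	default:
		return -ENOTTY;
	}
}
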
45298diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
45299index 91d5730..336523e 100644
45300--- a/drivers/isdn/i4l/isdn_concap.c
45301+++ b/drivers/isdn/i4l/isdn_concap.c
45302@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
45303 }
45304
45305 struct concap_device_ops isdn_concap_reliable_dl_dops = {
45306- &isdn_concap_dl_data_req,
45307- &isdn_concap_dl_connect_req,
45308- &isdn_concap_dl_disconn_req
45309+ .data_req = &isdn_concap_dl_data_req,
45310+ .connect_req = &isdn_concap_dl_connect_req,
45311+ .disconn_req = &isdn_concap_dl_disconn_req
45312 };
45313
45314 /* The following should better go into a dedicated source file such that
45315diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
45316index 62f0688..38ceac5 100644
45317--- a/drivers/isdn/i4l/isdn_ppp.c
45318+++ b/drivers/isdn/i4l/isdn_ppp.c
45319@@ -378,15 +378,10 @@ isdn_ppp_release(int min, struct file *file)
45320 is->slcomp = NULL;
45321 #endif
45322 #ifdef CONFIG_IPPP_FILTER
45323- if (is->pass_filter) {
45324- sk_unattached_filter_destroy(is->pass_filter);
45325- is->pass_filter = NULL;
45326- }
45327-
45328- if (is->active_filter) {
45329- sk_unattached_filter_destroy(is->active_filter);
45330- is->active_filter = NULL;
45331- }
45332+ kfree(is->pass_filter);
45333+ is->pass_filter = NULL;
45334+ kfree(is->active_filter);
45335+ is->active_filter = NULL;
45336 #endif
45337
45338 /* TODO: if this was the previous master: link the stuff to the new master */
45339@@ -442,7 +437,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
45340 {
45341 struct sock_fprog uprog;
45342 struct sock_filter *code = NULL;
45343- int len;
45344+ int len, err;
45345
45346 if (copy_from_user(&uprog, arg, sizeof(uprog)))
45347 return -EFAULT;
45348@@ -458,6 +453,12 @@ static int get_filter(void __user *arg, struct sock_filter **p)
45349 if (IS_ERR(code))
45350 return PTR_ERR(code);
45351
45352+ err = sk_chk_filter(code, uprog.len);
45353+ if (err) {
45354+ kfree(code);
45355+ return err;
45356+ }
45357+
45358 *p = code;
45359 return uprog.len;
45360 }
45361@@ -628,53 +629,25 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
45362 #ifdef CONFIG_IPPP_FILTER
45363 case PPPIOCSPASS:
45364 {
45365- struct sock_fprog_kern fprog;
45366 struct sock_filter *code;
45367- int err, len = get_filter(argp, &code);
45368-
45369+ int len = get_filter(argp, &code);
45370 if (len < 0)
45371 return len;
45372-
45373- fprog.len = len;
45374- fprog.filter = code;
45375-
45376- if (is->pass_filter) {
45377- sk_unattached_filter_destroy(is->pass_filter);
45378- is->pass_filter = NULL;
45379- }
45380- if (fprog.filter != NULL)
45381- err = sk_unattached_filter_create(&is->pass_filter,
45382- &fprog);
45383- else
45384- err = 0;
45385- kfree(code);
45386-
45387- return err;
45388+ kfree(is->pass_filter);
45389+ is->pass_filter = code;
45390+ is->pass_len = len;
45391+ break;
45392 }
45393 case PPPIOCSACTIVE:
45394 {
45395- struct sock_fprog_kern fprog;
45396 struct sock_filter *code;
45397- int err, len = get_filter(argp, &code);
45398-
45399+ int len = get_filter(argp, &code);
45400 if (len < 0)
45401 return len;
45402-
45403- fprog.len = len;
45404- fprog.filter = code;
45405-
45406- if (is->active_filter) {
45407- sk_unattached_filter_destroy(is->active_filter);
45408- is->active_filter = NULL;
45409- }
45410- if (fprog.filter != NULL)
45411- err = sk_unattached_filter_create(&is->active_filter,
45412- &fprog);
45413- else
45414- err = 0;
45415- kfree(code);
45416-
45417- return err;
45418+ kfree(is->active_filter);
45419+ is->active_filter = code;
45420+ is->active_len = len;
45421+ break;
45422 }
45423 #endif /* CONFIG_IPPP_FILTER */
45424 default:
45425@@ -1174,14 +1147,14 @@ isdn_ppp_push_higher(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *
45426 }
45427
45428 if (is->pass_filter
45429- && SK_RUN_FILTER(is->pass_filter, skb) == 0) {
45430+ && sk_run_filter(skb, is->pass_filter) == 0) {
45431 if (is->debug & 0x2)
45432 printk(KERN_DEBUG "IPPP: inbound frame filtered.\n");
45433 kfree_skb(skb);
45434 return;
45435 }
45436 if (!(is->active_filter
45437- && SK_RUN_FILTER(is->active_filter, skb) == 0)) {
45438+ && sk_run_filter(skb, is->active_filter) == 0)) {
45439 if (is->debug & 0x2)
45440 printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n");
45441 lp->huptimer = 0;
45442@@ -1320,14 +1293,14 @@ isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
45443 }
45444
45445 if (ipt->pass_filter
45446- && SK_RUN_FILTER(ipt->pass_filter, skb) == 0) {
45447+ && sk_run_filter(skb, ipt->pass_filter) == 0) {
45448 if (ipt->debug & 0x4)
45449 printk(KERN_DEBUG "IPPP: outbound frame filtered.\n");
45450 kfree_skb(skb);
45451 goto unlock;
45452 }
45453 if (!(ipt->active_filter
45454- && SK_RUN_FILTER(ipt->active_filter, skb) == 0)) {
45455+ && sk_run_filter(skb, ipt->active_filter) == 0)) {
45456 if (ipt->debug & 0x4)
45457 printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n");
45458 lp->huptimer = 0;
45459@@ -1517,9 +1490,9 @@ int isdn_ppp_autodial_filter(struct sk_buff *skb, isdn_net_local *lp)
45460 }
45461
45462 drop |= is->pass_filter
45463- && SK_RUN_FILTER(is->pass_filter, skb) == 0;
45464+ && sk_run_filter(skb, is->pass_filter) == 0;
45465 drop |= is->active_filter
45466- && SK_RUN_FILTER(is->active_filter, skb) == 0;
45467+ && sk_run_filter(skb, is->active_filter) == 0;
45468
45469 skb_push(skb, IPPP_MAX_HEADER - 4);
45470 return drop;
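
This isdn_ppp rework reverts the upstream unattached-filter objects back to raw sock_filter arrays (run through the sk_run_filter() calls visible in these hunks) and, crucially, adds the sk_chk_filter() validation that the raw path needs before a user-supplied program may ever run. A sketch of the install step under those assumptions:

#include <linux/filter.h>
#include <linux/slab.h>

static int install_filter(struct sock_filter *code, int len,
			  struct sock_filter **slot, int *slot_len)
{
	int err = sk_chk_filter(code, len);	/* reject malformed programs */

	if (err) {
		kfree(code);
		return err;
	}
	kfree(*slot);		/* replace any previously installed program */
	*slot = code;
	*slot_len = len;
	return 0;
}
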
45471diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
45472index 3c5f249..5fac4d0 100644
45473--- a/drivers/isdn/i4l/isdn_tty.c
45474+++ b/drivers/isdn/i4l/isdn_tty.c
45475@@ -1508,9 +1508,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
45476
45477 #ifdef ISDN_DEBUG_MODEM_OPEN
45478 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
45479- port->count);
45480+ atomic_read(&port->count));
45481 #endif
45482- port->count++;
45483+ atomic_inc(&port->count);
45484 port->tty = tty;
45485 /*
45486 * Start up serial port
45487@@ -1554,7 +1554,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
45488 #endif
45489 return;
45490 }
45491- if ((tty->count == 1) && (port->count != 1)) {
45492+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
45493 /*
45494 * Uh, oh. tty->count is 1, which means that the tty
45495 * structure will be freed. Info->count should always
45496@@ -1563,15 +1563,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
45497 * serial port won't be shutdown.
45498 */
45499 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
45500- "info->count is %d\n", port->count);
45501- port->count = 1;
45502+ "info->count is %d\n", atomic_read(&port->count));
45503+ atomic_set(&port->count, 1);
45504 }
45505- if (--port->count < 0) {
45506+ if (atomic_dec_return(&port->count) < 0) {
45507 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
45508- info->line, port->count);
45509- port->count = 0;
45510+ info->line, atomic_read(&port->count));
45511+ atomic_set(&port->count, 0);
45512 }
45513- if (port->count) {
45514+ if (atomic_read(&port->count)) {
45515 #ifdef ISDN_DEBUG_MODEM_OPEN
45516 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
45517 #endif
45518@@ -1625,7 +1625,7 @@ isdn_tty_hangup(struct tty_struct *tty)
45519 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
45520 return;
45521 isdn_tty_shutdown(info);
45522- port->count = 0;
45523+ atomic_set(&port->count, 0);
45524 port->flags &= ~ASYNC_NORMAL_ACTIVE;
45525 port->tty = NULL;
45526 wake_up_interruptible(&port->open_wait);
45527@@ -1970,7 +1970,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
45528 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
45529 modem_info *info = &dev->mdm.info[i];
45530
45531- if (info->port.count == 0)
45532+ if (atomic_read(&info->port.count) == 0)
45533 continue;
45534 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
45535 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
45536diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
45537index e2d4e58..40cd045 100644
45538--- a/drivers/isdn/i4l/isdn_x25iface.c
45539+++ b/drivers/isdn/i4l/isdn_x25iface.c
45540@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
45541
45542
45543 static struct concap_proto_ops ix25_pops = {
45544- &isdn_x25iface_proto_new,
45545- &isdn_x25iface_proto_del,
45546- &isdn_x25iface_proto_restart,
45547- &isdn_x25iface_proto_close,
45548- &isdn_x25iface_xmit,
45549- &isdn_x25iface_receive,
45550- &isdn_x25iface_connect_ind,
45551- &isdn_x25iface_disconn_ind
45552+ .proto_new = &isdn_x25iface_proto_new,
45553+ .proto_del = &isdn_x25iface_proto_del,
45554+ .restart = &isdn_x25iface_proto_restart,
45555+ .close = &isdn_x25iface_proto_close,
45556+ .encap_and_xmit = &isdn_x25iface_xmit,
45557+ .data_ind = &isdn_x25iface_receive,
45558+ .connect_ind = &isdn_x25iface_connect_ind,
45559+ .disconn_ind = &isdn_x25iface_disconn_ind
45560 };
45561
45562 /* error message helper function */
45563diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
45564index 6a7447c..cae33fe 100644
45565--- a/drivers/isdn/icn/icn.c
45566+++ b/drivers/isdn/icn/icn.c
45567@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
45568 if (count > len)
45569 count = len;
45570 if (user) {
45571- if (copy_from_user(msg, buf, count))
45572+ if (count > sizeof msg || copy_from_user(msg, buf, count))
45573 return -EFAULT;
45574 } else
45575 memcpy(msg, buf, count);
45576diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
45577index a4f05c5..1433bc5 100644
45578--- a/drivers/isdn/mISDN/dsp_cmx.c
45579+++ b/drivers/isdn/mISDN/dsp_cmx.c
45580@@ -1628,7 +1628,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
45581 static u16 dsp_count; /* last sample count */
45582 static int dsp_count_valid; /* if we have last sample count */
45583
45584-void
45585+void __intentional_overflow(-1)
45586 dsp_cmx_send(void *arg)
45587 {
45588 struct dsp_conf *conf;
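
__intentional_overflow(-1) is an annotation consumed by the size_overflow gcc plugin shipped with this patch; it exempts a function whose arithmetic is known to wrap deliberately. On a tree without the plugin the attribute would have to expand to nothing, along these lines:

#ifndef __intentional_overflow
#define __intentional_overflow(...)	/* no-op without the size_overflow plugin */
#endif

void __intentional_overflow(-1) dsp_cmx_send(void *arg);
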
45589diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
45590index f58a354..fbae176 100644
45591--- a/drivers/leds/leds-clevo-mail.c
45592+++ b/drivers/leds/leds-clevo-mail.c
45593@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
45594 * detected as working, but in reality it is not) as low as
45595 * possible.
45596 */
45597-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
45598+static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
45599 {
45600 .callback = clevo_mail_led_dmi_callback,
45601 .ident = "Clevo D410J",
45602diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
45603index 2eb3ef6..295891f 100644
45604--- a/drivers/leds/leds-ss4200.c
45605+++ b/drivers/leds/leds-ss4200.c
45606@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
45607 * detected as working, but in reality it is not) as low as
45608 * possible.
45609 */
45610-static struct dmi_system_id nas_led_whitelist[] __initdata = {
45611+static struct dmi_system_id nas_led_whitelist[] __initconst = {
45612 {
45613 .callback = ss4200_led_dmi_callback,
45614 .ident = "Intel SS4200-E",
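
Both LED drivers mark their const DMI tables __initconst instead of __initdata: const objects belong in .init.rodata, and combining const with the writable __initdata section provokes section type conflicts once the constify plugin is active. Minimal illustration:

#include <linux/init.h>

/* const + __initdata would place read-only data in a writable init
   section; __initconst keeps qualifier and section consistent */
static const int boot_table[] __initconst = { 1, 2, 3 };
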
45615diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
45616index 0bf1e4e..b4bf44e 100644
45617--- a/drivers/lguest/core.c
45618+++ b/drivers/lguest/core.c
45619@@ -97,9 +97,17 @@ static __init int map_switcher(void)
45620 * The end address needs +1 because __get_vm_area allocates an
45621 * extra guard page, so we need space for that.
45622 */
45623+
45624+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
45625+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
45626+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
45627+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
45628+#else
45629 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
45630 VM_ALLOC, switcher_addr, switcher_addr
45631 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
45632+#endif
45633+
45634 if (!switcher_vma) {
45635 err = -ENOMEM;
45636 printk("lguest: could not map switcher pages high\n");
45637@@ -124,7 +132,7 @@ static __init int map_switcher(void)
45638 * Now the Switcher is mapped at the right address, we can't fail!
45639 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
45640 */
45641- memcpy(switcher_vma->addr, start_switcher_text,
45642+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
45643 end_switcher_text - start_switcher_text);
45644
45645 printk(KERN_INFO "lguest: mapped switcher at %p\n",
45646diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
45647index e8b55c3..3514c37 100644
45648--- a/drivers/lguest/page_tables.c
45649+++ b/drivers/lguest/page_tables.c
45650@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
45651 /*:*/
45652
45653 #ifdef CONFIG_X86_PAE
45654-static void release_pmd(pmd_t *spmd)
45655+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
45656 {
45657 /* If the entry's not present, there's nothing to release. */
45658 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
45659diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
45660index 922a1ac..9dd0c2a 100644
45661--- a/drivers/lguest/x86/core.c
45662+++ b/drivers/lguest/x86/core.c
45663@@ -59,7 +59,7 @@ static struct {
45664 /* Offset from where switcher.S was compiled to where we've copied it */
45665 static unsigned long switcher_offset(void)
45666 {
45667- return switcher_addr - (unsigned long)start_switcher_text;
45668+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
45669 }
45670
45671 /* This cpu's struct lguest_pages (after the Switcher text page) */
45672@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
45673 * These copies are pretty cheap, so we do them unconditionally: */
45674 /* Save the current Host top-level page directory.
45675 */
45676+
45677+#ifdef CONFIG_PAX_PER_CPU_PGD
45678+ pages->state.host_cr3 = read_cr3();
45679+#else
45680 pages->state.host_cr3 = __pa(current->mm->pgd);
45681+#endif
45682+
45683 /*
45684 * Set up the Guest's page tables to see this CPU's pages (and no
45685 * other CPU's pages).
45686@@ -477,7 +483,7 @@ void __init lguest_arch_host_init(void)
45687 * compiled-in switcher code and the high-mapped copy we just made.
45688 */
45689 for (i = 0; i < IDT_ENTRIES; i++)
45690- default_idt_entries[i] += switcher_offset();
45691+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
45692
45693 /*
45694 * Set up the Switcher's per-cpu areas.
45695@@ -560,7 +566,7 @@ void __init lguest_arch_host_init(void)
45696 * it will be undisturbed when we switch. To change %cs and jump we
45697 * need this structure to feed to Intel's "lcall" instruction.
45698 */
45699- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
45700+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
45701 lguest_entry.segment = LGUEST_CS;
45702
45703 /*
45704diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
45705index 40634b0..4f5855e 100644
45706--- a/drivers/lguest/x86/switcher_32.S
45707+++ b/drivers/lguest/x86/switcher_32.S
45708@@ -87,6 +87,7 @@
45709 #include <asm/page.h>
45710 #include <asm/segment.h>
45711 #include <asm/lguest.h>
45712+#include <asm/processor-flags.h>
45713
45714 // We mark the start of the code to copy
45715 // It's placed in .text tho it's never run here
45716@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
45717 // Changes type when we load it: damn Intel!
45718 // For after we switch over our page tables
45719 // That entry will be read-only: we'd crash.
45720+
45721+#ifdef CONFIG_PAX_KERNEXEC
45722+ mov %cr0, %edx
45723+ xor $X86_CR0_WP, %edx
45724+ mov %edx, %cr0
45725+#endif
45726+
45727 movl $(GDT_ENTRY_TSS*8), %edx
45728 ltr %dx
45729
45730@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
45731 // Let's clear it again for our return.
45732 // The GDT descriptor of the Host
45733 // Points to the table after two "size" bytes
45734- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
45735+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
45736 // Clear "used" from type field (byte 5, bit 2)
45737- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
45738+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
45739+
45740+#ifdef CONFIG_PAX_KERNEXEC
45741+ mov %cr0, %eax
45742+ xor $X86_CR0_WP, %eax
45743+ mov %eax, %cr0
45744+#endif
45745
45746 // Once our page table's switched, the Guest is live!
45747 // The Host fades as we run this final step.
45748@@ -295,13 +309,12 @@ deliver_to_host:
45749 // I consulted gcc, and it gave
45750 // These instructions, which I gladly credit:
45751 leal (%edx,%ebx,8), %eax
45752- movzwl (%eax),%edx
45753- movl 4(%eax), %eax
45754- xorw %ax, %ax
45755- orl %eax, %edx
45756+ movl 4(%eax), %edx
45757+ movw (%eax), %dx
45758 // Now the address of the handler's in %edx
45759 // We call it now: its "iret" drops us home.
45760- jmp *%edx
45761+ ljmp $__KERNEL_CS, $1f
45762+1: jmp *%edx
45763
45764 // Every interrupt can come to us here
45765 // But we must truly tell each apart.
45766diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
45767index a08e3ee..df8ade2 100644
45768--- a/drivers/md/bcache/closure.h
45769+++ b/drivers/md/bcache/closure.h
45770@@ -238,7 +238,7 @@ static inline void closure_set_stopped(struct closure *cl)
45771 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
45772 struct workqueue_struct *wq)
45773 {
45774- BUG_ON(object_is_on_stack(cl));
45775+ BUG_ON(object_starts_on_stack(cl));
45776 closure_set_ip(cl);
45777 cl->fn = fn;
45778 cl->wq = wq;
45779diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
45780index 67f8b31..9418f2b 100644
45781--- a/drivers/md/bitmap.c
45782+++ b/drivers/md/bitmap.c
45783@@ -1775,7 +1775,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
45784 chunk_kb ? "KB" : "B");
45785 if (bitmap->storage.file) {
45786 seq_printf(seq, ", file: ");
45787- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
45788+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
45789 }
45790
45791 seq_printf(seq, "\n");
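
Adding '\\' to the seq_path() escape set makes the output unambiguous: a filename containing a literal backslash sequence such as "\040" is now escaped too, so it can no longer masquerade as an encoded space in the whitespace-delimited fields around it. The same one-character change recurs throughout this patch wherever paths are printed into seq files:

#include <linux/fs.h>
#include <linux/seq_file.h>

static void show_backing_file(struct seq_file *seq, struct file *file)
{
	/* escape spaces, tabs, newlines *and* the escape character itself */
	seq_path(seq, &file->f_path, " \t\n\\");
}
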
45792diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
45793index 5152142..623d141 100644
45794--- a/drivers/md/dm-ioctl.c
45795+++ b/drivers/md/dm-ioctl.c
45796@@ -1769,7 +1769,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
45797 cmd == DM_LIST_VERSIONS_CMD)
45798 return 0;
45799
45800- if ((cmd == DM_DEV_CREATE_CMD)) {
45801+ if (cmd == DM_DEV_CREATE_CMD) {
45802 if (!*param->name) {
45803 DMWARN("name not supplied when creating device");
45804 return -EINVAL;
45805diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
45806index 7dfdb5c..4caada6 100644
45807--- a/drivers/md/dm-raid1.c
45808+++ b/drivers/md/dm-raid1.c
45809@@ -40,7 +40,7 @@ enum dm_raid1_error {
45810
45811 struct mirror {
45812 struct mirror_set *ms;
45813- atomic_t error_count;
45814+ atomic_unchecked_t error_count;
45815 unsigned long error_type;
45816 struct dm_dev *dev;
45817 sector_t offset;
45818@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
45819 struct mirror *m;
45820
45821 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
45822- if (!atomic_read(&m->error_count))
45823+ if (!atomic_read_unchecked(&m->error_count))
45824 return m;
45825
45826 return NULL;
45827@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
45828 * simple way to tell if a device has encountered
45829 * errors.
45830 */
45831- atomic_inc(&m->error_count);
45832+ atomic_inc_unchecked(&m->error_count);
45833
45834 if (test_and_set_bit(error_type, &m->error_type))
45835 return;
45836@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
45837 struct mirror *m = get_default_mirror(ms);
45838
45839 do {
45840- if (likely(!atomic_read(&m->error_count)))
45841+ if (likely(!atomic_read_unchecked(&m->error_count)))
45842 return m;
45843
45844 if (m-- == ms->mirror)
45845@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
45846 {
45847 struct mirror *default_mirror = get_default_mirror(m->ms);
45848
45849- return !atomic_read(&default_mirror->error_count);
45850+ return !atomic_read_unchecked(&default_mirror->error_count);
45851 }
45852
45853 static int mirror_available(struct mirror_set *ms, struct bio *bio)
45854@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
45855 */
45856 if (likely(region_in_sync(ms, region, 1)))
45857 m = choose_mirror(ms, bio->bi_iter.bi_sector);
45858- else if (m && atomic_read(&m->error_count))
45859+ else if (m && atomic_read_unchecked(&m->error_count))
45860 m = NULL;
45861
45862 if (likely(m))
45863@@ -927,7 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
45864 }
45865
45866 ms->mirror[mirror].ms = ms;
45867- atomic_set(&(ms->mirror[mirror].error_count), 0);
45868+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
45869 ms->mirror[mirror].error_type = 0;
45870 ms->mirror[mirror].offset = offset;
45871
45872@@ -1342,7 +1342,7 @@ static void mirror_resume(struct dm_target *ti)
45873 */
45874 static char device_status_char(struct mirror *m)
45875 {
45876- if (!atomic_read(&(m->error_count)))
45877+ if (!atomic_read_unchecked(&(m->error_count)))
45878 return 'A';
45879
45880 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
45881diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
45882index 28a9012..9c0f6a5 100644
45883--- a/drivers/md/dm-stats.c
45884+++ b/drivers/md/dm-stats.c
45885@@ -382,7 +382,7 @@ do_sync_free:
45886 synchronize_rcu_expedited();
45887 dm_stat_free(&s->rcu_head);
45888 } else {
45889- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
45890+ ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
45891 call_rcu(&s->rcu_head, dm_stat_free);
45892 }
45893 return 0;
45894@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
45895 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
45896 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
45897 ));
45898- ACCESS_ONCE(last->last_sector) = end_sector;
45899- ACCESS_ONCE(last->last_rw) = bi_rw;
45900+ ACCESS_ONCE_RW(last->last_sector) = end_sector;
45901+ ACCESS_ONCE_RW(last->last_rw) = bi_rw;
45902 }
45903
45904 rcu_read_lock();
45905diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
45906index d1600d2..4c3af3a 100644
45907--- a/drivers/md/dm-stripe.c
45908+++ b/drivers/md/dm-stripe.c
45909@@ -21,7 +21,7 @@ struct stripe {
45910 struct dm_dev *dev;
45911 sector_t physical_start;
45912
45913- atomic_t error_count;
45914+ atomic_unchecked_t error_count;
45915 };
45916
45917 struct stripe_c {
45918@@ -186,7 +186,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
45919 kfree(sc);
45920 return r;
45921 }
45922- atomic_set(&(sc->stripe[i].error_count), 0);
45923+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
45924 }
45925
45926 ti->private = sc;
45927@@ -330,7 +330,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
45928 DMEMIT("%d ", sc->stripes);
45929 for (i = 0; i < sc->stripes; i++) {
45930 DMEMIT("%s ", sc->stripe[i].dev->name);
45931- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
45932+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
45933 'D' : 'A';
45934 }
45935 buffer[i] = '\0';
45936@@ -375,8 +375,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
45937 */
45938 for (i = 0; i < sc->stripes; i++)
45939 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
45940- atomic_inc(&(sc->stripe[i].error_count));
45941- if (atomic_read(&(sc->stripe[i].error_count)) <
45942+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
45943+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
45944 DM_IO_ERROR_THRESHOLD)
45945 schedule_work(&sc->trigger_event);
45946 }
45947diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
45948index 5f59f1e..01bd02e 100644
45949--- a/drivers/md/dm-table.c
45950+++ b/drivers/md/dm-table.c
45951@@ -274,7 +274,7 @@ static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
45952 static int open_dev(struct dm_dev_internal *d, dev_t dev,
45953 struct mapped_device *md)
45954 {
45955- static char *_claim_ptr = "I belong to device-mapper";
45956+ static char _claim_ptr[] = "I belong to device-mapper";
45957 struct block_device *bdev;
45958
45959 int r;
45960@@ -342,7 +342,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
45961 if (!dev_size)
45962 return 0;
45963
45964- if ((start >= dev_size) || (start + len > dev_size)) {
45965+ if ((start >= dev_size) || (len > dev_size - start)) {
45966 DMWARN("%s: %s too small for target: "
45967 "start=%llu, len=%llu, dev_size=%llu",
45968 dm_device_name(ti->table->md), bdevname(bdev, b),
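
The dm-table check is rewritten so the addition cannot wrap: start + len may overflow sector_t, but once start < dev_size is established, dev_size - start cannot. Generic form of the idiom, with a stand-in for the kernel type:

typedef unsigned long long sector_t;	/* stand-in for the kernel's sector_t */

static int range_is_invalid(sector_t start, sector_t len, sector_t dev_size)
{
	/* never compute start + len: it can wrap past dev_size and make a
	   bogus range look valid */
	return start >= dev_size || len > dev_size - start;
}
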
45969diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
45970index e9d33ad..dae9880d 100644
45971--- a/drivers/md/dm-thin-metadata.c
45972+++ b/drivers/md/dm-thin-metadata.c
45973@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
45974 {
45975 pmd->info.tm = pmd->tm;
45976 pmd->info.levels = 2;
45977- pmd->info.value_type.context = pmd->data_sm;
45978+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
45979 pmd->info.value_type.size = sizeof(__le64);
45980 pmd->info.value_type.inc = data_block_inc;
45981 pmd->info.value_type.dec = data_block_dec;
45982@@ -423,7 +423,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
45983
45984 pmd->bl_info.tm = pmd->tm;
45985 pmd->bl_info.levels = 1;
45986- pmd->bl_info.value_type.context = pmd->data_sm;
45987+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
45988 pmd->bl_info.value_type.size = sizeof(__le64);
45989 pmd->bl_info.value_type.inc = data_block_inc;
45990 pmd->bl_info.value_type.dec = data_block_dec;
45991diff --git a/drivers/md/dm.c b/drivers/md/dm.c
45992index 32b958d..34011e8 100644
45993--- a/drivers/md/dm.c
45994+++ b/drivers/md/dm.c
45995@@ -180,9 +180,9 @@ struct mapped_device {
45996 /*
45997 * Event handling.
45998 */
45999- atomic_t event_nr;
46000+ atomic_unchecked_t event_nr;
46001 wait_queue_head_t eventq;
46002- atomic_t uevent_seq;
46003+ atomic_unchecked_t uevent_seq;
46004 struct list_head uevent_list;
46005 spinlock_t uevent_lock; /* Protect access to uevent_list */
46006
46007@@ -1952,8 +1952,8 @@ static struct mapped_device *alloc_dev(int minor)
46008 spin_lock_init(&md->deferred_lock);
46009 atomic_set(&md->holders, 1);
46010 atomic_set(&md->open_count, 0);
46011- atomic_set(&md->event_nr, 0);
46012- atomic_set(&md->uevent_seq, 0);
46013+ atomic_set_unchecked(&md->event_nr, 0);
46014+ atomic_set_unchecked(&md->uevent_seq, 0);
46015 INIT_LIST_HEAD(&md->uevent_list);
46016 spin_lock_init(&md->uevent_lock);
46017
46018@@ -2107,7 +2107,7 @@ static void event_callback(void *context)
46019
46020 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
46021
46022- atomic_inc(&md->event_nr);
46023+ atomic_inc_unchecked(&md->event_nr);
46024 wake_up(&md->eventq);
46025 }
46026
46027@@ -2800,18 +2800,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
46028
46029 uint32_t dm_next_uevent_seq(struct mapped_device *md)
46030 {
46031- return atomic_add_return(1, &md->uevent_seq);
46032+ return atomic_add_return_unchecked(1, &md->uevent_seq);
46033 }
46034
46035 uint32_t dm_get_event_nr(struct mapped_device *md)
46036 {
46037- return atomic_read(&md->event_nr);
46038+ return atomic_read_unchecked(&md->event_nr);
46039 }
46040
46041 int dm_wait_event(struct mapped_device *md, int event_nr)
46042 {
46043 return wait_event_interruptible(md->eventq,
46044- (event_nr != atomic_read(&md->event_nr)));
46045+ (event_nr != atomic_read_unchecked(&md->event_nr)));
46046 }
46047
46048 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
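
The atomic_t fields converted above (and in the md.c/md.h hunks that follow) are event and sequence counters whose only job is to keep changing; wrapping them is harmless. Under PaX REFCOUNT hardening, plain atomic_t operations trap on overflow to catch reference-count bugs, so counters that may legitimately wrap are moved to the *_unchecked variants. A minimal compilable stand-in for the types assumed here (illustrative, not the patch's real per-arch definitions):

	#include <stdio.h>

	typedef struct { volatile int counter; } atomic_unchecked_t;

	/* like atomic_inc(), but never traps when the counter wraps */
	static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
	{
		__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
	}

	static inline int atomic_read_unchecked(atomic_unchecked_t *v)
	{
		return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
	}

	int main(void)
	{
		atomic_unchecked_t event_nr = { 0 };

		atomic_inc_unchecked(&event_nr);
		printf("event_nr=%d\n", atomic_read_unchecked(&event_nr));
		return 0;
	}
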
46049diff --git a/drivers/md/md.c b/drivers/md/md.c
46050index 32fc19c..cb6eba3 100644
46051--- a/drivers/md/md.c
46052+++ b/drivers/md/md.c
46053@@ -194,10 +194,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
46054 * start build, activate spare
46055 */
46056 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
46057-static atomic_t md_event_count;
46058+static atomic_unchecked_t md_event_count;
46059 void md_new_event(struct mddev *mddev)
46060 {
46061- atomic_inc(&md_event_count);
46062+ atomic_inc_unchecked(&md_event_count);
46063 wake_up(&md_event_waiters);
46064 }
46065 EXPORT_SYMBOL_GPL(md_new_event);
46066@@ -207,7 +207,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
46067 */
46068 static void md_new_event_inintr(struct mddev *mddev)
46069 {
46070- atomic_inc(&md_event_count);
46071+ atomic_inc_unchecked(&md_event_count);
46072 wake_up(&md_event_waiters);
46073 }
46074
46075@@ -1462,7 +1462,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
46076 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
46077 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
46078 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
46079- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
46080+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
46081
46082 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
46083 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
46084@@ -1713,7 +1713,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
46085 else
46086 sb->resync_offset = cpu_to_le64(0);
46087
46088- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
46089+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
46090
46091 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
46092 sb->size = cpu_to_le64(mddev->dev_sectors);
46093@@ -2725,7 +2725,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
46094 static ssize_t
46095 errors_show(struct md_rdev *rdev, char *page)
46096 {
46097- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
46098+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
46099 }
46100
46101 static ssize_t
46102@@ -2734,7 +2734,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
46103 char *e;
46104 unsigned long n = simple_strtoul(buf, &e, 10);
46105 if (*buf && (*e == 0 || *e == '\n')) {
46106- atomic_set(&rdev->corrected_errors, n);
46107+ atomic_set_unchecked(&rdev->corrected_errors, n);
46108 return len;
46109 }
46110 return -EINVAL;
46111@@ -3183,8 +3183,8 @@ int md_rdev_init(struct md_rdev *rdev)
46112 rdev->sb_loaded = 0;
46113 rdev->bb_page = NULL;
46114 atomic_set(&rdev->nr_pending, 0);
46115- atomic_set(&rdev->read_errors, 0);
46116- atomic_set(&rdev->corrected_errors, 0);
46117+ atomic_set_unchecked(&rdev->read_errors, 0);
46118+ atomic_set_unchecked(&rdev->corrected_errors, 0);
46119
46120 INIT_LIST_HEAD(&rdev->same_set);
46121 init_waitqueue_head(&rdev->blocked_wait);
46122@@ -7068,7 +7068,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
46123
46124 spin_unlock(&pers_lock);
46125 seq_printf(seq, "\n");
46126- seq->poll_event = atomic_read(&md_event_count);
46127+ seq->poll_event = atomic_read_unchecked(&md_event_count);
46128 return 0;
46129 }
46130 if (v == (void*)2) {
46131@@ -7171,7 +7171,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
46132 return error;
46133
46134 seq = file->private_data;
46135- seq->poll_event = atomic_read(&md_event_count);
46136+ seq->poll_event = atomic_read_unchecked(&md_event_count);
46137 return error;
46138 }
46139
46140@@ -7188,7 +7188,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
46141 /* always allow read */
46142 mask = POLLIN | POLLRDNORM;
46143
46144- if (seq->poll_event != atomic_read(&md_event_count))
46145+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
46146 mask |= POLLERR | POLLPRI;
46147 return mask;
46148 }
46149@@ -7232,7 +7232,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
46150 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
46151 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
46152 (int)part_stat_read(&disk->part0, sectors[1]) -
46153- atomic_read(&disk->sync_io);
46154+ atomic_read_unchecked(&disk->sync_io);
46155 /* sync IO will cause sync_io to increase before the disk_stats
46156 * as sync_io is counted when a request starts, and
46157 * disk_stats is counted when it completes.
46158diff --git a/drivers/md/md.h b/drivers/md/md.h
46159index a49d991..3582bb7 100644
46160--- a/drivers/md/md.h
46161+++ b/drivers/md/md.h
46162@@ -94,13 +94,13 @@ struct md_rdev {
46163 * only maintained for arrays that
46164 * support hot removal
46165 */
46166- atomic_t read_errors; /* number of consecutive read errors that
46167+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
46168 * we have tried to ignore.
46169 */
46170 struct timespec last_read_error; /* monotonic time since our
46171 * last read error
46172 */
46173- atomic_t corrected_errors; /* number of corrected read errors,
46174+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
46175 * for reporting to userspace and storing
46176 * in superblock.
46177 */
46178@@ -449,7 +449,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
46179
46180 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
46181 {
46182- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
46183+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
46184 }
46185
46186 struct md_personality
46187diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
46188index 786b689..ea8c956 100644
46189--- a/drivers/md/persistent-data/dm-space-map-metadata.c
46190+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
46191@@ -679,7 +679,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
46192 * Flick into a mode where all blocks get allocated in the new area.
46193 */
46194 smm->begin = old_len;
46195- memcpy(sm, &bootstrap_ops, sizeof(*sm));
46196+ memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
46197
46198 /*
46199 * Extend.
46200@@ -710,7 +710,7 @@ out:
46201 /*
46202 * Switch back to normal behaviour.
46203 */
46204- memcpy(sm, &ops, sizeof(*sm));
46205+ memcpy((void *)sm, &ops, sizeof(*sm));
46206 return r;
46207 }
46208
46209diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
46210index 3e6d115..ffecdeb 100644
46211--- a/drivers/md/persistent-data/dm-space-map.h
46212+++ b/drivers/md/persistent-data/dm-space-map.h
46213@@ -71,6 +71,7 @@ struct dm_space_map {
46214 dm_sm_threshold_fn fn,
46215 void *context);
46216 };
46217+typedef struct dm_space_map __no_const dm_space_map_no_const;
46218
46219 /*----------------------------------------------------------------*/
46220
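
The __no_const marker exists for grsecurity's constify plugin, which makes structures composed of function pointers implicitly const so they cannot be overwritten at runtime. sm_metadata_extend() above deliberately memcpy()s a different ops table over the space map, hence the writable typedef here and the (void *) casts in dm-space-map-metadata.c. A toy sketch of the idea, assuming the plugin's attribute semantics (toy_ops is a hypothetical stand-in):

	#ifdef CONSTIFY_PLUGIN
	# define __no_const __attribute__((no_const))
	#else
	# define __no_const
	#endif

	struct toy_ops {
		int (*run)(void);
	};

	/* writable variant for the one object that gets memcpy()-swapped */
	typedef struct toy_ops __no_const toy_ops_no_const;
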
46221diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
46222index 56e24c0..e1c8e1f 100644
46223--- a/drivers/md/raid1.c
46224+++ b/drivers/md/raid1.c
46225@@ -1931,7 +1931,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
46226 if (r1_sync_page_io(rdev, sect, s,
46227 bio->bi_io_vec[idx].bv_page,
46228 READ) != 0)
46229- atomic_add(s, &rdev->corrected_errors);
46230+ atomic_add_unchecked(s, &rdev->corrected_errors);
46231 }
46232 sectors -= s;
46233 sect += s;
46234@@ -2165,7 +2165,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
46235 test_bit(In_sync, &rdev->flags)) {
46236 if (r1_sync_page_io(rdev, sect, s,
46237 conf->tmppage, READ)) {
46238- atomic_add(s, &rdev->corrected_errors);
46239+ atomic_add_unchecked(s, &rdev->corrected_errors);
46240 printk(KERN_INFO
46241 "md/raid1:%s: read error corrected "
46242 "(%d sectors at %llu on %s)\n",
46243diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
46244index cb882aa..cb8aeca 100644
46245--- a/drivers/md/raid10.c
46246+++ b/drivers/md/raid10.c
46247@@ -1949,7 +1949,7 @@ static void end_sync_read(struct bio *bio, int error)
46248 /* The write handler will notice the lack of
46249 * R10BIO_Uptodate and record any errors etc
46250 */
46251- atomic_add(r10_bio->sectors,
46252+ atomic_add_unchecked(r10_bio->sectors,
46253 &conf->mirrors[d].rdev->corrected_errors);
46254
46255 /* for reconstruct, we always reschedule after a read.
46256@@ -2307,7 +2307,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
46257 {
46258 struct timespec cur_time_mon;
46259 unsigned long hours_since_last;
46260- unsigned int read_errors = atomic_read(&rdev->read_errors);
46261+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
46262
46263 ktime_get_ts(&cur_time_mon);
46264
46265@@ -2329,9 +2329,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
46266 * overflowing the shift of read_errors by hours_since_last.
46267 */
46268 if (hours_since_last >= 8 * sizeof(read_errors))
46269- atomic_set(&rdev->read_errors, 0);
46270+ atomic_set_unchecked(&rdev->read_errors, 0);
46271 else
46272- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
46273+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
46274 }
46275
46276 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
46277@@ -2385,8 +2385,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
46278 return;
46279
46280 check_decay_read_errors(mddev, rdev);
46281- atomic_inc(&rdev->read_errors);
46282- if (atomic_read(&rdev->read_errors) > max_read_errors) {
46283+ atomic_inc_unchecked(&rdev->read_errors);
46284+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
46285 char b[BDEVNAME_SIZE];
46286 bdevname(rdev->bdev, b);
46287
46288@@ -2394,7 +2394,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
46289 "md/raid10:%s: %s: Raid device exceeded "
46290 "read_error threshold [cur %d:max %d]\n",
46291 mdname(mddev), b,
46292- atomic_read(&rdev->read_errors), max_read_errors);
46293+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
46294 printk(KERN_NOTICE
46295 "md/raid10:%s: %s: Failing raid device\n",
46296 mdname(mddev), b);
46297@@ -2549,7 +2549,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
46298 sect +
46299 choose_data_offset(r10_bio, rdev)),
46300 bdevname(rdev->bdev, b));
46301- atomic_add(s, &rdev->corrected_errors);
46302+ atomic_add_unchecked(s, &rdev->corrected_errors);
46303 }
46304
46305 rdev_dec_pending(rdev, mddev);
46306@@ -2954,6 +2954,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
46307 */
46308 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
46309 end_reshape(conf);
46310+ close_sync(conf);
46311 return 0;
46312 }
46313
46314@@ -4411,7 +4412,7 @@ read_more:
46315 read_bio->bi_private = r10_bio;
46316 read_bio->bi_end_io = end_sync_read;
46317 read_bio->bi_rw = READ;
46318- read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
46319+ read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
46320 read_bio->bi_flags |= 1 << BIO_UPTODATE;
46321 read_bio->bi_vcnt = 0;
46322 read_bio->bi_iter.bi_size = 0;
46323diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
46324index 6234b2e..4990801 100644
46325--- a/drivers/md/raid5.c
46326+++ b/drivers/md/raid5.c
46327@@ -1731,6 +1731,10 @@ static int grow_one_stripe(struct r5conf *conf, int hash)
46328 return 1;
46329 }
46330
46331+#ifdef CONFIG_GRKERNSEC_HIDESYM
46332+static atomic_unchecked_t raid5_cache_id = ATOMIC_INIT(0);
46333+#endif
46334+
46335 static int grow_stripes(struct r5conf *conf, int num)
46336 {
46337 struct kmem_cache *sc;
46338@@ -1742,7 +1746,11 @@ static int grow_stripes(struct r5conf *conf, int num)
46339 "raid%d-%s", conf->level, mdname(conf->mddev));
46340 else
46341 sprintf(conf->cache_name[0],
46342+#ifdef CONFIG_GRKERNSEC_HIDESYM
46343+ "raid%d-%08lx", conf->level, atomic_inc_return_unchecked(&raid5_cache_id));
46344+#else
46345 "raid%d-%p", conf->level, conf->mddev);
46346+#endif
46347 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
46348
46349 conf->active_name = 0;
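
Naming the stripe cache with "%p" embeds a live kernel pointer in the slab cache name, which user space can read back through /proc/slabinfo; under GRKERNSEC_HIDESYM the name is built from an opaque per-array sequence number instead. A userspace sketch of the two naming schemes (values hypothetical):

	#include <stdio.h>

	int main(void)
	{
		static unsigned long cache_id;
		int level = 5;
		void *mddev = &cache_id;	/* stands in for a kernel object */
		char name[32];

		sprintf(name, "raid%d-%p", level, mddev);	/* leaks an address */
		puts(name);
		sprintf(name, "raid%d-%08lx", level, ++cache_id);	/* opaque id */
		puts(name);
		return 0;
	}
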
46350@@ -2018,21 +2026,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
46351 mdname(conf->mddev), STRIPE_SECTORS,
46352 (unsigned long long)s,
46353 bdevname(rdev->bdev, b));
46354- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
46355+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
46356 clear_bit(R5_ReadError, &sh->dev[i].flags);
46357 clear_bit(R5_ReWrite, &sh->dev[i].flags);
46358 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
46359 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
46360
46361- if (atomic_read(&rdev->read_errors))
46362- atomic_set(&rdev->read_errors, 0);
46363+ if (atomic_read_unchecked(&rdev->read_errors))
46364+ atomic_set_unchecked(&rdev->read_errors, 0);
46365 } else {
46366 const char *bdn = bdevname(rdev->bdev, b);
46367 int retry = 0;
46368 int set_bad = 0;
46369
46370 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
46371- atomic_inc(&rdev->read_errors);
46372+ atomic_inc_unchecked(&rdev->read_errors);
46373 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
46374 printk_ratelimited(
46375 KERN_WARNING
46376@@ -2060,7 +2068,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
46377 mdname(conf->mddev),
46378 (unsigned long long)s,
46379 bdn);
46380- } else if (atomic_read(&rdev->read_errors)
46381+ } else if (atomic_read_unchecked(&rdev->read_errors)
46382 > conf->max_nr_stripes)
46383 printk(KERN_WARNING
46384 "md/raid:%s: Too many read errors, failing device %s.\n",
46385@@ -3817,6 +3825,8 @@ static void handle_stripe(struct stripe_head *sh)
46386 set_bit(R5_Wantwrite, &dev->flags);
46387 if (prexor)
46388 continue;
46389+ if (s.failed > 1)
46390+ continue;
46391 if (!test_bit(R5_Insync, &dev->flags) ||
46392 ((i == sh->pd_idx || i == sh->qd_idx) &&
46393 s.failed == 0))
46394diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
46395index 983db75..ef9248c 100644
46396--- a/drivers/media/dvb-core/dvbdev.c
46397+++ b/drivers/media/dvb-core/dvbdev.c
46398@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
46399 const struct dvb_device *template, void *priv, int type)
46400 {
46401 struct dvb_device *dvbdev;
46402- struct file_operations *dvbdevfops;
46403+ file_operations_no_const *dvbdevfops;
46404 struct device *clsdev;
46405 int minor;
46406 int id;
46407diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
46408index 539f4db..cdd403b 100644
46409--- a/drivers/media/dvb-frontends/af9033.h
46410+++ b/drivers/media/dvb-frontends/af9033.h
46411@@ -82,7 +82,7 @@ struct af9033_ops {
46412 int (*pid_filter_ctrl)(struct dvb_frontend *fe, int onoff);
46413 int (*pid_filter)(struct dvb_frontend *fe, int index, u16 pid,
46414 int onoff);
46415-};
46416+} __no_const;
46417
46418
46419 #if IS_ENABLED(CONFIG_DVB_AF9033)
46420diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
46421index 9b6c3bb..baeb5c7 100644
46422--- a/drivers/media/dvb-frontends/dib3000.h
46423+++ b/drivers/media/dvb-frontends/dib3000.h
46424@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
46425 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
46426 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
46427 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
46428-};
46429+} __no_const;
46430
46431 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
46432 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
46433diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
46434index ed8cb90..5ef7f79 100644
46435--- a/drivers/media/pci/cx88/cx88-video.c
46436+++ b/drivers/media/pci/cx88/cx88-video.c
46437@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
46438
46439 /* ------------------------------------------------------------------ */
46440
46441-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46442-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46443-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46444+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46445+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46446+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46447
46448 module_param_array(video_nr, int, NULL, 0444);
46449 module_param_array(vbi_nr, int, NULL, 0444);
46450diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
46451index 802642d..5534900 100644
46452--- a/drivers/media/pci/ivtv/ivtv-driver.c
46453+++ b/drivers/media/pci/ivtv/ivtv-driver.c
46454@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
46455 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
46456
46457 /* ivtv instance counter */
46458-static atomic_t ivtv_instance = ATOMIC_INIT(0);
46459+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
46460
46461 /* Parameter declarations */
46462 static int cardtype[IVTV_MAX_CARDS];
46463diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
46464index 9a726ea..f5e9b52 100644
46465--- a/drivers/media/platform/omap/omap_vout.c
46466+++ b/drivers/media/platform/omap/omap_vout.c
46467@@ -63,7 +63,6 @@ enum omap_vout_channels {
46468 OMAP_VIDEO2,
46469 };
46470
46471-static struct videobuf_queue_ops video_vbq_ops;
46472 /* Variables configurable through module params*/
46473 static u32 video1_numbuffers = 3;
46474 static u32 video2_numbuffers = 3;
46475@@ -1015,6 +1014,12 @@ static int omap_vout_open(struct file *file)
46476 {
46477 struct videobuf_queue *q;
46478 struct omap_vout_device *vout = NULL;
46479+ static struct videobuf_queue_ops video_vbq_ops = {
46480+ .buf_setup = omap_vout_buffer_setup,
46481+ .buf_prepare = omap_vout_buffer_prepare,
46482+ .buf_release = omap_vout_buffer_release,
46483+ .buf_queue = omap_vout_buffer_queue,
46484+ };
46485
46486 vout = video_drvdata(file);
46487 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
46488@@ -1032,10 +1037,6 @@ static int omap_vout_open(struct file *file)
46489 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
46490
46491 q = &vout->vbq;
46492- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
46493- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
46494- video_vbq_ops.buf_release = omap_vout_buffer_release;
46495- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
46496 spin_lock_init(&vout->vbq_lock);
46497
46498 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
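
Populating video_vbq_ops through a designated initializer on a function-local static replaces four runtime member stores into a shared file-scope object: the table is now set up once at compile time, which drops the redundant rewrites on every open() and lets the structure live in read-only memory once ops structures are constified. The shape of the pattern, with toy names:

	struct vbq_ops {
		void (*buf_setup)(void);
	};

	static void toy_buf_setup(void) { }

	static int toy_open(void)
	{
		/* initialized once, never written afterwards */
		static const struct vbq_ops ops = {
			.buf_setup = toy_buf_setup,
		};

		ops.buf_setup();
		return 0;
	}
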
46499diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
46500index fb2acc5..a2fcbdc4 100644
46501--- a/drivers/media/platform/s5p-tv/mixer.h
46502+++ b/drivers/media/platform/s5p-tv/mixer.h
46503@@ -156,7 +156,7 @@ struct mxr_layer {
46504 /** layer index (unique identifier) */
46505 int idx;
46506 /** callbacks for layer methods */
46507- struct mxr_layer_ops ops;
46508+ struct mxr_layer_ops *ops;
46509 /** format array */
46510 const struct mxr_format **fmt_array;
46511 /** size of format array */
46512diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46513index 74344c7..a39e70e 100644
46514--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46515+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46516@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
46517 {
46518 struct mxr_layer *layer;
46519 int ret;
46520- struct mxr_layer_ops ops = {
46521+ static struct mxr_layer_ops ops = {
46522 .release = mxr_graph_layer_release,
46523 .buffer_set = mxr_graph_buffer_set,
46524 .stream_set = mxr_graph_stream_set,
46525diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
46526index b713403..53cb5ad 100644
46527--- a/drivers/media/platform/s5p-tv/mixer_reg.c
46528+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
46529@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
46530 layer->update_buf = next;
46531 }
46532
46533- layer->ops.buffer_set(layer, layer->update_buf);
46534+ layer->ops->buffer_set(layer, layer->update_buf);
46535
46536 if (done && done != layer->shadow_buf)
46537 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
46538diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
46539index 8a8dbc8..b74c62d 100644
46540--- a/drivers/media/platform/s5p-tv/mixer_video.c
46541+++ b/drivers/media/platform/s5p-tv/mixer_video.c
46542@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
46543 layer->geo.src.height = layer->geo.src.full_height;
46544
46545 mxr_geometry_dump(mdev, &layer->geo);
46546- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46547+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46548 mxr_geometry_dump(mdev, &layer->geo);
46549 }
46550
46551@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
46552 layer->geo.dst.full_width = mbus_fmt.width;
46553 layer->geo.dst.full_height = mbus_fmt.height;
46554 layer->geo.dst.field = mbus_fmt.field;
46555- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46556+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46557
46558 mxr_geometry_dump(mdev, &layer->geo);
46559 }
46560@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
46561 /* set source size to highest accepted value */
46562 geo->src.full_width = max(geo->dst.full_width, pix->width);
46563 geo->src.full_height = max(geo->dst.full_height, pix->height);
46564- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46565+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46566 mxr_geometry_dump(mdev, &layer->geo);
46567 /* set cropping to total visible screen */
46568 geo->src.width = pix->width;
46569@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
46570 geo->src.x_offset = 0;
46571 geo->src.y_offset = 0;
46572 /* assure consistency of geometry */
46573- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
46574+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
46575 mxr_geometry_dump(mdev, &layer->geo);
46576 /* set full size to lowest possible value */
46577 geo->src.full_width = 0;
46578 geo->src.full_height = 0;
46579- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46580+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46581 mxr_geometry_dump(mdev, &layer->geo);
46582
46583 /* returning results */
46584@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
46585 target->width = s->r.width;
46586 target->height = s->r.height;
46587
46588- layer->ops.fix_geometry(layer, stage, s->flags);
46589+ layer->ops->fix_geometry(layer, stage, s->flags);
46590
46591 /* retrieve update selection rectangle */
46592 res.left = target->x_offset;
46593@@ -954,13 +954,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
46594 mxr_output_get(mdev);
46595
46596 mxr_layer_update_output(layer);
46597- layer->ops.format_set(layer);
46598+ layer->ops->format_set(layer);
46599 /* enabling layer in hardware */
46600 spin_lock_irqsave(&layer->enq_slock, flags);
46601 layer->state = MXR_LAYER_STREAMING;
46602 spin_unlock_irqrestore(&layer->enq_slock, flags);
46603
46604- layer->ops.stream_set(layer, MXR_ENABLE);
46605+ layer->ops->stream_set(layer, MXR_ENABLE);
46606 mxr_streamer_get(mdev);
46607
46608 return 0;
46609@@ -1030,7 +1030,7 @@ static void stop_streaming(struct vb2_queue *vq)
46610 spin_unlock_irqrestore(&layer->enq_slock, flags);
46611
46612 /* disabling layer in hardware */
46613- layer->ops.stream_set(layer, MXR_DISABLE);
46614+ layer->ops->stream_set(layer, MXR_DISABLE);
46615 /* remove one streamer */
46616 mxr_streamer_put(mdev);
46617 /* allow changes in output configuration */
46618@@ -1068,8 +1068,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
46619
46620 void mxr_layer_release(struct mxr_layer *layer)
46621 {
46622- if (layer->ops.release)
46623- layer->ops.release(layer);
46624+ if (layer->ops->release)
46625+ layer->ops->release(layer);
46626 }
46627
46628 void mxr_base_layer_release(struct mxr_layer *layer)
46629@@ -1095,7 +1095,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
46630
46631 layer->mdev = mdev;
46632 layer->idx = idx;
46633- layer->ops = *ops;
46634+ layer->ops = ops;
46635
46636 spin_lock_init(&layer->enq_slock);
46637 INIT_LIST_HEAD(&layer->enq_list);
46638diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46639index c9388c4..ce71ece 100644
46640--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46641+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46642@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
46643 {
46644 struct mxr_layer *layer;
46645 int ret;
46646- struct mxr_layer_ops ops = {
46647+ static struct mxr_layer_ops ops = {
46648 .release = mxr_vp_layer_release,
46649 .buffer_set = mxr_vp_buffer_set,
46650 .stream_set = mxr_vp_stream_set,
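
The s5p-tv changes in this group all follow from one decision in mixer.h: struct mxr_layer now keeps a pointer to a single static ops table instead of a by-value copy, so each call site becomes layer->ops->fn(...) and the table can be shared between layers and placed in read-only memory. A toy version of the change, with hypothetical names:

	struct layer_ops {
		void (*stream_set)(int on);
	};

	struct layer {
		const struct layer_ops *ops;	/* was: struct layer_ops ops; */
	};

	static void toy_stream_set(int on) { (void)on; }

	static const struct layer_ops graph_ops = {
		.stream_set = toy_stream_set,
	};

	static void layer_init(struct layer *l)
	{
		l->ops = &graph_ops;	/* was: l->ops = *ops; */
		l->ops->stream_set(1);	/* calls gain one indirection */
	}
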
46651diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c
46652index d00bf3d..1301a0c 100644
46653--- a/drivers/media/platform/vivi.c
46654+++ b/drivers/media/platform/vivi.c
46655@@ -58,8 +58,8 @@ MODULE_AUTHOR("Mauro Carvalho Chehab, Ted Walther and John Sokol");
46656 MODULE_LICENSE("Dual BSD/GPL");
46657 MODULE_VERSION(VIVI_VERSION);
46658
46659-static unsigned video_nr = -1;
46660-module_param(video_nr, uint, 0644);
46661+static int video_nr = -1;
46662+module_param(video_nr, int, 0644);
46663 MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect");
46664
46665 static unsigned n_devs = 1;
46666diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
46667index d719e59..63f3470 100644
46668--- a/drivers/media/radio/radio-cadet.c
46669+++ b/drivers/media/radio/radio-cadet.c
46670@@ -333,6 +333,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
46671 unsigned char readbuf[RDS_BUFFER];
46672 int i = 0;
46673
46674+ if (count > RDS_BUFFER)
46675+		return -EINVAL;
46676 mutex_lock(&dev->lock);
46677 if (dev->rdsstat == 0)
46678 cadet_start_rds(dev);
46679@@ -349,8 +351,9 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
46680 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
46681 mutex_unlock(&dev->lock);
46682
46683- if (i && copy_to_user(data, readbuf, i))
46684- return -EFAULT;
46685+ if (i > sizeof(readbuf) || (i && copy_to_user(data, readbuf, i)))
46686+ i = -EFAULT;
46687+
46688 return i;
46689 }
46690
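
cadet_read() drains the RDS ring into a fixed readbuf[RDS_BUFFER] with a loop bounded by the caller's count, so an oversized count could walk past the staging buffer; the added guard rejects such reads up front, and the copy-out now re-checks i before touching user memory. A standalone sketch of the guard with hypothetical values:

	#include <stdio.h>

	#define RDS_BUFFER 256

	/* stands in for the driver's drain loop, bounded only by count */
	static size_t fill(char *buf, size_t count)
	{
		size_t i = 0;

		while (i < count)
			buf[i++] = 0x2a;
		return i;
	}

	int main(void)
	{
		char readbuf[RDS_BUFFER];
		size_t count = 1024;	/* user-controlled in the driver */

		if (count > RDS_BUFFER) {	/* the added guard */
			puts("rejected oversized read");
			return 0;
		}
		printf("filled %zu bytes\n", fill(readbuf, count));
		return 0;
	}
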
46691diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
46692index 5236035..c622c74 100644
46693--- a/drivers/media/radio/radio-maxiradio.c
46694+++ b/drivers/media/radio/radio-maxiradio.c
46695@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
46696 /* TEA5757 pin mappings */
46697 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
46698
46699-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
46700+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
46701
46702 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
46703 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
46704diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
46705index 050b3bb..79f62b9 100644
46706--- a/drivers/media/radio/radio-shark.c
46707+++ b/drivers/media/radio/radio-shark.c
46708@@ -79,7 +79,7 @@ struct shark_device {
46709 u32 last_val;
46710 };
46711
46712-static atomic_t shark_instance = ATOMIC_INIT(0);
46713+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
46714
46715 static void shark_write_val(struct snd_tea575x *tea, u32 val)
46716 {
46717diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
46718index 8654e0d..0608a64 100644
46719--- a/drivers/media/radio/radio-shark2.c
46720+++ b/drivers/media/radio/radio-shark2.c
46721@@ -74,7 +74,7 @@ struct shark_device {
46722 u8 *transfer_buffer;
46723 };
46724
46725-static atomic_t shark_instance = ATOMIC_INIT(0);
46726+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
46727
46728 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
46729 {
46730diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
46731index 2fd9009..278cc1e 100644
46732--- a/drivers/media/radio/radio-si476x.c
46733+++ b/drivers/media/radio/radio-si476x.c
46734@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
46735 struct si476x_radio *radio;
46736 struct v4l2_ctrl *ctrl;
46737
46738- static atomic_t instance = ATOMIC_INIT(0);
46739+ static atomic_unchecked_t instance = ATOMIC_INIT(0);
46740
46741 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
46742 if (!radio)
46743diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
46744index 9fd1527..8927230 100644
46745--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
46746+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
46747@@ -50,29 +50,73 @@ static struct dvb_usb_device_properties cinergyt2_properties;
46748
46749 static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
46750 {
46751- char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
46752- char result[64];
46753- return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
46754- sizeof(result), 0);
46755+ char *buf;
46756+ char *result;
46757+ int retval;
46758+
46759+ buf = kmalloc(2, GFP_KERNEL);
46760+ if (buf == NULL)
46761+ return -ENOMEM;
46762+ result = kmalloc(64, GFP_KERNEL);
46763+ if (result == NULL) {
46764+ kfree(buf);
46765+ return -ENOMEM;
46766+ }
46767+
46768+ buf[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
46769+ buf[1] = enable ? 1 : 0;
46770+
46771+ retval = dvb_usb_generic_rw(adap->dev, buf, 2, result, 64, 0);
46772+
46773+ kfree(buf);
46774+ kfree(result);
46775+ return retval;
46776 }
46777
46778 static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
46779 {
46780- char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
46781- char state[3];
46782- return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
46783+ char *buf;
46784+ char *state;
46785+ int retval;
46786+
46787+ buf = kmalloc(2, GFP_KERNEL);
46788+ if (buf == NULL)
46789+ return -ENOMEM;
46790+ state = kmalloc(3, GFP_KERNEL);
46791+ if (state == NULL) {
46792+ kfree(buf);
46793+ return -ENOMEM;
46794+ }
46795+
46796+ buf[0] = CINERGYT2_EP1_SLEEP_MODE;
46797+	buf[1] = enable ? 0 : 1;
46798+
46799+ retval = dvb_usb_generic_rw(d, buf, 2, state, 3, 0);
46800+
46801+ kfree(buf);
46802+ kfree(state);
46803+ return retval;
46804 }
46805
46806 static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
46807 {
46808- char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
46809- char state[3];
46810+ char *query;
46811+ char *state;
46812 int ret;
46813+ query = kmalloc(1, GFP_KERNEL);
46814+ if (query == NULL)
46815+ return -ENOMEM;
46816+ state = kmalloc(3, GFP_KERNEL);
46817+ if (state == NULL) {
46818+ kfree(query);
46819+ return -ENOMEM;
46820+ }
46821+
46822+ query[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
46823
46824 adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
46825
46826- ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
46827- sizeof(state), 0);
46828+ ret = dvb_usb_generic_rw(adap->dev, query, 1, state, 3, 0);
46829 if (ret < 0) {
46830 deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
46831 "state info\n");
46832@@ -80,7 +124,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
46833
46834 /* Copy this pointer as we are gonna need it in the release phase */
46835 cinergyt2_usb_device = adap->dev;
46836-
46837+ kfree(query);
46838+ kfree(state);
46839 return 0;
46840 }
46841
46842@@ -141,12 +186,23 @@ static int repeatable_keys[] = {
46843 static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46844 {
46845 struct cinergyt2_state *st = d->priv;
46846- u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
46847+ u8 *key, *cmd;
46848 int i;
46849
46850+ cmd = kmalloc(1, GFP_KERNEL);
46851+ if (cmd == NULL)
46852+		return -ENOMEM;
46853+ key = kzalloc(5, GFP_KERNEL);
46854+ if (key == NULL) {
46855+ kfree(cmd);
46856+		return -ENOMEM;
46857+ }
46858+
46859+ cmd[0] = CINERGYT2_EP1_GET_RC_EVENTS;
46860+
46861 *state = REMOTE_NO_KEY_PRESSED;
46862
46863- dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
46864+ dvb_usb_generic_rw(d, cmd, 1, key, 5, 0);
46865 if (key[4] == 0xff) {
46866 /* key repeat */
46867 st->rc_counter++;
46868@@ -157,12 +213,12 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46869 *event = d->last_event;
46870 deb_rc("repeat key, event %x\n",
46871 *event);
46872- return 0;
46873+ goto out;
46874 }
46875 }
46876 deb_rc("repeated key (non repeatable)\n");
46877 }
46878- return 0;
46879+ goto out;
46880 }
46881
46882 /* hack to pass checksum on the custom field */
46883@@ -174,6 +230,9 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46884
46885 deb_rc("key: %*ph\n", 5, key);
46886 }
46887+out:
46888+ kfree(cmd);
46889+ kfree(key);
46890 return 0;
46891 }
46892
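
Every transfer buffer in this driver (and in the cinergyT2-fe.c and dvb-usb-firmware.c hunks that follow) moves from the stack to kmalloc(). The buffers are handed to dvb_usb_generic_rw(), which ultimately performs USB bulk transfers, and DMA into on-stack memory is not reliably coherent, or even permitted, on all architectures, so heap allocations are the safe form; that rationale is inferred from the mechanical shape of the change. The recurring pattern, with usb_do_transfer() as a hypothetical stand-in for the real helper:

	static int send_cmd(void *dev, u8 cmd_byte)
	{
		u8 *buf = kmalloc(1, GFP_KERNEL);
		int ret;

		if (buf == NULL)
			return -ENOMEM;

		buf[0] = cmd_byte;
		ret = usb_do_transfer(dev, buf, 1);	/* hypothetical helper */

		kfree(buf);
		return ret;
	}
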
46893diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46894index c890fe4..f9b2ae6 100644
46895--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46896+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46897@@ -145,103 +145,180 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
46898 fe_status_t *status)
46899 {
46900 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46901- struct dvbt_get_status_msg result;
46902- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46903+ struct dvbt_get_status_msg *result;
46904+ u8 *cmd;
46905 int ret;
46906
46907- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
46908- sizeof(result), 0);
46909+ cmd = kmalloc(1, GFP_KERNEL);
46910+ if (cmd == NULL)
46911+ return -ENOMEM;
46912+ result = kmalloc(sizeof(*result), GFP_KERNEL);
46913+ if (result == NULL) {
46914+ kfree(cmd);
46915+ return -ENOMEM;
46916+ }
46917+
46918+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46919+
46920+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)result,
46921+ sizeof(*result), 0);
46922 if (ret < 0)
46923- return ret;
46924+ goto out;
46925
46926 *status = 0;
46927
46928- if (0xffff - le16_to_cpu(result.gain) > 30)
46929+ if (0xffff - le16_to_cpu(result->gain) > 30)
46930 *status |= FE_HAS_SIGNAL;
46931- if (result.lock_bits & (1 << 6))
46932+ if (result->lock_bits & (1 << 6))
46933 *status |= FE_HAS_LOCK;
46934- if (result.lock_bits & (1 << 5))
46935+ if (result->lock_bits & (1 << 5))
46936 *status |= FE_HAS_SYNC;
46937- if (result.lock_bits & (1 << 4))
46938+ if (result->lock_bits & (1 << 4))
46939 *status |= FE_HAS_CARRIER;
46940- if (result.lock_bits & (1 << 1))
46941+ if (result->lock_bits & (1 << 1))
46942 *status |= FE_HAS_VITERBI;
46943
46944 if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
46945 (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC))
46946 *status &= ~FE_HAS_LOCK;
46947
46948- return 0;
46949+out:
46950+ kfree(cmd);
46951+ kfree(result);
46952+ return ret;
46953 }
46954
46955 static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
46956 {
46957 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46958- struct dvbt_get_status_msg status;
46959- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46960+ struct dvbt_get_status_msg *status;
46961+ char *cmd;
46962 int ret;
46963
46964- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46965- sizeof(status), 0);
46966+ cmd = kmalloc(1, GFP_KERNEL);
46967+ if (cmd == NULL)
46968+ return -ENOMEM;
46969+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46970+ if (status == NULL) {
46971+ kfree(cmd);
46972+ return -ENOMEM;
46973+ }
46974+
46975+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46976+
46977+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46978+ sizeof(*status), 0);
46979 if (ret < 0)
46980- return ret;
46981+ goto out;
46982
46983- *ber = le32_to_cpu(status.viterbi_error_rate);
46984+ *ber = le32_to_cpu(status->viterbi_error_rate);
46985+out:
46986+	kfree(cmd);
46987+	kfree(status);
+	if (ret < 0)
+		return ret;
46988 return 0;
46989 }
46990
46991 static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
46992 {
46993 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46994- struct dvbt_get_status_msg status;
46995- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46996+ struct dvbt_get_status_msg *status;
46997+ u8 *cmd;
46998 int ret;
46999
47000- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
47001- sizeof(status), 0);
47002+ cmd = kmalloc(1, GFP_KERNEL);
47003+ if (cmd == NULL)
47004+ return -ENOMEM;
47005+ status = kmalloc(sizeof(*status), GFP_KERNEL);
47006+ if (status == NULL) {
47007+ kfree(cmd);
47008+ return -ENOMEM;
47009+ }
47010+
47011+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
47012+
47013+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)status,
47014+ sizeof(*status), 0);
47015 if (ret < 0) {
47016 err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
47017 ret);
47018- return ret;
47019+ goto out;
47020 }
47021- *unc = le32_to_cpu(status.uncorrected_block_count);
47022- return 0;
47023+ *unc = le32_to_cpu(status->uncorrected_block_count);
47024+
47025+out:
47026+ kfree(cmd);
47027+ kfree(status);
47028+ return ret;
47029 }
47030
47031 static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
47032 u16 *strength)
47033 {
47034 struct cinergyt2_fe_state *state = fe->demodulator_priv;
47035- struct dvbt_get_status_msg status;
47036- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
47037+ struct dvbt_get_status_msg *status;
47038+ char *cmd;
47039 int ret;
47040
47041- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
47042- sizeof(status), 0);
47043+ cmd = kmalloc(1, GFP_KERNEL);
47044+ if (cmd == NULL)
47045+ return -ENOMEM;
47046+ status = kmalloc(sizeof(*status), GFP_KERNEL);
47047+ if (status == NULL) {
47048+ kfree(cmd);
47049+ return -ENOMEM;
47050+ }
47051+
47052+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
47053+
47054+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
47055+ sizeof(*status), 0);
47056 if (ret < 0) {
47057 err("cinergyt2_fe_read_signal_strength() Failed!"
47058 " (Error=%d)\n", ret);
47059- return ret;
47060+ goto out;
47061 }
47062- *strength = (0xffff - le16_to_cpu(status.gain));
47063+ *strength = (0xffff - le16_to_cpu(status->gain));
47064+
47065+out:
47066+	kfree(cmd);
47067+	kfree(status);
+	if (ret < 0)
+		return ret;
47068 return 0;
47069 }
47070
47071 static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
47072 {
47073 struct cinergyt2_fe_state *state = fe->demodulator_priv;
47074- struct dvbt_get_status_msg status;
47075- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
47076+ struct dvbt_get_status_msg *status;
47077+ char *cmd;
47078 int ret;
47079
47080- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
47081- sizeof(status), 0);
47082+ cmd = kmalloc(1, GFP_KERNEL);
47083+ if (cmd == NULL)
47084+ return -ENOMEM;
47085+ status = kmalloc(sizeof(*status), GFP_KERNEL);
47086+ if (status == NULL) {
47087+ kfree(cmd);
47088+ return -ENOMEM;
47089+ }
47090+
47091+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
47092+
47093+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
47094+ sizeof(*status), 0);
47095 if (ret < 0) {
47096 err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
47097- return ret;
47098+ goto out;
47099 }
47100- *snr = (status.snr << 8) | status.snr;
47101- return 0;
47102+ *snr = (status->snr << 8) | status->snr;
47103+
47104+out:
47105+ kfree(cmd);
47106+ kfree(status);
47107+ return ret;
47108 }
47109
47110 static int cinergyt2_fe_init(struct dvb_frontend *fe)
47111@@ -266,35 +343,46 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
47112 {
47113 struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
47114 struct cinergyt2_fe_state *state = fe->demodulator_priv;
47115- struct dvbt_set_parameters_msg param;
47116- char result[2];
47117+ struct dvbt_set_parameters_msg *param;
47118+ char *result;
47119 int err;
47120
47121- param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
47122- param.tps = cpu_to_le16(compute_tps(fep));
47123- param.freq = cpu_to_le32(fep->frequency / 1000);
47124- param.flags = 0;
47125+ result = kmalloc(2, GFP_KERNEL);
47126+ if (result == NULL)
47127+ return -ENOMEM;
47128+ param = kmalloc(sizeof(*param), GFP_KERNEL);
47129+ if (param == NULL) {
47130+ kfree(result);
47131+ return -ENOMEM;
47132+ }
47133+
47134+ param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
47135+ param->tps = cpu_to_le16(compute_tps(fep));
47136+ param->freq = cpu_to_le32(fep->frequency / 1000);
47137+ param->flags = 0;
47138
47139 switch (fep->bandwidth_hz) {
47140 default:
47141 case 8000000:
47142- param.bandwidth = 8;
47143+ param->bandwidth = 8;
47144 break;
47145 case 7000000:
47146- param.bandwidth = 7;
47147+ param->bandwidth = 7;
47148 break;
47149 case 6000000:
47150- param.bandwidth = 6;
47151+ param->bandwidth = 6;
47152 break;
47153 }
47154
47155 err = dvb_usb_generic_rw(state->d,
47156- (char *)&param, sizeof(param),
47157- result, sizeof(result), 0);
47158+ (char *)param, sizeof(*param),
47159+ result, 2, 0);
47160 if (err < 0)
47161 err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
47162
47163- return (err < 0) ? err : 0;
47164+ kfree(result);
47165+ kfree(param);
47166+ return err;
47167 }
47168
47169 static void cinergyt2_fe_release(struct dvb_frontend *fe)
47170diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
47171index a1c641e..3007da9 100644
47172--- a/drivers/media/usb/dvb-usb/cxusb.c
47173+++ b/drivers/media/usb/dvb-usb/cxusb.c
47174@@ -1112,7 +1112,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
47175
47176 struct dib0700_adapter_state {
47177 int (*set_param_save) (struct dvb_frontend *);
47178-};
47179+} __no_const;
47180
47181 static int dib7070_set_param_override(struct dvb_frontend *fe)
47182 {
47183diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
47184index 733a7ff..f8b52e3 100644
47185--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
47186+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
47187@@ -35,42 +35,57 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
47188
47189 int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
47190 {
47191- struct hexline hx;
47192- u8 reset;
47193+ struct hexline *hx;
47194+ u8 *reset;
47195 int ret,pos=0;
47196
47197+ reset = kmalloc(1, GFP_KERNEL);
47198+ if (reset == NULL)
47199+ return -ENOMEM;
47200+
47201+ hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
47202+ if (hx == NULL) {
47203+ kfree(reset);
47204+ return -ENOMEM;
47205+ }
47206+
47207 /* stop the CPU */
47208- reset = 1;
47209- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
47210+ reset[0] = 1;
47211+ if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1)) != 1)
47212 err("could not stop the USB controller CPU.");
47213
47214- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
47215- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
47216- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
47217+ while ((ret = dvb_usb_get_hexline(fw,hx,&pos)) > 0) {
47218+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx->addr,hx->len,hx->chk);
47219+ ret = usb_cypress_writemem(udev,hx->addr,hx->data,hx->len);
47220
47221- if (ret != hx.len) {
47222+ if (ret != hx->len) {
47223 err("error while transferring firmware "
47224 "(transferred size: %d, block size: %d)",
47225- ret,hx.len);
47226+ ret,hx->len);
47227 ret = -EINVAL;
47228 break;
47229 }
47230 }
47231 if (ret < 0) {
47232 err("firmware download failed at %d with %d",pos,ret);
47233+ kfree(reset);
47234+ kfree(hx);
47235 return ret;
47236 }
47237
47238 if (ret == 0) {
47239 /* restart the CPU */
47240- reset = 0;
47241- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
47242+ reset[0] = 0;
47243+ if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1) != 1) {
47244 err("could not restart the USB controller CPU.");
47245 ret = -EINVAL;
47246 }
47247 } else
47248 ret = -EIO;
47249
47250+ kfree(reset);
47251+ kfree(hx);
47252+
47253 return ret;
47254 }
47255 EXPORT_SYMBOL(usb_cypress_load_firmware);
47256diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
47257index ae0f56a..ec71784 100644
47258--- a/drivers/media/usb/dvb-usb/dw2102.c
47259+++ b/drivers/media/usb/dvb-usb/dw2102.c
47260@@ -118,7 +118,7 @@ struct su3000_state {
47261
47262 struct s6x0_state {
47263 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
47264-};
47265+} __no_const;
47266
47267 /* debug */
47268 static int dvb_usb_dw2102_debug;
47269diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
47270index d947e03..87fef42 100644
47271--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
47272+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
47273@@ -87,8 +87,11 @@ struct technisat_usb2_state {
47274 static int technisat_usb2_i2c_access(struct usb_device *udev,
47275 u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
47276 {
47277- u8 b[64];
47278- int ret, actual_length;
47279+ u8 *b = kmalloc(64, GFP_KERNEL);
47280+ int ret, actual_length, error = 0;
47281+
47282+ if (b == NULL)
47283+ return -ENOMEM;
47284
47285 deb_i2c("i2c-access: %02x, tx: ", device_addr);
47286 debug_dump(tx, txlen, deb_i2c);
47287@@ -121,7 +124,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
47288
47289 if (ret < 0) {
47290 err("i2c-error: out failed %02x = %d", device_addr, ret);
47291- return -ENODEV;
47292+ error = -ENODEV;
47293+ goto out;
47294 }
47295
47296 ret = usb_bulk_msg(udev,
47297@@ -129,7 +133,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
47298 b, 64, &actual_length, 1000);
47299 if (ret < 0) {
47300 err("i2c-error: in failed %02x = %d", device_addr, ret);
47301- return -ENODEV;
47302+ error = -ENODEV;
47303+ goto out;
47304 }
47305
47306 if (b[0] != I2C_STATUS_OK) {
47307@@ -137,8 +142,10 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
47308 /* handle tuner-i2c-nak */
47309 if (!(b[0] == I2C_STATUS_NAK &&
47310 device_addr == 0x60
47311- /* && device_is_technisat_usb2 */))
47312- return -ENODEV;
47313+ /* && device_is_technisat_usb2 */)) {
47314+ error = -ENODEV;
47315+ goto out;
47316+ }
47317 }
47318
47319 deb_i2c("status: %d, ", b[0]);
47320@@ -152,7 +159,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
47321
47322 deb_i2c("\n");
47323
47324- return 0;
47325+out:
47326+ kfree(b);
47327+ return error;
47328 }
47329
47330 static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
47331@@ -224,14 +233,18 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
47332 {
47333 int ret;
47334
47335- u8 led[8] = {
47336- red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
47337- 0
47338- };
47339+ u8 *led = kzalloc(8, GFP_KERNEL);
47340+
47341+ if (led == NULL)
47342+ return -ENOMEM;
47343
47344-	if (disable_led_control && state != TECH_LED_OFF)
47345-		return 0;
+	if (disable_led_control && state != TECH_LED_OFF) {
+		kfree(led);
+		return 0;
+	}
47346
47347+ led[0] = red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST;
47348+
47349 switch (state) {
47350 case TECH_LED_ON:
47351 led[1] = 0x82;
47352@@ -263,16 +276,24 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
47353 red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
47354 USB_TYPE_VENDOR | USB_DIR_OUT,
47355 0, 0,
47356- led, sizeof(led), 500);
47357+ led, 8, 500);
47358
47359 mutex_unlock(&d->i2c_mutex);
47360+
47361+ kfree(led);
47362+
47363 return ret;
47364 }
47365
47366 static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green)
47367 {
47368 int ret;
47369- u8 b = 0;
47370+ u8 *b = kzalloc(1, GFP_KERNEL);
47371+
47372+ if (b == NULL)
47373+ return -ENOMEM;
47374
47375-	if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
47376-		return -EAGAIN;
+	if (mutex_lock_interruptible(&d->i2c_mutex) < 0) {
+		kfree(b);
+		return -EAGAIN;
+	}
47377@@ -281,10 +298,12 @@ static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 gre
47378 SET_LED_TIMER_DIVIDER_VENDOR_REQUEST,
47379 USB_TYPE_VENDOR | USB_DIR_OUT,
47380 (red << 8) | green, 0,
47381- &b, 1, 500);
47382+ b, 1, 500);
47383
47384 mutex_unlock(&d->i2c_mutex);
47385
47386+ kfree(b);
47387+
47388 return ret;
47389 }
47390
47391@@ -328,7 +351,7 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
47392 struct dvb_usb_device_description **desc, int *cold)
47393 {
47394 int ret;
47395- u8 version[3];
47396+ u8 *version = kmalloc(3, GFP_KERNEL);
47397
47398 /* first select the interface */
47399 if (usb_set_interface(udev, 0, 1) != 0)
47400@@ -338,11 +361,14 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
47401
47402 *cold = 0; /* by default do not download a firmware - just in case something is wrong */
47403
47404+ if (version == NULL)
47405+ return 0;
47406+
47407 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
47408 GET_VERSION_INFO_VENDOR_REQUEST,
47409 USB_TYPE_VENDOR | USB_DIR_IN,
47410 0, 0,
47411- version, sizeof(version), 500);
47412+ version, 3, 500);
47413
47414 if (ret < 0)
47415 *cold = 1;
47416@@ -351,6 +373,8 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
47417 *cold = 0;
47418 }
47419
47420+ kfree(version);
47421+
47422 return 0;
47423 }
47424
47425@@ -591,10 +619,15 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
47426
47427 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
47428 {
47429- u8 buf[62], *b;
47430+ u8 *buf, *b;
47431 int ret;
47432 struct ir_raw_event ev;
47433
47434+ buf = kmalloc(62, GFP_KERNEL);
47435+
47436+ if (buf == NULL)
47437+ return -ENOMEM;
47438+
47439 buf[0] = GET_IR_DATA_VENDOR_REQUEST;
47440 buf[1] = 0x08;
47441 buf[2] = 0x8f;
47442@@ -617,16 +650,20 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d)
47443 GET_IR_DATA_VENDOR_REQUEST,
47444 USB_TYPE_VENDOR | USB_DIR_IN,
47445 0x8080, 0,
47446- buf, sizeof(buf), 500);
47447+ buf, 62, 500);
47448
47449 unlock:
47450 mutex_unlock(&d->i2c_mutex);
47451
47452- if (ret < 0)
47453+ if (ret < 0) {
47454+ kfree(buf);
47455 return ret;
47456+ }
47457
47458- if (ret == 1)
47459+ if (ret == 1) {
47460+ kfree(buf);
47461 return 0; /* no key pressed */
47462+ }
47463
47464 /* decoding */
47465 b = buf+1;
47466@@ -653,6 +690,8 @@ unlock:
47467
47468 ir_raw_event_handle(d->rc_dev);
47469
47470+ kfree(buf);
47471+
47472 return 1;
47473 }
47474
47475diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47476index 7e2411c..cef73ca 100644
47477--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47478+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47479@@ -328,7 +328,7 @@ struct v4l2_buffer32 {
47480 __u32 reserved;
47481 };
47482
47483-static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
47484+static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
47485 enum v4l2_memory memory)
47486 {
47487 void __user *up_pln;
47488@@ -357,7 +357,7 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
47489 return 0;
47490 }
47491
47492-static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
47493+static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
47494 enum v4l2_memory memory)
47495 {
47496 if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
47497@@ -427,7 +427,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
47498 * by passing a very big num_planes value */
47499 uplane = compat_alloc_user_space(num_planes *
47500 sizeof(struct v4l2_plane));
47501- kp->m.planes = uplane;
47502+ kp->m.planes = (struct v4l2_plane __force_kernel *)uplane;
47503
47504 while (--num_planes >= 0) {
47505 ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
47506@@ -498,7 +498,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
47507 if (num_planes == 0)
47508 return 0;
47509
47510- uplane = kp->m.planes;
47511+ uplane = (struct v4l2_plane __force_user *)kp->m.planes;
47512 if (get_user(p, &up->m.planes))
47513 return -EFAULT;
47514 uplane32 = compat_ptr(p);
47515@@ -552,7 +552,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
47516 get_user(kp->capability, &up->capability) ||
47517 get_user(kp->flags, &up->flags))
47518 return -EFAULT;
47519- kp->base = compat_ptr(tmp);
47520+ kp->base = (void __force_kernel *)compat_ptr(tmp);
47521 get_v4l2_pix_format(&kp->fmt, &up->fmt);
47522 return 0;
47523 }
47524@@ -658,7 +658,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
47525 n * sizeof(struct v4l2_ext_control32)))
47526 return -EFAULT;
47527 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
47528- kp->controls = kcontrols;
47529+ kp->controls = (struct v4l2_ext_control __force_kernel *)kcontrols;
47530 while (--n >= 0) {
47531 if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols)))
47532 return -EFAULT;
47533@@ -680,7 +680,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
47534 static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
47535 {
47536 struct v4l2_ext_control32 __user *ucontrols;
47537- struct v4l2_ext_control __user *kcontrols = kp->controls;
47538+ struct v4l2_ext_control __user *kcontrols = (struct v4l2_ext_control __force_user *)kp->controls;
47539 int n = kp->count;
47540 compat_caddr_t p;
47541
47542@@ -774,7 +774,7 @@ static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
47543 put_user(kp->start_block, &up->start_block) ||
47544 put_user(kp->blocks, &up->blocks) ||
47545 put_user(tmp, &up->edid) ||
47546- copy_to_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
47547+ copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
47548 return -EFAULT;
47549 return 0;
47550 }
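
Besides the sparse __user annotations and __force casts, the final hunk above fixes a genuine argument-order bug: put_v4l2_edid32() was copying the reserved bytes with the kernel buffer as the destination, while copy_to_user() takes the user-space destination first. The calling convention assumed by the fix, with a no-op __user stub so the fragment stands alone:

	#define __user	/* sparse address-space marker, no-op for the compiler */

	/* returns the number of bytes that could NOT be copied; 0 on success */
	unsigned long copy_to_user(void __user *to, const void *from,
				   unsigned long n);
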
47551diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
47552index 55c6832..a91c7a6 100644
47553--- a/drivers/media/v4l2-core/v4l2-ctrls.c
47554+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
47555@@ -1431,8 +1431,8 @@ static int validate_new(const struct v4l2_ctrl *ctrl,
47556 return 0;
47557
47558 case V4L2_CTRL_TYPE_STRING:
47559- len = strlen(c->string);
47560- if (len < ctrl->minimum)
47561+ len = strlen_user(c->string);
47562+ if (!len || len < ctrl->minimum)
47563 return -ERANGE;
47564 if ((len - ctrl->minimum) % ctrl->step)
47565 return -ERANGE;
47566diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
47567index 015f92a..59e311e 100644
47568--- a/drivers/media/v4l2-core/v4l2-device.c
47569+++ b/drivers/media/v4l2-core/v4l2-device.c
47570@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
47571 EXPORT_SYMBOL_GPL(v4l2_device_put);
47572
47573 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
47574- atomic_t *instance)
47575+ atomic_unchecked_t *instance)
47576 {
47577- int num = atomic_inc_return(instance) - 1;
47578+ int num = atomic_inc_return_unchecked(instance) - 1;
47579 int len = strlen(basename);
47580
47581 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
47582diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
47583index 16bffd8..3ab516a 100644
47584--- a/drivers/media/v4l2-core/v4l2-ioctl.c
47585+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
47586@@ -2003,7 +2003,8 @@ struct v4l2_ioctl_info {
47587 struct file *file, void *fh, void *p);
47588 } u;
47589 void (*debug)(const void *arg, bool write_only);
47590-};
47591+} __do_const;
47592+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
47593
47594 /* This control needs a priority check */
47595 #define INFO_FL_PRIO (1 << 0)
47596@@ -2186,7 +2187,7 @@ static long __video_do_ioctl(struct file *file,
47597 struct video_device *vfd = video_devdata(file);
47598 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
47599 bool write_only = false;
47600- struct v4l2_ioctl_info default_info;
47601+ v4l2_ioctl_info_no_const default_info;
47602 const struct v4l2_ioctl_info *info;
47603 void *fh = file->private_data;
47604 struct v4l2_fh *vfh = NULL;
47605@@ -2276,7 +2277,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47606 ret = -EINVAL;
47607 break;
47608 }
47609- *user_ptr = (void __user *)buf->m.planes;
47610+ *user_ptr = (void __force_user *)buf->m.planes;
47611 *kernel_ptr = (void **)&buf->m.planes;
47612 *array_size = sizeof(struct v4l2_plane) * buf->length;
47613 ret = 1;
47614@@ -2293,7 +2294,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47615 ret = -EINVAL;
47616 break;
47617 }
47618- *user_ptr = (void __user *)edid->edid;
47619+ *user_ptr = (void __force_user *)edid->edid;
47620 *kernel_ptr = (void **)&edid->edid;
47621 *array_size = edid->blocks * 128;
47622 ret = 1;
47623@@ -2311,7 +2312,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47624 ret = -EINVAL;
47625 break;
47626 }
47627- *user_ptr = (void __user *)ctrls->controls;
47628+ *user_ptr = (void __force_user *)ctrls->controls;
47629 *kernel_ptr = (void **)&ctrls->controls;
47630 *array_size = sizeof(struct v4l2_ext_control)
47631 * ctrls->count;
47632@@ -2412,7 +2413,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
47633 }
47634
47635 if (has_array_args) {
47636- *kernel_ptr = (void __force *)user_ptr;
47637+ *kernel_ptr = (void __force_kernel *)user_ptr;
47638 if (copy_to_user(user_ptr, mbuf, array_size))
47639 err = -EFAULT;
47640 goto out_array_args;
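
`__do_const` and `__no_const` drive the PaX constify gcc plugin: structures holding only function pointers are made const at compile time so the tables cannot be overwritten at runtime, and `__no_const` opts a type back out for the rare writable instance, here the on-stack `default_info` copy. A conceptual sketch with placeholder macro bodies (the real ones are plugin attributes):

```c
#define __do_const	/* plugin: every instance becomes const */
#define __no_const	/* plugin: this type stays writable     */

struct ioctl_info {
	void (*handler)(void);
} __do_const;

typedef struct ioctl_info __no_const ioctl_info_no_const;

static void h(void) { }
static const struct ioctl_info table = { .handler = h };

static void build_default(void)
{
	ioctl_info_no_const default_info = table;	/* writable copy */
	default_info.handler = h;
	(void)default_info;
}
```
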
47641diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
47642index ebc0af7..baed058 100644
47643--- a/drivers/message/fusion/mptbase.c
47644+++ b/drivers/message/fusion/mptbase.c
47645@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
47646 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
47647 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
47648
47649+#ifdef CONFIG_GRKERNSEC_HIDESYM
47650+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
47651+#else
47652 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
47653 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
47654+#endif
47655+
47656 /*
47657 * Rounding UP to nearest 4-kB boundary here...
47658 */
47659@@ -6769,7 +6774,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
47660 ioc->facts.GlobalCredits);
47661
47662 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
47663+#ifdef CONFIG_GRKERNSEC_HIDESYM
47664+ NULL, NULL);
47665+#else
47666 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
47667+#endif
47668 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
47669 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
47670 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
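
GRKERNSEC_HIDESYM blanks kernel addresses in world-readable /proc output, since every leaked %p value helps defeat KASLR and heap-layout defenses; mainline later attacked the same leak class with %pK and kptr_restrict. The pattern, reduced:

```c
#ifdef CONFIG_GRKERNSEC_HIDESYM
	seq_printf(m, "  RequestFrames @ 0x%p\n", NULL);	/* hide it */
#else
	seq_printf(m, "  RequestFrames @ 0x%p\n", (void *)ioc->req_frames);
#endif
```
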
47671diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
47672index 711fcb5..5da1fb0 100644
47673--- a/drivers/message/fusion/mptsas.c
47674+++ b/drivers/message/fusion/mptsas.c
47675@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
47676 return 0;
47677 }
47678
47679+static inline void
47680+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
47681+{
47682+ if (phy_info->port_details) {
47683+ phy_info->port_details->rphy = rphy;
47684+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
47685+ ioc->name, rphy));
47686+ }
47687+
47688+ if (rphy) {
47689+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
47690+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
47691+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
47692+ ioc->name, rphy, rphy->dev.release));
47693+ }
47694+}
47695+
47696 /* no mutex */
47697 static void
47698 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
47699@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
47700 return NULL;
47701 }
47702
47703-static inline void
47704-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
47705-{
47706- if (phy_info->port_details) {
47707- phy_info->port_details->rphy = rphy;
47708- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
47709- ioc->name, rphy));
47710- }
47711-
47712- if (rphy) {
47713- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
47714- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
47715- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
47716- ioc->name, rphy, rphy->dev.release));
47717- }
47718-}
47719-
47720 static inline struct sas_port *
47721 mptsas_get_port(struct mptsas_phyinfo *phy_info)
47722 {
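
These two hunks only relocate mptsas_set_rphy() above the first function that uses it, presumably so the static inline is in scope without a prototype; no behavior changes. The lighter-weight alternative would have been a forward declaration:

```c
/* Equivalent fix without moving the body: */
static inline void
mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
		struct sas_rphy *rphy);
```
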
47723diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
47724index 2a1c6f2..a04c6a2 100644
47725--- a/drivers/message/fusion/mptscsih.c
47726+++ b/drivers/message/fusion/mptscsih.c
47727@@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
47728
47729 h = shost_priv(SChost);
47730
47731- if (h) {
47732- if (h->info_kbuf == NULL)
47733- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
47734- return h->info_kbuf;
47735- h->info_kbuf[0] = '\0';
47736+ if (!h)
47737+ return NULL;
47738
47739- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
47740- h->info_kbuf[size-1] = '\0';
47741- }
47742+ if (h->info_kbuf == NULL)
47743+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
47744+ return h->info_kbuf;
47745+ h->info_kbuf[0] = '\0';
47746+
47747+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
47748+ h->info_kbuf[size-1] = '\0';
47749
47750 return h->info_kbuf;
47751 }
47752diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
47753index b7d87cd..3fb36da 100644
47754--- a/drivers/message/i2o/i2o_proc.c
47755+++ b/drivers/message/i2o/i2o_proc.c
47756@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
47757 "Array Controller Device"
47758 };
47759
47760-static char *chtostr(char *tmp, u8 *chars, int n)
47761-{
47762- tmp[0] = 0;
47763- return strncat(tmp, (char *)chars, n);
47764-}
47765-
47766 static int i2o_report_query_status(struct seq_file *seq, int block_status,
47767 char *group)
47768 {
47769@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
47770 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
47771 {
47772 struct i2o_controller *c = (struct i2o_controller *)seq->private;
47773- static u32 work32[5];
47774- static u8 *work8 = (u8 *) work32;
47775- static u16 *work16 = (u16 *) work32;
47776+ u32 work32[5];
47777+ u8 *work8 = (u8 *) work32;
47778+ u16 *work16 = (u16 *) work32;
47779 int token;
47780 u32 hwcap;
47781
47782@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
47783 } *result;
47784
47785 i2o_exec_execute_ddm_table ddm_table;
47786- char tmp[28 + 1];
47787
47788 result = kmalloc(sizeof(*result), GFP_KERNEL);
47789 if (!result)
47790@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
47791
47792 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
47793 seq_printf(seq, "%-#8x", ddm_table.module_id);
47794- seq_printf(seq, "%-29s",
47795- chtostr(tmp, ddm_table.module_name_version, 28));
47796+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
47797 seq_printf(seq, "%9d ", ddm_table.data_size);
47798 seq_printf(seq, "%8d", ddm_table.code_size);
47799
47800@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
47801
47802 i2o_driver_result_table *result;
47803 i2o_driver_store_table *dst;
47804- char tmp[28 + 1];
47805
47806 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
47807 if (result == NULL)
47808@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
47809
47810 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
47811 seq_printf(seq, "%-#8x", dst->module_id);
47812- seq_printf(seq, "%-29s",
47813- chtostr(tmp, dst->module_name_version, 28));
47814- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
47815+ seq_printf(seq, "%-.28s", dst->module_name_version);
47816+ seq_printf(seq, "%-.8s", dst->date);
47817 seq_printf(seq, "%8d ", dst->module_size);
47818 seq_printf(seq, "%8d ", dst->mpb_size);
47819 seq_printf(seq, "0x%04x", dst->module_flags);
47820@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
47821 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
47822 {
47823 struct i2o_device *d = (struct i2o_device *)seq->private;
47824- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
47825+ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
47826 // == (allow) 512d bytes (max)
47827- static u16 *work16 = (u16 *) work32;
47828+ u16 *work16 = (u16 *) work32;
47829 int token;
47830- char tmp[16 + 1];
47831
47832 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
47833
47834@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
47835 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
47836 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
47837 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
47838- seq_printf(seq, "Vendor info : %s\n",
47839- chtostr(tmp, (u8 *) (work32 + 2), 16));
47840- seq_printf(seq, "Product info : %s\n",
47841- chtostr(tmp, (u8 *) (work32 + 6), 16));
47842- seq_printf(seq, "Description : %s\n",
47843- chtostr(tmp, (u8 *) (work32 + 10), 16));
47844- seq_printf(seq, "Product rev. : %s\n",
47845- chtostr(tmp, (u8 *) (work32 + 14), 8));
47846+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
47847+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
47848+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
47849+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
47850
47851 seq_printf(seq, "Serial number : ");
47852 print_serial_number(seq, (u8 *) (work32 + 16),
47853@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
47854 u8 pad[256]; // allow up to 256 byte (max) serial number
47855 } result;
47856
47857- char tmp[24 + 1];
47858-
47859 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
47860
47861 if (token < 0) {
47862@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
47863 }
47864
47865 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
47866- seq_printf(seq, "Module name : %s\n",
47867- chtostr(tmp, result.module_name, 24));
47868- seq_printf(seq, "Module revision : %s\n",
47869- chtostr(tmp, result.module_rev, 8));
47870+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
47871+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
47872
47873 seq_printf(seq, "Serial number : ");
47874 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
47875@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
47876 u8 instance_number[4];
47877 } result;
47878
47879- char tmp[64 + 1];
47880-
47881 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
47882
47883 if (token < 0) {
47884@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
47885 return 0;
47886 }
47887
47888- seq_printf(seq, "Device name : %s\n",
47889- chtostr(tmp, result.device_name, 64));
47890- seq_printf(seq, "Service name : %s\n",
47891- chtostr(tmp, result.service_name, 64));
47892- seq_printf(seq, "Physical name : %s\n",
47893- chtostr(tmp, result.physical_location, 64));
47894- seq_printf(seq, "Instance number : %s\n",
47895- chtostr(tmp, result.instance_number, 4));
47896+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
47897+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
47898+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
47899+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
47900
47901 return 0;
47902 }
47903@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
47904 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
47905 {
47906 struct i2o_device *d = (struct i2o_device *)seq->private;
47907- static u32 work32[12];
47908- static u16 *work16 = (u16 *) work32;
47909- static u8 *work8 = (u8 *) work32;
47910+ u32 work32[12];
47911+ u16 *work16 = (u16 *) work32;
47912+ u8 *work8 = (u8 *) work32;
47913 int token;
47914
47915 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
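
Two independent cleanups run through the i2o_proc hunks: the chtostr() helper, which strncat'ed fixed-width, possibly unterminated ID fields into a temporary buffer, is replaced by printf precision, which bounds the read without any copy; and the `static` work buffers become stack locals so concurrent readers of these proc files stop sharing (and racing on) one buffer. A standalone demonstration of the precision idiom:

```c
#include <stdio.h>

int main(void)
{
	/* An 8-byte field as it sits in controller memory: no NUL. */
	char rev[8] = { 'R', 'E', 'V', '1', '.', '0', '2', 'A' };

	/* "%.8s" reads at most 8 bytes even without a terminator,
	 * which is exactly what the seq_printf() conversions rely on. */
	printf("Module revision : %.8s\n", rev);
	return 0;
}
```
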
47916diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
47917index 92752fb..a7494f6 100644
47918--- a/drivers/message/i2o/iop.c
47919+++ b/drivers/message/i2o/iop.c
47920@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
47921
47922 spin_lock_irqsave(&c->context_list_lock, flags);
47923
47924- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
47925- atomic_inc(&c->context_list_counter);
47926+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
47927+ atomic_inc_unchecked(&c->context_list_counter);
47928
47929- entry->context = atomic_read(&c->context_list_counter);
47930+ entry->context = atomic_read_unchecked(&c->context_list_counter);
47931
47932 list_add(&entry->list, &c->context_list);
47933
47934@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
47935
47936 #if BITS_PER_LONG == 64
47937 spin_lock_init(&c->context_list_lock);
47938- atomic_set(&c->context_list_counter, 0);
47939+ atomic_set_unchecked(&c->context_list_counter, 0);
47940 INIT_LIST_HEAD(&c->context_list);
47941 #endif
47942
47943diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
47944index d1a22aa..d0f7bf7 100644
47945--- a/drivers/mfd/ab8500-debugfs.c
47946+++ b/drivers/mfd/ab8500-debugfs.c
47947@@ -100,7 +100,7 @@ static int irq_last;
47948 static u32 *irq_count;
47949 static int num_irqs;
47950
47951-static struct device_attribute **dev_attr;
47952+static device_attribute_no_const **dev_attr;
47953 static char **event_name;
47954
47955 static u8 avg_sample = SAMPLE_16;
47956diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
47957index a83eed5..62a58a9 100644
47958--- a/drivers/mfd/max8925-i2c.c
47959+++ b/drivers/mfd/max8925-i2c.c
47960@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
47961 const struct i2c_device_id *id)
47962 {
47963 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
47964- static struct max8925_chip *chip;
47965+ struct max8925_chip *chip;
47966 struct device_node *node = client->dev.of_node;
47967
47968 if (node && !pdata) {
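
A `static` local in a probe routine is a latent bug: the single slot is shared by every device instance and every concurrent probe, so the keyword is dropped here (and in the tps65910 and cfi_cmdset_0020 hunks below) to make the variable an ordinary per-call local. Userspace analogue of the two shapes:

```c
#include <stdlib.h>

struct chip { int id; };

struct chip *probe_buggy(void)
{
	static struct chip *c;		/* one slot, races across probes */
	c = malloc(sizeof(*c));
	return c;
}

struct chip *probe_fixed(void)
{
	struct chip *c = malloc(sizeof(*c));	/* per-call, as in the hunk */
	return c;
}
```
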
47969diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
47970index f9e42ea..614d240 100644
47971--- a/drivers/mfd/tps65910.c
47972+++ b/drivers/mfd/tps65910.c
47973@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
47974 struct tps65910_platform_data *pdata)
47975 {
47976 int ret = 0;
47977- static struct regmap_irq_chip *tps6591x_irqs_chip;
47978+ struct regmap_irq_chip *tps6591x_irqs_chip;
47979
47980 if (!irq) {
47981 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
47982diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
47983index 596b1f6..5b6ab74 100644
47984--- a/drivers/mfd/twl4030-irq.c
47985+++ b/drivers/mfd/twl4030-irq.c
47986@@ -34,6 +34,7 @@
47987 #include <linux/of.h>
47988 #include <linux/irqdomain.h>
47989 #include <linux/i2c/twl.h>
47990+#include <asm/pgtable.h>
47991
47992 #include "twl-core.h"
47993
47994@@ -725,10 +726,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
47995 * Install an irq handler for each of the SIH modules;
47996 * clone dummy irq_chip since PIH can't *do* anything
47997 */
47998- twl4030_irq_chip = dummy_irq_chip;
47999- twl4030_irq_chip.name = "twl4030";
48000+ pax_open_kernel();
48001+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
48002+ *(const char **)&twl4030_irq_chip.name = "twl4030";
48003
48004- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
48005+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
48006+ pax_close_kernel();
48007
48008 for (i = irq_base; i < irq_end; i++) {
48009 irq_set_chip_and_handler(i, &twl4030_irq_chip,
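
With KERNEXEC/constify active, the irq_chip templates live on read-only pages, so the one-time writes are bracketed with pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection. A very rough x86 sketch for orientation only; the real helpers are arch-specific and also deal with preemption:

```c
static inline unsigned long open_kernel_sketch(void)
{
	unsigned long cr0 = read_cr0();
	write_cr0(cr0 & ~X86_CR0_WP);	/* let ring 0 write RO pages */
	return cr0;
}

static inline void close_kernel_sketch(unsigned long cr0)
{
	write_cr0(cr0);			/* restore write protection */
}
```
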
48010diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
48011index 464419b..64bae8d 100644
48012--- a/drivers/misc/c2port/core.c
48013+++ b/drivers/misc/c2port/core.c
48014@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
48015 goto error_idr_alloc;
48016 c2dev->id = ret;
48017
48018- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
48019+ pax_open_kernel();
48020+ *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
48021+ pax_close_kernel();
48022
48023 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
48024 "c2port%d", c2dev->id);
48025diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
48026index 3f2b625..945e179 100644
48027--- a/drivers/misc/eeprom/sunxi_sid.c
48028+++ b/drivers/misc/eeprom/sunxi_sid.c
48029@@ -126,7 +126,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
48030
48031 platform_set_drvdata(pdev, sid_data);
48032
48033- sid_bin_attr.size = sid_data->keysize;
48034+ pax_open_kernel();
48035+ *(size_t *)&sid_bin_attr.size = sid_data->keysize;
48036+ pax_close_kernel();
48037 if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
48038 return -ENODEV;
48039
48040diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
48041index 36f5d52..32311c3 100644
48042--- a/drivers/misc/kgdbts.c
48043+++ b/drivers/misc/kgdbts.c
48044@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
48045 char before[BREAK_INSTR_SIZE];
48046 char after[BREAK_INSTR_SIZE];
48047
48048- probe_kernel_read(before, (char *)kgdbts_break_test,
48049+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
48050 BREAK_INSTR_SIZE);
48051 init_simple_test();
48052 ts.tst = plant_and_detach_test;
48053@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
48054 /* Activate test with initial breakpoint */
48055 if (!is_early)
48056 kgdb_breakpoint();
48057- probe_kernel_read(after, (char *)kgdbts_break_test,
48058+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
48059 BREAK_INSTR_SIZE);
48060 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
48061 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
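
ktla_ktva() translates a kernel text symbol address into the alias through which the bytes are actually readable; under KERNEXEC on i386 the text is linked and mapped at addresses that differ by a build-time constant (elsewhere it is an identity mapping). A hedged sketch with a placeholder constant:

```c
#define KTEXT_OFFSET 0UL	/* placeholder; arch/config dependent */
#define ktla_ktva_sketch(a) \
	((void *)((unsigned long)(a) + KTEXT_OFFSET))
```
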
48062diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
48063index 3ef4627..8d00486 100644
48064--- a/drivers/misc/lis3lv02d/lis3lv02d.c
48065+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
48066@@ -497,7 +497,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
48067 * the lid is closed. This leads to interrupts as soon as a little move
48068 * is done.
48069 */
48070- atomic_inc(&lis3->count);
48071+ atomic_inc_unchecked(&lis3->count);
48072
48073 wake_up_interruptible(&lis3->misc_wait);
48074 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
48075@@ -583,7 +583,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
48076 if (lis3->pm_dev)
48077 pm_runtime_get_sync(lis3->pm_dev);
48078
48079- atomic_set(&lis3->count, 0);
48080+ atomic_set_unchecked(&lis3->count, 0);
48081 return 0;
48082 }
48083
48084@@ -615,7 +615,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
48085 add_wait_queue(&lis3->misc_wait, &wait);
48086 while (true) {
48087 set_current_state(TASK_INTERRUPTIBLE);
48088- data = atomic_xchg(&lis3->count, 0);
48089+ data = atomic_xchg_unchecked(&lis3->count, 0);
48090 if (data)
48091 break;
48092
48093@@ -656,7 +656,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
48094 struct lis3lv02d, miscdev);
48095
48096 poll_wait(file, &lis3->misc_wait, wait);
48097- if (atomic_read(&lis3->count))
48098+ if (atomic_read_unchecked(&lis3->count))
48099 return POLLIN | POLLRDNORM;
48100 return 0;
48101 }
48102diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
48103index c439c82..1f20f57 100644
48104--- a/drivers/misc/lis3lv02d/lis3lv02d.h
48105+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
48106@@ -297,7 +297,7 @@ struct lis3lv02d {
48107 struct input_polled_dev *idev; /* input device */
48108 struct platform_device *pdev; /* platform device */
48109 struct regulator_bulk_data regulators[2];
48110- atomic_t count; /* interrupt count after last read */
48111+ atomic_unchecked_t count; /* interrupt count after last read */
48112 union axis_conversion ac; /* hw -> logical axis */
48113 int mapped_btns[3];
48114
48115diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
48116index 2f30bad..c4c13d0 100644
48117--- a/drivers/misc/sgi-gru/gruhandles.c
48118+++ b/drivers/misc/sgi-gru/gruhandles.c
48119@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
48120 unsigned long nsec;
48121
48122 nsec = CLKS2NSEC(clks);
48123- atomic_long_inc(&mcs_op_statistics[op].count);
48124- atomic_long_add(nsec, &mcs_op_statistics[op].total);
48125+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
48126+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
48127 if (mcs_op_statistics[op].max < nsec)
48128 mcs_op_statistics[op].max = nsec;
48129 }
48130diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
48131index 4f76359..cdfcb2e 100644
48132--- a/drivers/misc/sgi-gru/gruprocfs.c
48133+++ b/drivers/misc/sgi-gru/gruprocfs.c
48134@@ -32,9 +32,9 @@
48135
48136 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
48137
48138-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
48139+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
48140 {
48141- unsigned long val = atomic_long_read(v);
48142+ unsigned long val = atomic_long_read_unchecked(v);
48143
48144 seq_printf(s, "%16lu %s\n", val, id);
48145 }
48146@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
48147
48148 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
48149 for (op = 0; op < mcsop_last; op++) {
48150- count = atomic_long_read(&mcs_op_statistics[op].count);
48151- total = atomic_long_read(&mcs_op_statistics[op].total);
48152+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
48153+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
48154 max = mcs_op_statistics[op].max;
48155 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
48156 count ? total / count : 0, max);
48157diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
48158index 5c3ce24..4915ccb 100644
48159--- a/drivers/misc/sgi-gru/grutables.h
48160+++ b/drivers/misc/sgi-gru/grutables.h
48161@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
48162 * GRU statistics.
48163 */
48164 struct gru_stats_s {
48165- atomic_long_t vdata_alloc;
48166- atomic_long_t vdata_free;
48167- atomic_long_t gts_alloc;
48168- atomic_long_t gts_free;
48169- atomic_long_t gms_alloc;
48170- atomic_long_t gms_free;
48171- atomic_long_t gts_double_allocate;
48172- atomic_long_t assign_context;
48173- atomic_long_t assign_context_failed;
48174- atomic_long_t free_context;
48175- atomic_long_t load_user_context;
48176- atomic_long_t load_kernel_context;
48177- atomic_long_t lock_kernel_context;
48178- atomic_long_t unlock_kernel_context;
48179- atomic_long_t steal_user_context;
48180- atomic_long_t steal_kernel_context;
48181- atomic_long_t steal_context_failed;
48182- atomic_long_t nopfn;
48183- atomic_long_t asid_new;
48184- atomic_long_t asid_next;
48185- atomic_long_t asid_wrap;
48186- atomic_long_t asid_reuse;
48187- atomic_long_t intr;
48188- atomic_long_t intr_cbr;
48189- atomic_long_t intr_tfh;
48190- atomic_long_t intr_spurious;
48191- atomic_long_t intr_mm_lock_failed;
48192- atomic_long_t call_os;
48193- atomic_long_t call_os_wait_queue;
48194- atomic_long_t user_flush_tlb;
48195- atomic_long_t user_unload_context;
48196- atomic_long_t user_exception;
48197- atomic_long_t set_context_option;
48198- atomic_long_t check_context_retarget_intr;
48199- atomic_long_t check_context_unload;
48200- atomic_long_t tlb_dropin;
48201- atomic_long_t tlb_preload_page;
48202- atomic_long_t tlb_dropin_fail_no_asid;
48203- atomic_long_t tlb_dropin_fail_upm;
48204- atomic_long_t tlb_dropin_fail_invalid;
48205- atomic_long_t tlb_dropin_fail_range_active;
48206- atomic_long_t tlb_dropin_fail_idle;
48207- atomic_long_t tlb_dropin_fail_fmm;
48208- atomic_long_t tlb_dropin_fail_no_exception;
48209- atomic_long_t tfh_stale_on_fault;
48210- atomic_long_t mmu_invalidate_range;
48211- atomic_long_t mmu_invalidate_page;
48212- atomic_long_t flush_tlb;
48213- atomic_long_t flush_tlb_gru;
48214- atomic_long_t flush_tlb_gru_tgh;
48215- atomic_long_t flush_tlb_gru_zero_asid;
48216+ atomic_long_unchecked_t vdata_alloc;
48217+ atomic_long_unchecked_t vdata_free;
48218+ atomic_long_unchecked_t gts_alloc;
48219+ atomic_long_unchecked_t gts_free;
48220+ atomic_long_unchecked_t gms_alloc;
48221+ atomic_long_unchecked_t gms_free;
48222+ atomic_long_unchecked_t gts_double_allocate;
48223+ atomic_long_unchecked_t assign_context;
48224+ atomic_long_unchecked_t assign_context_failed;
48225+ atomic_long_unchecked_t free_context;
48226+ atomic_long_unchecked_t load_user_context;
48227+ atomic_long_unchecked_t load_kernel_context;
48228+ atomic_long_unchecked_t lock_kernel_context;
48229+ atomic_long_unchecked_t unlock_kernel_context;
48230+ atomic_long_unchecked_t steal_user_context;
48231+ atomic_long_unchecked_t steal_kernel_context;
48232+ atomic_long_unchecked_t steal_context_failed;
48233+ atomic_long_unchecked_t nopfn;
48234+ atomic_long_unchecked_t asid_new;
48235+ atomic_long_unchecked_t asid_next;
48236+ atomic_long_unchecked_t asid_wrap;
48237+ atomic_long_unchecked_t asid_reuse;
48238+ atomic_long_unchecked_t intr;
48239+ atomic_long_unchecked_t intr_cbr;
48240+ atomic_long_unchecked_t intr_tfh;
48241+ atomic_long_unchecked_t intr_spurious;
48242+ atomic_long_unchecked_t intr_mm_lock_failed;
48243+ atomic_long_unchecked_t call_os;
48244+ atomic_long_unchecked_t call_os_wait_queue;
48245+ atomic_long_unchecked_t user_flush_tlb;
48246+ atomic_long_unchecked_t user_unload_context;
48247+ atomic_long_unchecked_t user_exception;
48248+ atomic_long_unchecked_t set_context_option;
48249+ atomic_long_unchecked_t check_context_retarget_intr;
48250+ atomic_long_unchecked_t check_context_unload;
48251+ atomic_long_unchecked_t tlb_dropin;
48252+ atomic_long_unchecked_t tlb_preload_page;
48253+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
48254+ atomic_long_unchecked_t tlb_dropin_fail_upm;
48255+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
48256+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
48257+ atomic_long_unchecked_t tlb_dropin_fail_idle;
48258+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
48259+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
48260+ atomic_long_unchecked_t tfh_stale_on_fault;
48261+ atomic_long_unchecked_t mmu_invalidate_range;
48262+ atomic_long_unchecked_t mmu_invalidate_page;
48263+ atomic_long_unchecked_t flush_tlb;
48264+ atomic_long_unchecked_t flush_tlb_gru;
48265+ atomic_long_unchecked_t flush_tlb_gru_tgh;
48266+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
48267
48268- atomic_long_t copy_gpa;
48269- atomic_long_t read_gpa;
48270+ atomic_long_unchecked_t copy_gpa;
48271+ atomic_long_unchecked_t read_gpa;
48272
48273- atomic_long_t mesq_receive;
48274- atomic_long_t mesq_receive_none;
48275- atomic_long_t mesq_send;
48276- atomic_long_t mesq_send_failed;
48277- atomic_long_t mesq_noop;
48278- atomic_long_t mesq_send_unexpected_error;
48279- atomic_long_t mesq_send_lb_overflow;
48280- atomic_long_t mesq_send_qlimit_reached;
48281- atomic_long_t mesq_send_amo_nacked;
48282- atomic_long_t mesq_send_put_nacked;
48283- atomic_long_t mesq_page_overflow;
48284- atomic_long_t mesq_qf_locked;
48285- atomic_long_t mesq_qf_noop_not_full;
48286- atomic_long_t mesq_qf_switch_head_failed;
48287- atomic_long_t mesq_qf_unexpected_error;
48288- atomic_long_t mesq_noop_unexpected_error;
48289- atomic_long_t mesq_noop_lb_overflow;
48290- atomic_long_t mesq_noop_qlimit_reached;
48291- atomic_long_t mesq_noop_amo_nacked;
48292- atomic_long_t mesq_noop_put_nacked;
48293- atomic_long_t mesq_noop_page_overflow;
48294+ atomic_long_unchecked_t mesq_receive;
48295+ atomic_long_unchecked_t mesq_receive_none;
48296+ atomic_long_unchecked_t mesq_send;
48297+ atomic_long_unchecked_t mesq_send_failed;
48298+ atomic_long_unchecked_t mesq_noop;
48299+ atomic_long_unchecked_t mesq_send_unexpected_error;
48300+ atomic_long_unchecked_t mesq_send_lb_overflow;
48301+ atomic_long_unchecked_t mesq_send_qlimit_reached;
48302+ atomic_long_unchecked_t mesq_send_amo_nacked;
48303+ atomic_long_unchecked_t mesq_send_put_nacked;
48304+ atomic_long_unchecked_t mesq_page_overflow;
48305+ atomic_long_unchecked_t mesq_qf_locked;
48306+ atomic_long_unchecked_t mesq_qf_noop_not_full;
48307+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
48308+ atomic_long_unchecked_t mesq_qf_unexpected_error;
48309+ atomic_long_unchecked_t mesq_noop_unexpected_error;
48310+ atomic_long_unchecked_t mesq_noop_lb_overflow;
48311+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
48312+ atomic_long_unchecked_t mesq_noop_amo_nacked;
48313+ atomic_long_unchecked_t mesq_noop_put_nacked;
48314+ atomic_long_unchecked_t mesq_noop_page_overflow;
48315
48316 };
48317
48318@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
48319 tghop_invalidate, mcsop_last};
48320
48321 struct mcs_op_statistic {
48322- atomic_long_t count;
48323- atomic_long_t total;
48324+ atomic_long_unchecked_t count;
48325+ atomic_long_unchecked_t total;
48326 unsigned long max;
48327 };
48328
48329@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
48330
48331 #define STAT(id) do { \
48332 if (gru_options & OPT_STATS) \
48333- atomic_long_inc(&gru_stats.id); \
48334+ atomic_long_inc_unchecked(&gru_stats.id); \
48335 } while (0)
48336
48337 #ifdef CONFIG_SGI_GRU_DEBUG
48338diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
48339index c862cd4..0d176fe 100644
48340--- a/drivers/misc/sgi-xp/xp.h
48341+++ b/drivers/misc/sgi-xp/xp.h
48342@@ -288,7 +288,7 @@ struct xpc_interface {
48343 xpc_notify_func, void *);
48344 void (*received) (short, int, void *);
48345 enum xp_retval (*partid_to_nasids) (short, void *);
48346-};
48347+} __no_const;
48348
48349 extern struct xpc_interface xpc_interface;
48350
48351diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
48352index 01be66d..e3a0c7e 100644
48353--- a/drivers/misc/sgi-xp/xp_main.c
48354+++ b/drivers/misc/sgi-xp/xp_main.c
48355@@ -78,13 +78,13 @@ xpc_notloaded(void)
48356 }
48357
48358 struct xpc_interface xpc_interface = {
48359- (void (*)(int))xpc_notloaded,
48360- (void (*)(int))xpc_notloaded,
48361- (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
48362- (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
48363+ .connect = (void (*)(int))xpc_notloaded,
48364+ .disconnect = (void (*)(int))xpc_notloaded,
48365+ .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
48366+ .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
48367 void *))xpc_notloaded,
48368- (void (*)(short, int, void *))xpc_notloaded,
48369- (enum xp_retval(*)(short, void *))xpc_notloaded
48370+ .received = (void (*)(short, int, void *))xpc_notloaded,
48371+ .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
48372 };
48373 EXPORT_SYMBOL_GPL(xpc_interface);
48374
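
Switching to designated initializers here is purely defensive readability: positional initialization silently misbinds if a callback is ever added to or reordered in struct xpc_interface, while named fields are checked by the compiler. The general shape:

```c
struct iface {
	void (*connect)(int);
	void (*disconnect)(int);
};

static void stub(int x) { (void)x; }

static struct iface demo = {
	.connect    = stub,
	.disconnect = stub,	/* order-independent, self-documenting */
};
```
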
48375diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
48376index b94d5f7..7f494c5 100644
48377--- a/drivers/misc/sgi-xp/xpc.h
48378+++ b/drivers/misc/sgi-xp/xpc.h
48379@@ -835,6 +835,7 @@ struct xpc_arch_operations {
48380 void (*received_payload) (struct xpc_channel *, void *);
48381 void (*notify_senders_of_disconnect) (struct xpc_channel *);
48382 };
48383+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
48384
48385 /* struct xpc_partition act_state values (for XPC HB) */
48386
48387@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
48388 /* found in xpc_main.c */
48389 extern struct device *xpc_part;
48390 extern struct device *xpc_chan;
48391-extern struct xpc_arch_operations xpc_arch_ops;
48392+extern xpc_arch_operations_no_const xpc_arch_ops;
48393 extern int xpc_disengage_timelimit;
48394 extern int xpc_disengage_timedout;
48395 extern int xpc_activate_IRQ_rcvd;
48396diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
48397index 82dc574..8539ab2 100644
48398--- a/drivers/misc/sgi-xp/xpc_main.c
48399+++ b/drivers/misc/sgi-xp/xpc_main.c
48400@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
48401 .notifier_call = xpc_system_die,
48402 };
48403
48404-struct xpc_arch_operations xpc_arch_ops;
48405+xpc_arch_operations_no_const xpc_arch_ops;
48406
48407 /*
48408 * Timer function to enforce the timelimit on the partition disengage.
48409@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
48410
48411 if (((die_args->trapnr == X86_TRAP_MF) ||
48412 (die_args->trapnr == X86_TRAP_XF)) &&
48413- !user_mode_vm(die_args->regs))
48414+ !user_mode(die_args->regs))
48415 xpc_die_deactivate();
48416
48417 break;
48418diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
48419index 452782b..0c10e40 100644
48420--- a/drivers/mmc/card/block.c
48421+++ b/drivers/mmc/card/block.c
48422@@ -574,7 +574,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
48423 if (idata->ic.postsleep_min_us)
48424 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
48425
48426- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
48427+ if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
48428 err = -EFAULT;
48429 goto cmd_rel_host;
48430 }
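
`ic_ptr->response` and `&(ic_ptr->response)` name the same bytes, but their types differ: the first decays to a plain element pointer, the second is a pointer-to-array, and the decayed form is the type-clean match for copy_to_user()'s `void __user *` destination. Userspace analogue, assuming a fixed-size array field as in struct mmc_ioc_cmd:

```c
#include <string.h>

struct ioc { unsigned int response[4]; };

void copy_resp(struct ioc *dst, const unsigned int *src)
{
	/* dst->response  : unsigned int *        (array decays)
	 * &dst->response : unsigned int (*)[4]   (same address, array type) */
	memcpy(dst->response, src, sizeof(dst->response));
}
```
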
48431diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
48432index f51b5ba..86614a7 100644
48433--- a/drivers/mmc/core/mmc_ops.c
48434+++ b/drivers/mmc/core/mmc_ops.c
48435@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
48436 void *data_buf;
48437 int is_on_stack;
48438
48439- is_on_stack = object_is_on_stack(buf);
48440+ is_on_stack = object_starts_on_stack(buf);
48441 if (is_on_stack) {
48442 /*
48443 * dma onto stack is unsafe/nonportable, but callers to this
48444diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
48445index 738fa24..1568451 100644
48446--- a/drivers/mmc/host/dw_mmc.h
48447+++ b/drivers/mmc/host/dw_mmc.h
48448@@ -257,5 +257,5 @@ struct dw_mci_drv_data {
48449 int (*parse_dt)(struct dw_mci *host);
48450 int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode,
48451 struct dw_mci_tuning_data *tuning_data);
48452-};
48453+} __do_const;
48454 #endif /* _DW_MMC_H_ */
48455diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
48456index 249ab80..9314ce1 100644
48457--- a/drivers/mmc/host/mmci.c
48458+++ b/drivers/mmc/host/mmci.c
48459@@ -1507,7 +1507,9 @@ static int mmci_probe(struct amba_device *dev,
48460 mmc->caps |= MMC_CAP_CMD23;
48461
48462 if (variant->busy_detect) {
48463- mmci_ops.card_busy = mmci_card_busy;
48464+ pax_open_kernel();
48465+ *(void **)&mmci_ops.card_busy = mmci_card_busy;
48466+ pax_close_kernel();
48467 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
48468 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
48469 mmc->max_busy_timeout = 0;
48470diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
48471index ccec0e3..199f9ce 100644
48472--- a/drivers/mmc/host/sdhci-esdhc-imx.c
48473+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
48474@@ -1034,9 +1034,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
48475 host->mmc->caps |= MMC_CAP_1_8V_DDR;
48476 }
48477
48478- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
48479- sdhci_esdhc_ops.platform_execute_tuning =
48480+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
48481+ pax_open_kernel();
48482+ *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
48483 esdhc_executing_tuning;
48484+ pax_close_kernel();
48485+ }
48486
48487 if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
48488 writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
48489diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
48490index fa5954a..56840e5 100644
48491--- a/drivers/mmc/host/sdhci-s3c.c
48492+++ b/drivers/mmc/host/sdhci-s3c.c
48493@@ -584,9 +584,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
48494 * we can use overriding functions instead of default.
48495 */
48496 if (sc->no_divider) {
48497- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
48498- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
48499- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
48500+ pax_open_kernel();
48501+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
48502+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
48503+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
48504+ pax_close_kernel();
48505 }
48506
48507 /* It supports additional host capabilities if needed */
48508diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
48509index 423666b..81ff5eb 100644
48510--- a/drivers/mtd/chips/cfi_cmdset_0020.c
48511+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
48512@@ -666,7 +666,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
48513 size_t totlen = 0, thislen;
48514 int ret = 0;
48515 size_t buflen = 0;
48516- static char *buffer;
48517+ char *buffer;
48518
48519 if (!ECCBUF_SIZE) {
48520 /* We should fall back to a general writev implementation.
48521diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
48522index 9f2012a..a81c720 100644
48523--- a/drivers/mtd/nand/denali.c
48524+++ b/drivers/mtd/nand/denali.c
48525@@ -24,6 +24,7 @@
48526 #include <linux/slab.h>
48527 #include <linux/mtd/mtd.h>
48528 #include <linux/module.h>
48529+#include <linux/slab.h>
48530
48531 #include "denali.h"
48532
48533diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
48534index f638cd8..2cbf586 100644
48535--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
48536+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
48537@@ -387,7 +387,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
48538
48539 /* first try to map the upper buffer directly */
48540 if (virt_addr_valid(this->upper_buf) &&
48541- !object_is_on_stack(this->upper_buf)) {
48542+ !object_starts_on_stack(this->upper_buf)) {
48543 sg_init_one(sgl, this->upper_buf, this->upper_len);
48544 ret = dma_map_sg(this->dev, sgl, 1, dr);
48545 if (ret == 0)
48546diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
48547index 51b9d6a..52af9a7 100644
48548--- a/drivers/mtd/nftlmount.c
48549+++ b/drivers/mtd/nftlmount.c
48550@@ -24,6 +24,7 @@
48551 #include <asm/errno.h>
48552 #include <linux/delay.h>
48553 #include <linux/slab.h>
48554+#include <linux/sched.h>
48555 #include <linux/mtd/mtd.h>
48556 #include <linux/mtd/nand.h>
48557 #include <linux/mtd/nftl.h>
48558diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
48559index cf49c22..971b133 100644
48560--- a/drivers/mtd/sm_ftl.c
48561+++ b/drivers/mtd/sm_ftl.c
48562@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
48563 #define SM_CIS_VENDOR_OFFSET 0x59
48564 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
48565 {
48566- struct attribute_group *attr_group;
48567+ attribute_group_no_const *attr_group;
48568 struct attribute **attributes;
48569 struct sm_sysfs_attribute *vendor_attribute;
48570 char *vendor;
48571diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
48572index 5ab3c18..5c3a836 100644
48573--- a/drivers/net/bonding/bond_netlink.c
48574+++ b/drivers/net/bonding/bond_netlink.c
48575@@ -542,7 +542,7 @@ nla_put_failure:
48576 return -EMSGSIZE;
48577 }
48578
48579-struct rtnl_link_ops bond_link_ops __read_mostly = {
48580+struct rtnl_link_ops bond_link_ops = {
48581 .kind = "bond",
48582 .priv_size = sizeof(struct bonding),
48583 .setup = bond_setup,
48584diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
48585index 4168822..f38eeddf 100644
48586--- a/drivers/net/can/Kconfig
48587+++ b/drivers/net/can/Kconfig
48588@@ -98,7 +98,7 @@ config CAN_JANZ_ICAN3
48589
48590 config CAN_FLEXCAN
48591 tristate "Support for Freescale FLEXCAN based chips"
48592- depends on ARM || PPC
48593+ depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
48594 ---help---
48595 Say Y here if you want support for Freescale FlexCAN.
48596
48597diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
48598index 1d162cc..b546a75 100644
48599--- a/drivers/net/ethernet/8390/ax88796.c
48600+++ b/drivers/net/ethernet/8390/ax88796.c
48601@@ -889,9 +889,11 @@ static int ax_probe(struct platform_device *pdev)
48602 if (ax->plat->reg_offsets)
48603 ei_local->reg_offset = ax->plat->reg_offsets;
48604 else {
48605+ resource_size_t _mem_size = mem_size;
48606+ do_div(_mem_size, 0x18);
48607 ei_local->reg_offset = ax->reg_offsets;
48608 for (ret = 0; ret < 0x18; ret++)
48609- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
48610+ ax->reg_offsets[ret] = _mem_size * ret;
48611 }
48612
48613 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
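
resource_size_t can be 64 bits wide even on 32-bit targets, where a plain `/` would pull in libgcc division helpers the kernel does not link; do_div() divides a 64-bit lvalue by a 32-bit divisor in place and evaluates to the remainder. Sketch of the idiom:

```c
u64 stride = mem_size;		/* resource_size_t may be 64-bit */
u32 rem = do_div(stride, 0x18);	/* stride /= 0x18; rem = old % 0x18 */
/* stride is now the per-register offset applied in the loop above */
```
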
48614diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
48615index 7330681..7e9e463 100644
48616--- a/drivers/net/ethernet/altera/altera_tse_main.c
48617+++ b/drivers/net/ethernet/altera/altera_tse_main.c
48618@@ -1182,7 +1182,7 @@ static int tse_shutdown(struct net_device *dev)
48619 return 0;
48620 }
48621
48622-static struct net_device_ops altera_tse_netdev_ops = {
48623+static net_device_ops_no_const altera_tse_netdev_ops __read_only = {
48624 .ndo_open = tse_open,
48625 .ndo_stop = tse_shutdown,
48626 .ndo_start_xmit = tse_start_xmit,
48627@@ -1439,11 +1439,13 @@ static int altera_tse_probe(struct platform_device *pdev)
48628 ndev->netdev_ops = &altera_tse_netdev_ops;
48629 altera_tse_set_ethtool_ops(ndev);
48630
48631+ pax_open_kernel();
48632 altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
48633
48634 if (priv->hash_filter)
48635 altera_tse_netdev_ops.ndo_set_rx_mode =
48636 tse_set_rx_mode_hashfilter;
48637+ pax_close_kernel();
48638
48639 /* Scatter/gather IO is not supported,
48640 * so it is turned off
48641diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48642index bf462ee8..18b8375 100644
48643--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48644+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48645@@ -986,14 +986,14 @@ do { \
48646 * operations, everything works on mask values.
48647 */
48648 #define XMDIO_READ(_pdata, _mmd, _reg) \
48649- ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
48650+ ((_pdata)->hw_if->read_mmd_regs((_pdata), 0, \
48651 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
48652
48653 #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
48654 (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
48655
48656 #define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
48657- ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
48658+ ((_pdata)->hw_if->write_mmd_regs((_pdata), 0, \
48659 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
48660
48661 #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
48662diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
48663index 6bb76d5..ded47a8 100644
48664--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
48665+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
48666@@ -273,7 +273,7 @@ static ssize_t xpcs_reg_value_read(struct file *filp, char __user *buffer,
48667 struct xgbe_prv_data *pdata = filp->private_data;
48668 unsigned int value;
48669
48670- value = pdata->hw_if.read_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
48671+ value = pdata->hw_if->read_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
48672 pdata->debugfs_xpcs_reg);
48673
48674 return xgbe_common_read(buffer, count, ppos, value);
48675@@ -291,7 +291,7 @@ static ssize_t xpcs_reg_value_write(struct file *filp,
48676 if (len < 0)
48677 return len;
48678
48679- pdata->hw_if.write_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
48680+ pdata->hw_if->write_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
48681 pdata->debugfs_xpcs_reg, value);
48682
48683 return len;
48684diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48685index 6f1c859..e96ac1a 100644
48686--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48687+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48688@@ -236,7 +236,7 @@ err_ring:
48689
48690 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
48691 {
48692- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48693+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48694 struct xgbe_channel *channel;
48695 struct xgbe_ring *ring;
48696 struct xgbe_ring_data *rdata;
48697@@ -277,7 +277,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
48698
48699 static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
48700 {
48701- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48702+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48703 struct xgbe_channel *channel;
48704 struct xgbe_ring *ring;
48705 struct xgbe_ring_desc *rdesc;
48706@@ -496,7 +496,7 @@ err_out:
48707 static void xgbe_realloc_skb(struct xgbe_channel *channel)
48708 {
48709 struct xgbe_prv_data *pdata = channel->pdata;
48710- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48711+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48712 struct xgbe_ring *ring = channel->rx_ring;
48713 struct xgbe_ring_data *rdata;
48714 struct sk_buff *skb = NULL;
48715@@ -540,17 +540,12 @@ static void xgbe_realloc_skb(struct xgbe_channel *channel)
48716 DBGPR("<--xgbe_realloc_skb\n");
48717 }
48718
48719-void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
48720-{
48721- DBGPR("-->xgbe_init_function_ptrs_desc\n");
48722-
48723- desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
48724- desc_if->free_ring_resources = xgbe_free_ring_resources;
48725- desc_if->map_tx_skb = xgbe_map_tx_skb;
48726- desc_if->realloc_skb = xgbe_realloc_skb;
48727- desc_if->unmap_skb = xgbe_unmap_skb;
48728- desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
48729- desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
48730-
48731- DBGPR("<--xgbe_init_function_ptrs_desc\n");
48732-}
48733+const struct xgbe_desc_if default_xgbe_desc_if = {
48734+ .alloc_ring_resources = xgbe_alloc_ring_resources,
48735+ .free_ring_resources = xgbe_free_ring_resources,
48736+ .map_tx_skb = xgbe_map_tx_skb,
48737+ .realloc_skb = xgbe_realloc_skb,
48738+ .unmap_skb = xgbe_unmap_skb,
48739+ .wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init,
48740+ .wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init,
48741+};
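
This is the pattern applied across the xgbe hunks: a setup helper that filled a writable function-pointer table at runtime becomes a const object built with designated initializers, so the table can live in read-only memory and is never a write target; callers correspondingly switch from an embedded struct (`&pdata->desc_if`) to a pointer at the shared const instance. Reduced to its shape:

```c
struct desc_ops {
	int  (*alloc)(void);
	void (*free)(void);
};

static int  demo_alloc(void) { return 0; }
static void demo_free(void)  { }

static const struct desc_ops default_desc_ops = {
	.alloc = demo_alloc,
	.free  = demo_free,
};
```
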
48742diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48743index 002293b..5ced1dd 100644
48744--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48745+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48746@@ -2030,7 +2030,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
48747
48748 static int xgbe_init(struct xgbe_prv_data *pdata)
48749 {
48750- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48751+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48752 int ret;
48753
48754 DBGPR("-->xgbe_init\n");
48755@@ -2096,87 +2096,82 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
48756 return 0;
48757 }
48758
48759-void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
48760-{
48761- DBGPR("-->xgbe_init_function_ptrs\n");
48762-
48763- hw_if->tx_complete = xgbe_tx_complete;
48764-
48765- hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
48766- hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
48767- hw_if->set_addn_mac_addrs = xgbe_set_addn_mac_addrs;
48768- hw_if->set_mac_address = xgbe_set_mac_address;
48769-
48770- hw_if->enable_rx_csum = xgbe_enable_rx_csum;
48771- hw_if->disable_rx_csum = xgbe_disable_rx_csum;
48772-
48773- hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
48774- hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
48775-
48776- hw_if->read_mmd_regs = xgbe_read_mmd_regs;
48777- hw_if->write_mmd_regs = xgbe_write_mmd_regs;
48778-
48779- hw_if->set_gmii_speed = xgbe_set_gmii_speed;
48780- hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
48781- hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
48782-
48783- hw_if->enable_tx = xgbe_enable_tx;
48784- hw_if->disable_tx = xgbe_disable_tx;
48785- hw_if->enable_rx = xgbe_enable_rx;
48786- hw_if->disable_rx = xgbe_disable_rx;
48787-
48788- hw_if->powerup_tx = xgbe_powerup_tx;
48789- hw_if->powerdown_tx = xgbe_powerdown_tx;
48790- hw_if->powerup_rx = xgbe_powerup_rx;
48791- hw_if->powerdown_rx = xgbe_powerdown_rx;
48792-
48793- hw_if->pre_xmit = xgbe_pre_xmit;
48794- hw_if->dev_read = xgbe_dev_read;
48795- hw_if->enable_int = xgbe_enable_int;
48796- hw_if->disable_int = xgbe_disable_int;
48797- hw_if->init = xgbe_init;
48798- hw_if->exit = xgbe_exit;
48799+const struct xgbe_hw_if default_xgbe_hw_if = {
48800+ .tx_complete = xgbe_tx_complete,
48801+
48802+ .set_promiscuous_mode = xgbe_set_promiscuous_mode,
48803+ .set_all_multicast_mode = xgbe_set_all_multicast_mode,
48804+ .set_addn_mac_addrs = xgbe_set_addn_mac_addrs,
48805+ .set_mac_address = xgbe_set_mac_address,
48806+
48807+ .enable_rx_csum = xgbe_enable_rx_csum,
48808+ .disable_rx_csum = xgbe_disable_rx_csum,
48809+
48810+ .enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping,
48811+ .disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping,
48812+
48813+ .read_mmd_regs = xgbe_read_mmd_regs,
48814+ .write_mmd_regs = xgbe_write_mmd_regs,
48815+
48816+ .set_gmii_speed = xgbe_set_gmii_speed,
48817+ .set_gmii_2500_speed = xgbe_set_gmii_2500_speed,
48818+ .set_xgmii_speed = xgbe_set_xgmii_speed,
48819+
48820+ .enable_tx = xgbe_enable_tx,
48821+ .disable_tx = xgbe_disable_tx,
48822+ .enable_rx = xgbe_enable_rx,
48823+ .disable_rx = xgbe_disable_rx,
48824+
48825+ .powerup_tx = xgbe_powerup_tx,
48826+ .powerdown_tx = xgbe_powerdown_tx,
48827+ .powerup_rx = xgbe_powerup_rx,
48828+ .powerdown_rx = xgbe_powerdown_rx,
48829+
48830+ .pre_xmit = xgbe_pre_xmit,
48831+ .dev_read = xgbe_dev_read,
48832+ .enable_int = xgbe_enable_int,
48833+ .disable_int = xgbe_disable_int,
48834+ .init = xgbe_init,
48835+ .exit = xgbe_exit,
48836
48837 /* Descriptor related Sequences have to be initialized here */
48838- hw_if->tx_desc_init = xgbe_tx_desc_init;
48839- hw_if->rx_desc_init = xgbe_rx_desc_init;
48840- hw_if->tx_desc_reset = xgbe_tx_desc_reset;
48841- hw_if->rx_desc_reset = xgbe_rx_desc_reset;
48842- hw_if->is_last_desc = xgbe_is_last_desc;
48843- hw_if->is_context_desc = xgbe_is_context_desc;
48844+ .tx_desc_init = xgbe_tx_desc_init,
48845+ .rx_desc_init = xgbe_rx_desc_init,
48846+ .tx_desc_reset = xgbe_tx_desc_reset,
48847+ .rx_desc_reset = xgbe_rx_desc_reset,
48848+ .is_last_desc = xgbe_is_last_desc,
48849+ .is_context_desc = xgbe_is_context_desc,
48850
48851 /* For FLOW ctrl */
48852- hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
48853- hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
48854+ .config_tx_flow_control = xgbe_config_tx_flow_control,
48855+ .config_rx_flow_control = xgbe_config_rx_flow_control,
48856
48857 /* For RX coalescing */
48858- hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
48859- hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
48860- hw_if->usec_to_riwt = xgbe_usec_to_riwt;
48861- hw_if->riwt_to_usec = xgbe_riwt_to_usec;
48862+ .config_rx_coalesce = xgbe_config_rx_coalesce,
48863+ .config_tx_coalesce = xgbe_config_tx_coalesce,
48864+ .usec_to_riwt = xgbe_usec_to_riwt,
48865+ .riwt_to_usec = xgbe_riwt_to_usec,
48866
48867 /* For RX and TX threshold config */
48868- hw_if->config_rx_threshold = xgbe_config_rx_threshold;
48869- hw_if->config_tx_threshold = xgbe_config_tx_threshold;
48870+ .config_rx_threshold = xgbe_config_rx_threshold,
48871+ .config_tx_threshold = xgbe_config_tx_threshold,
48872
48873 /* For RX and TX Store and Forward Mode config */
48874- hw_if->config_rsf_mode = xgbe_config_rsf_mode;
48875- hw_if->config_tsf_mode = xgbe_config_tsf_mode;
48876+ .config_rsf_mode = xgbe_config_rsf_mode,
48877+ .config_tsf_mode = xgbe_config_tsf_mode,
48878
48879 /* For TX DMA Operating on Second Frame config */
48880- hw_if->config_osp_mode = xgbe_config_osp_mode;
48881+ .config_osp_mode = xgbe_config_osp_mode,
48882
48883 /* For RX and TX PBL config */
48884- hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
48885- hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
48886- hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
48887- hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
48888- hw_if->config_pblx8 = xgbe_config_pblx8;
48889+ .config_rx_pbl_val = xgbe_config_rx_pbl_val,
48890+ .get_rx_pbl_val = xgbe_get_rx_pbl_val,
48891+ .config_tx_pbl_val = xgbe_config_tx_pbl_val,
48892+ .get_tx_pbl_val = xgbe_get_tx_pbl_val,
48893+ .config_pblx8 = xgbe_config_pblx8,
48894
48895 /* For MMC statistics support */
48896- hw_if->tx_mmc_int = xgbe_tx_mmc_int;
48897- hw_if->rx_mmc_int = xgbe_rx_mmc_int;
48898- hw_if->read_mmc_stats = xgbe_read_mmc_stats;
48899-
48900- DBGPR("<--xgbe_init_function_ptrs\n");
48901-}
48902+ .tx_mmc_int = xgbe_tx_mmc_int,
48903+ .rx_mmc_int = xgbe_rx_mmc_int,
48904+ .read_mmc_stats = xgbe_read_mmc_stats,
48905+};
48906diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48907index cfe3d93..07a78ae 100644
48908--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48909+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48910@@ -153,7 +153,7 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
48911
48912 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
48913 {
48914- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48915+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48916 struct xgbe_channel *channel;
48917 unsigned int i;
48918
48919@@ -170,7 +170,7 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
48920
48921 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
48922 {
48923- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48924+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48925 struct xgbe_channel *channel;
48926 unsigned int i;
48927
48928@@ -188,7 +188,7 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
48929 static irqreturn_t xgbe_isr(int irq, void *data)
48930 {
48931 struct xgbe_prv_data *pdata = data;
48932- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48933+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48934 struct xgbe_channel *channel;
48935 unsigned int dma_isr, dma_ch_isr;
48936 unsigned int mac_isr;
48937@@ -403,7 +403,7 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata)
48938
48939 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
48940 {
48941- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48942+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48943
48944 DBGPR("-->xgbe_init_tx_coalesce\n");
48945
48946@@ -417,7 +417,7 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
48947
48948 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
48949 {
48950- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48951+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48952
48953 DBGPR("-->xgbe_init_rx_coalesce\n");
48954
48955@@ -431,7 +431,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
48956
48957 static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
48958 {
48959- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48960+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48961 struct xgbe_channel *channel;
48962 struct xgbe_ring *ring;
48963 struct xgbe_ring_data *rdata;
48964@@ -456,7 +456,7 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
48965
48966 static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
48967 {
48968- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48969+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48970 struct xgbe_channel *channel;
48971 struct xgbe_ring *ring;
48972 struct xgbe_ring_data *rdata;
48973@@ -482,7 +482,7 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
48974 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
48975 {
48976 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48977- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48978+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48979 unsigned long flags;
48980
48981 DBGPR("-->xgbe_powerdown\n");
48982@@ -520,7 +520,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
48983 int xgbe_powerup(struct net_device *netdev, unsigned int caller)
48984 {
48985 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48986- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48987+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48988 unsigned long flags;
48989
48990 DBGPR("-->xgbe_powerup\n");
48991@@ -557,7 +557,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
48992
48993 static int xgbe_start(struct xgbe_prv_data *pdata)
48994 {
48995- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48996+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48997 struct net_device *netdev = pdata->netdev;
48998
48999 DBGPR("-->xgbe_start\n");
49000@@ -583,7 +583,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
49001
49002 static void xgbe_stop(struct xgbe_prv_data *pdata)
49003 {
49004- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49005+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49006 struct net_device *netdev = pdata->netdev;
49007
49008 DBGPR("-->xgbe_stop\n");
49009@@ -603,7 +603,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
49010
49011 static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
49012 {
49013- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49014+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49015
49016 DBGPR("-->xgbe_restart_dev\n");
49017
49018@@ -741,8 +741,8 @@ static void xgbe_packet_info(struct xgbe_ring *ring, struct sk_buff *skb,
49019 static int xgbe_open(struct net_device *netdev)
49020 {
49021 struct xgbe_prv_data *pdata = netdev_priv(netdev);
49022- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49023- struct xgbe_desc_if *desc_if = &pdata->desc_if;
49024+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49025+ struct xgbe_desc_if *desc_if = pdata->desc_if;
49026 int ret;
49027
49028 DBGPR("-->xgbe_open\n");
49029@@ -804,8 +804,8 @@ err_clk:
49030 static int xgbe_close(struct net_device *netdev)
49031 {
49032 struct xgbe_prv_data *pdata = netdev_priv(netdev);
49033- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49034- struct xgbe_desc_if *desc_if = &pdata->desc_if;
49035+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49036+ struct xgbe_desc_if *desc_if = pdata->desc_if;
49037
49038 DBGPR("-->xgbe_close\n");
49039
49040@@ -835,8 +835,8 @@ static int xgbe_close(struct net_device *netdev)
49041 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
49042 {
49043 struct xgbe_prv_data *pdata = netdev_priv(netdev);
49044- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49045- struct xgbe_desc_if *desc_if = &pdata->desc_if;
49046+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49047+ struct xgbe_desc_if *desc_if = pdata->desc_if;
49048 struct xgbe_channel *channel;
49049 struct xgbe_ring *ring;
49050 struct xgbe_packet_data *packet;
49051@@ -903,7 +903,7 @@ tx_netdev_return:
49052 static void xgbe_set_rx_mode(struct net_device *netdev)
49053 {
49054 struct xgbe_prv_data *pdata = netdev_priv(netdev);
49055- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49056+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49057 unsigned int pr_mode, am_mode;
49058
49059 DBGPR("-->xgbe_set_rx_mode\n");
49060@@ -930,7 +930,7 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
49061 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
49062 {
49063 struct xgbe_prv_data *pdata = netdev_priv(netdev);
49064- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49065+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49066 struct sockaddr *saddr = addr;
49067
49068 DBGPR("-->xgbe_set_mac_address\n");
49069@@ -976,7 +976,7 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
49070
49071 DBGPR("-->%s\n", __func__);
49072
49073- pdata->hw_if.read_mmc_stats(pdata);
49074+ pdata->hw_if->read_mmc_stats(pdata);
49075
49076 s->rx_packets = pstats->rxframecount_gb;
49077 s->rx_bytes = pstats->rxoctetcount_gb;
49078@@ -1020,7 +1020,7 @@ static int xgbe_set_features(struct net_device *netdev,
49079 netdev_features_t features)
49080 {
49081 struct xgbe_prv_data *pdata = netdev_priv(netdev);
49082- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49083+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49084 unsigned int rxcsum_enabled, rxvlan_enabled;
49085
49086 rxcsum_enabled = !!(pdata->netdev_features & NETIF_F_RXCSUM);
49087@@ -1072,8 +1072,8 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
49088 static int xgbe_tx_poll(struct xgbe_channel *channel)
49089 {
49090 struct xgbe_prv_data *pdata = channel->pdata;
49091- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49092- struct xgbe_desc_if *desc_if = &pdata->desc_if;
49093+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49094+ struct xgbe_desc_if *desc_if = pdata->desc_if;
49095 struct xgbe_ring *ring = channel->tx_ring;
49096 struct xgbe_ring_data *rdata;
49097 struct xgbe_ring_desc *rdesc;
49098@@ -1124,8 +1124,8 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
49099 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
49100 {
49101 struct xgbe_prv_data *pdata = channel->pdata;
49102- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49103- struct xgbe_desc_if *desc_if = &pdata->desc_if;
49104+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49105+ struct xgbe_desc_if *desc_if = pdata->desc_if;
49106 struct xgbe_ring *ring = channel->rx_ring;
49107 struct xgbe_ring_data *rdata;
49108 struct xgbe_packet_data *packet;
49109diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
49110index 8909f2b..719e767 100644
49111--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
49112+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
49113@@ -202,7 +202,7 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,
49114
49115 DBGPR("-->%s\n", __func__);
49116
49117- pdata->hw_if.read_mmc_stats(pdata);
49118+ pdata->hw_if->read_mmc_stats(pdata);
49119 for (i = 0; i < XGBE_STATS_COUNT; i++) {
49120 stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
49121 *data++ = *(u64 *)stat;
49122@@ -387,7 +387,7 @@ static int xgbe_get_coalesce(struct net_device *netdev,
49123 struct ethtool_coalesce *ec)
49124 {
49125 struct xgbe_prv_data *pdata = netdev_priv(netdev);
49126- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49127+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49128 unsigned int riwt;
49129
49130 DBGPR("-->xgbe_get_coalesce\n");
49131@@ -410,7 +410,7 @@ static int xgbe_set_coalesce(struct net_device *netdev,
49132 struct ethtool_coalesce *ec)
49133 {
49134 struct xgbe_prv_data *pdata = netdev_priv(netdev);
49135- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49136+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49137 unsigned int rx_frames, rx_riwt, rx_usecs;
49138 unsigned int tx_frames, tx_usecs;
49139
49140diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
49141index 5a1891f..1b7888e 100644
49142--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
49143+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
49144@@ -210,12 +210,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
49145 DBGPR("<--xgbe_default_config\n");
49146 }
49147
49148-static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
49149-{
49150- xgbe_init_function_ptrs_dev(&pdata->hw_if);
49151- xgbe_init_function_ptrs_desc(&pdata->desc_if);
49152-}
49153-
49154 static int xgbe_probe(struct platform_device *pdev)
49155 {
49156 struct xgbe_prv_data *pdata;
49157@@ -306,9 +300,8 @@ static int xgbe_probe(struct platform_device *pdev)
49158 netdev->base_addr = (unsigned long)pdata->xgmac_regs;
49159
49160 /* Set all the function pointers */
49161- xgbe_init_all_fptrs(pdata);
49162- hw_if = &pdata->hw_if;
49163- desc_if = &pdata->desc_if;
49164+ hw_if = pdata->hw_if = &default_xgbe_hw_if;
49165+ desc_if = pdata->desc_if = &default_xgbe_desc_if;
49166
49167 /* Issue software reset to device */
49168 hw_if->exit(pdata);
49169diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
49170index ea7a5d6..d10a742 100644
49171--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
49172+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
49173@@ -128,7 +128,7 @@
49174 static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
49175 {
49176 struct xgbe_prv_data *pdata = mii->priv;
49177- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49178+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49179 int mmd_data;
49180
49181 DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
49182@@ -145,7 +145,7 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
49183 u16 mmd_val)
49184 {
49185 struct xgbe_prv_data *pdata = mii->priv;
49186- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49187+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49188 int mmd_data = mmd_val;
49189
49190 DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
49191@@ -161,7 +161,7 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
49192 static void xgbe_adjust_link(struct net_device *netdev)
49193 {
49194 struct xgbe_prv_data *pdata = netdev_priv(netdev);
49195- struct xgbe_hw_if *hw_if = &pdata->hw_if;
49196+ struct xgbe_hw_if *hw_if = pdata->hw_if;
49197 struct phy_device *phydev = pdata->phydev;
49198 unsigned long flags;
49199 int new_state = 0;
49200diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
49201index ab06271..a560fa7 100644
49202--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
49203+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
49204@@ -527,8 +527,8 @@ struct xgbe_prv_data {
49205
49206 int irq_number;
49207
49208- struct xgbe_hw_if hw_if;
49209- struct xgbe_desc_if desc_if;
49210+ const struct xgbe_hw_if *hw_if;
49211+ const struct xgbe_desc_if *desc_if;
49212
49213 /* Rings for Tx/Rx on a DMA channel */
49214 struct xgbe_channel *channel;
49215@@ -611,6 +611,9 @@ struct xgbe_prv_data {
49216 #endif
49217 };
49218
49219+extern const struct xgbe_hw_if default_xgbe_hw_if;
49220+extern const struct xgbe_desc_if default_xgbe_desc_if;
49221+
49222 /* Function prototypes*/
49223
49224 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
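
The xgbe hunks above follow a pattern used throughout this patch: a function-pointer table that used to be filled in at probe time becomes a static const object built with designated initializers, and the private data keeps only a const pointer to it, so the table can live in read-only memory. A minimal sketch of the idea (toy names, not the driver's real types):

    /* Toy illustration of the constify pattern: the ops table is
     * compile-time constant, so it can be placed in a read-only section. */
    struct toy_ops {
            int (*start)(void *priv);
            void (*stop)(void *priv);
    };

    static int toy_start(void *priv) { return 0; }
    static void toy_stop(void *priv) { }

    /* Before: a writable table filled at runtime, one per device.
     * After: one shared, read-only instance. */
    static const struct toy_ops default_toy_ops = {
            .start = toy_start,
            .stop  = toy_stop,
    };

    struct toy_priv {
            const struct toy_ops *ops;      /* was: struct toy_ops ops; */
    };

    static void toy_probe(struct toy_priv *priv)
    {
            priv->ops = &default_toy_ops;   /* replaces the init_all_fptrs() helper */
    }

An attacker with a kernel write primitive can no longer redirect these pointers, since the table sits outside writable data; the same conversion recurs below for bna, vxge, lmc, and z85230.
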
49225diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
49226index 571427c..e9fe9e7 100644
49227--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
49228+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
49229@@ -1058,7 +1058,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
49230 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
49231 {
49232 /* RX_MODE controlling object */
49233- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
49234+ bnx2x_init_rx_mode_obj(bp);
49235
49236 /* multicast configuration controlling object */
49237 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
49238diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
49239index b193604..8873bfd 100644
49240--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
49241+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
49242@@ -2329,15 +2329,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
49243 return rc;
49244 }
49245
49246-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
49247- struct bnx2x_rx_mode_obj *o)
49248+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
49249 {
49250 if (CHIP_IS_E1x(bp)) {
49251- o->wait_comp = bnx2x_empty_rx_mode_wait;
49252- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
49253+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
49254+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
49255 } else {
49256- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
49257- o->config_rx_mode = bnx2x_set_rx_mode_e2;
49258+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
49259+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
49260 }
49261 }
49262
49263diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
49264index 718ecd2..2183b2f 100644
49265--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
49266+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
49267@@ -1340,8 +1340,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
49268
49269 /********************* RX MODE ****************/
49270
49271-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
49272- struct bnx2x_rx_mode_obj *o);
49273+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
49274
49275 /**
49276 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
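
The bnx2x change applies the same idea to an init helper: rather than receiving a pointer to an arbitrary rx_mode object, the function now writes through the adapter it owns, which removes the writable indirection at the call site. A sketch with hypothetical names mirroring bnx2x_init_rx_mode_obj:

    struct rx_mode_obj { int (*config)(void); };
    struct adapter { struct rx_mode_obj rx_mode_obj; int is_e1x; };

    static int cfg_e1x(void) { return 1; }
    static int cfg_e2(void)  { return 2; }

    /* Before: void init_rx_mode_obj(struct adapter *bp, struct rx_mode_obj *o);
     * After: the helper can only touch its owner's member. */
    static void init_rx_mode_obj(struct adapter *bp)
    {
            bp->rx_mode_obj.config = bp->is_e1x ? cfg_e1x : cfg_e2;
    }
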
49277diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
49278index 461acca..2b546ba 100644
49279--- a/drivers/net/ethernet/broadcom/tg3.h
49280+++ b/drivers/net/ethernet/broadcom/tg3.h
49281@@ -150,6 +150,7 @@
49282 #define CHIPREV_ID_5750_A0 0x4000
49283 #define CHIPREV_ID_5750_A1 0x4001
49284 #define CHIPREV_ID_5750_A3 0x4003
49285+#define CHIPREV_ID_5750_C1 0x4201
49286 #define CHIPREV_ID_5750_C2 0x4202
49287 #define CHIPREV_ID_5752_A0_HW 0x5000
49288 #define CHIPREV_ID_5752_A0 0x6000
49289diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
49290index 13f9636..228040f 100644
49291--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
49292+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
49293@@ -1690,10 +1690,10 @@ bna_cb_ioceth_reset(void *arg)
49294 }
49295
49296 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
49297- bna_cb_ioceth_enable,
49298- bna_cb_ioceth_disable,
49299- bna_cb_ioceth_hbfail,
49300- bna_cb_ioceth_reset
49301+ .enable_cbfn = bna_cb_ioceth_enable,
49302+ .disable_cbfn = bna_cb_ioceth_disable,
49303+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
49304+ .reset_cbfn = bna_cb_ioceth_reset
49305 };
49306
49307 static void bna_attr_init(struct bna_ioceth *ioceth)
49308diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
49309index 8cffcdf..aadf043 100644
49310--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
49311+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
49312@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
49313 */
49314 struct l2t_skb_cb {
49315 arp_failure_handler_func arp_failure_handler;
49316-};
49317+} __no_const;
49318
49319 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
49320
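
Adding __no_const to l2t_skb_cb (and later to the r8169 and r8152 ops structs) opts a structure out of the PaX constify plugin: these tables genuinely are written at runtime, so they cannot be made read-only wholesale. A sketch of how the annotation degrades gracefully when the plugin is absent, assuming the usual grsecurity compiler.h arrangement:

    /* Without the constify plugin the annotation is defined away;
     * with it, the attribute marks the type as legitimately mutable. */
    #ifndef __no_const
    #define __no_const
    #endif

    struct mutable_ops {
            void (*handler)(void *arg);   /* reassigned at runtime */
    } __no_const;                         /* plugin will not force this const */
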
49321diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
49322index a83271c..cf00874 100644
49323--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
49324+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
49325@@ -2174,7 +2174,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
49326
49327 int i;
49328 struct adapter *ap = netdev2adap(dev);
49329- static const unsigned int *reg_ranges;
49330+ const unsigned int *reg_ranges;
49331 int arr_size = 0, buf_size = 0;
49332
49333 if (is_t4(ap->params.chip)) {
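
Dropping `static` from the reg_ranges local (the qlcnic fw_dump_ops and airo writer hunks below do the same) removes a function-scope static that every caller shares: a static local pointer is a single writable cell, racy across concurrent ioctls and an attractive target once the surrounding tables are const. A toy illustration of the hazard:

    #include <stdio.h>

    /* Two "callers" pick different tables through one shared static
     * pointer; concurrent calls can change what each other observes. */
    static const int table_a[] = { 1, 2 };
    static const int table_b[] = { 3, 4 };

    const int *pick(int which)
    {
            static const int *ranges;    /* one cell shared by all calls */
            ranges = which ? table_b : table_a;
            return ranges;               /* fix: drop "static" above */
    }

    int main(void)
    {
            printf("%d\n", pick(0)[0]);  /* fine single-threaded, racy otherwise */
            return 0;
    }
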
49334diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
49335index c05b66d..ed69872 100644
49336--- a/drivers/net/ethernet/dec/tulip/de4x5.c
49337+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
49338@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
49339 for (i=0; i<ETH_ALEN; i++) {
49340 tmp.addr[i] = dev->dev_addr[i];
49341 }
49342- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
49343+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
49344 break;
49345
49346 case DE4X5_SET_HWADDR: /* Set the hardware address */
49347@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
49348 spin_lock_irqsave(&lp->lock, flags);
49349 memcpy(&statbuf, &lp->pktStats, ioc->len);
49350 spin_unlock_irqrestore(&lp->lock, flags);
49351- if (copy_to_user(ioc->data, &statbuf, ioc->len))
49352+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
49353 return -EFAULT;
49354 break;
49355 }
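
The de4x5 hunks bound the user-supplied ioc->len before copying out, so a request longer than the kernel-side buffer can no longer leak adjacent stack memory; the tun.c change further down applies the same rule to ifreq_len. A minimal sketch of the pattern, with a stand-in copy_to_user and hypothetical request type:

    #include <string.h>
    #include <stddef.h>

    /* Stand-in for the kernel's copy_to_user(): nonzero means fault. */
    static unsigned long copy_to_user(void *dst, const void *src, size_t n)
    {
            memcpy(dst, src, n);
            return 0;
    }

    struct ioctl_req { void *data; size_t len; };

    static int get_hwaddr(struct ioctl_req *ioc)
    {
            unsigned char addr[6] = { 0 };

            /* Reject oversized lengths before copying, instead of
             * trusting the user-controlled length field. */
            if (ioc->len > sizeof(addr) || copy_to_user(ioc->data, addr, ioc->len))
                    return -14;   /* -EFAULT */
            return 0;
    }
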
49356diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
49357index 1e187fb..d024547 100644
49358--- a/drivers/net/ethernet/emulex/benet/be_main.c
49359+++ b/drivers/net/ethernet/emulex/benet/be_main.c
49360@@ -533,7 +533,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
49361
49362 if (wrapped)
49363 newacc += 65536;
49364- ACCESS_ONCE(*acc) = newacc;
49365+ ACCESS_ONCE_RW(*acc) = newacc;
49366 }
49367
49368 static void populate_erx_stats(struct be_adapter *adapter,
49369diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
49370index c77fa4a..7fd42fc 100644
49371--- a/drivers/net/ethernet/faraday/ftgmac100.c
49372+++ b/drivers/net/ethernet/faraday/ftgmac100.c
49373@@ -30,6 +30,8 @@
49374 #include <linux/netdevice.h>
49375 #include <linux/phy.h>
49376 #include <linux/platform_device.h>
49377+#include <linux/interrupt.h>
49378+#include <linux/irqreturn.h>
49379 #include <net/ip.h>
49380
49381 #include "ftgmac100.h"
49382diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
49383index 4ff1adc..0ea6bf4 100644
49384--- a/drivers/net/ethernet/faraday/ftmac100.c
49385+++ b/drivers/net/ethernet/faraday/ftmac100.c
49386@@ -31,6 +31,8 @@
49387 #include <linux/module.h>
49388 #include <linux/netdevice.h>
49389 #include <linux/platform_device.h>
49390+#include <linux/interrupt.h>
49391+#include <linux/irqreturn.h>
49392
49393 #include "ftmac100.h"
49394
49395diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
49396index 101f439..59e7ec6 100644
49397--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
49398+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
49399@@ -401,7 +401,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
49400 wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
49401
49402 /* Update the base adjustement value. */
49403- ACCESS_ONCE(pf->ptp_base_adj) = incval;
49404+ ACCESS_ONCE_RW(pf->ptp_base_adj) = incval;
49405 smp_mb(); /* Force the above update. */
49406 }
49407
49408diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
49409index 68f87ec..241dbe3 100644
49410--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
49411+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
49412@@ -792,7 +792,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
49413 }
49414
49415 /* update the base incval used to calculate frequency adjustment */
49416- ACCESS_ONCE(adapter->base_incval) = incval;
49417+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
49418 smp_mb();
49419
49420 /* need lock to prevent incorrect read while modifying cyclecounter */
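
The ACCESS_ONCE_RW conversions in be_main, i40e, ixgbe (and sfc below) reflect a split this patch makes elsewhere in compiler.h: plain ACCESS_ONCE casts through a const volatile lvalue and is reserved for reads, while stores must use the writable variant. A sketch of the distinction, assuming definitions along these lines:

    /* Sketch of the split; the real definitions live in
     * include/linux/compiler.h in this patch. */
    #define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))  /* reads only */
    #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))        /* writes too */

    static unsigned long base_incval;

    static void update(unsigned long incval)
    {
            ACCESS_ONCE_RW(base_incval) = incval;  /* ACCESS_ONCE() would not compile */
    }
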
49421diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
49422index 2bbd01f..e8baa64 100644
49423--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
49424+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
49425@@ -3457,7 +3457,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
49426 struct __vxge_hw_fifo *fifo;
49427 struct vxge_hw_fifo_config *config;
49428 u32 txdl_size, txdl_per_memblock;
49429- struct vxge_hw_mempool_cbs fifo_mp_callback;
49430+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
49431+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
49432+ };
49433+
49434 struct __vxge_hw_virtualpath *vpath;
49435
49436 if ((vp == NULL) || (attr == NULL)) {
49437@@ -3540,8 +3543,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
49438 goto exit;
49439 }
49440
49441- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
49442-
49443 fifo->mempool =
49444 __vxge_hw_mempool_create(vpath->hldev,
49445 fifo->config->memblock_size,
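
The vxge change moves the mempool callback table from the stack to a function-scope static const object initialized at compile time. Beyond making the table read-only, this matters whenever the callee keeps the pointer: an on-stack ops struct dies when the function returns. A toy sketch:

    struct mempool_cbs {
            void (*item_alloc)(void *item);
    };

    static void my_item_alloc(void *item) { (void)item; }

    static const struct mempool_cbs *registered;  /* callee may stash the pointer */

    static void create_pool(void)
    {
            /* Before: struct mempool_cbs cbs; cbs.item_alloc = ...;  (stack!)
             * After: one static, read-only instance with a stable address. */
            static const struct mempool_cbs cbs = {
                    .item_alloc = my_item_alloc,
            };
            registered = &cbs;
    }
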
49446diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
49447index 73e6683..464e910 100644
49448--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
49449+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
49450@@ -120,6 +120,10 @@ static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
49451 int data);
49452 static void pch_gbe_set_multi(struct net_device *netdev);
49453
49454+static struct sock_filter ptp_filter[] = {
49455+ PTP_FILTER
49456+};
49457+
49458 static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
49459 {
49460 u8 *data = skb->data;
49461@@ -127,7 +131,7 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
49462 u16 *hi, *id;
49463 u32 lo;
49464
49465- if (ptp_classify_raw(skb) == PTP_CLASS_NONE)
49466+ if (sk_run_filter(skb, ptp_filter) == PTP_CLASS_NONE)
49467 return 0;
49468
49469 offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
49470@@ -2631,6 +2635,11 @@ static int pch_gbe_probe(struct pci_dev *pdev,
49471
49472 adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
49473 PCI_DEVFN(12, 4));
49474+ if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
49475+ dev_err(&pdev->dev, "Bad ptp filter\n");
49476+ ret = -EINVAL;
49477+ goto err_free_netdev;
49478+ }
49479
49480 netdev->netdev_ops = &pch_gbe_netdev_ops;
49481 netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
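
The pch_gbe, cpts, and ixp4xx hunks undo the 3.16-era switch to ptp_classify_raw() and go back to running the PTP_FILTER classic-BPF program directly: each driver keeps a static sock_filter array, validates it once with ptp_filter_init() at probe time, and classifies packets with sk_run_filter(). A condensed sketch of the driver-side shape (kernel APIs as used in the hunks; error handling assumed):

    #include <linux/kernel.h>
    #include <linux/errno.h>
    #include <linux/filter.h>
    #include <linux/ptp_classify.h>

    /* One BPF program per driver, checked once at init. */
    static struct sock_filter ptp_filter[] = {
            PTP_FILTER
    };

    static int my_probe_ptp(void)
    {
            if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter)))
                    return -EINVAL;   /* malformed filter: refuse to load */
            return 0;
    }

    static bool my_is_ptp(struct sk_buff *skb)
    {
            return sk_run_filter(skb, ptp_filter) != PTP_CLASS_NONE;
    }

The matching Kconfig hunk below drops the now-unneeded `select NET_PTP_CLASSIFY`.
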
49482diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
49483index f33559b..c7f50ac 100644
49484--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
49485+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
49486@@ -2176,7 +2176,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
49487 max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
49488 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
49489 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
49490- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
49491+ pax_open_kernel();
49492+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
49493+ pax_close_kernel();
49494 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
49495 max_sds_rings = QLCNIC_MAX_SDS_RINGS;
49496 max_tx_rings = QLCNIC_MAX_TX_RINGS;
49497diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
49498index be7d7a6..a8983f8 100644
49499--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
49500+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
49501@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
49502 case QLCNIC_NON_PRIV_FUNC:
49503 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
49504 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
49505- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
49506+ pax_open_kernel();
49507+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
49508+ pax_close_kernel();
49509 break;
49510 case QLCNIC_PRIV_FUNC:
49511 ahw->op_mode = QLCNIC_PRIV_FUNC;
49512 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
49513- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
49514+ pax_open_kernel();
49515+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
49516+ pax_close_kernel();
49517 break;
49518 case QLCNIC_MGMT_FUNC:
49519 ahw->op_mode = QLCNIC_MGMT_FUNC;
49520 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
49521- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
49522+ pax_open_kernel();
49523+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
49524+ pax_close_kernel();
49525 break;
49526 default:
49527 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
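
Because constified ops tables live in read-only memory, the rare legitimate runtime update (here, picking an init_driver callback per operating mode) has to be bracketed with pax_open_kernel()/pax_close_kernel(), which briefly lift the write protection on the current CPU; the cast through `void **` sidesteps the const qualifier the plugin put on the member. A sketch with hypothetical ops and stand-in declarations:

    /* Stand-in declarations: the real helpers are provided by PaX. */
    void pax_open_kernel(void);
    void pax_close_kernel(void);

    struct nic_ops {
            int (* const init_driver)(void);   /* const after constify */
    };

    static int init_default(void) { return 0; }

    static void pick_mode(struct nic_ops *ops)
    {
            pax_open_kernel();
            /* Cast away the plugin-imposed const; the write is only
             * permitted between open/close. */
            *(void **)&ops->init_driver = init_default;
            pax_close_kernel();
    }

The macvlan_link_register() hunk below uses the same bracketing to fill a constified rtnl_link_ops.
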
49528diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
49529index e46fc39..abe135b 100644
49530--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
49531+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
49532@@ -1228,7 +1228,7 @@ flash_temp:
49533 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
49534 {
49535 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
49536- static const struct qlcnic_dump_operations *fw_dump_ops;
49537+ const struct qlcnic_dump_operations *fw_dump_ops;
49538 struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
49539 u32 entry_offset, dump, no_entries, buf_offset = 0;
49540 int i, k, ops_cnt, ops_index, dump_size = 0;
49541diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
49542index 61623e9..ac97c27 100644
49543--- a/drivers/net/ethernet/realtek/r8169.c
49544+++ b/drivers/net/ethernet/realtek/r8169.c
49545@@ -759,22 +759,22 @@ struct rtl8169_private {
49546 struct mdio_ops {
49547 void (*write)(struct rtl8169_private *, int, int);
49548 int (*read)(struct rtl8169_private *, int);
49549- } mdio_ops;
49550+ } __no_const mdio_ops;
49551
49552 struct pll_power_ops {
49553 void (*down)(struct rtl8169_private *);
49554 void (*up)(struct rtl8169_private *);
49555- } pll_power_ops;
49556+ } __no_const pll_power_ops;
49557
49558 struct jumbo_ops {
49559 void (*enable)(struct rtl8169_private *);
49560 void (*disable)(struct rtl8169_private *);
49561- } jumbo_ops;
49562+ } __no_const jumbo_ops;
49563
49564 struct csi_ops {
49565 void (*write)(struct rtl8169_private *, int, int);
49566 u32 (*read)(struct rtl8169_private *, int);
49567- } csi_ops;
49568+ } __no_const csi_ops;
49569
49570 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
49571 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
49572diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
49573index 6b861e3..204ac86 100644
49574--- a/drivers/net/ethernet/sfc/ptp.c
49575+++ b/drivers/net/ethernet/sfc/ptp.c
49576@@ -822,7 +822,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
49577 ptp->start.dma_addr);
49578
49579 /* Clear flag that signals MC ready */
49580- ACCESS_ONCE(*start) = 0;
49581+ ACCESS_ONCE_RW(*start) = 0;
49582 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
49583 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
49584 EFX_BUG_ON_PARANOID(rc);
49585diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49586index 50617c5..b13724c 100644
49587--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49588+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49589@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
49590
49591 writel(value, ioaddr + MMC_CNTRL);
49592
49593- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
49594- MMC_CNTRL, value);
49595+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
49596+// MMC_CNTRL, value);
49597 }
49598
49599 /* To mask all all interrupts.*/
49600diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
49601index 6b56f85..50e285f 100644
49602--- a/drivers/net/ethernet/ti/cpts.c
49603+++ b/drivers/net/ethernet/ti/cpts.c
49604@@ -33,6 +33,10 @@
49605
49606 #ifdef CONFIG_TI_CPTS
49607
49608+static struct sock_filter ptp_filter[] = {
49609+ PTP_FILTER
49610+};
49611+
49612 #define cpts_read32(c, r) __raw_readl(&c->reg->r)
49613 #define cpts_write32(c, v, r) __raw_writel(v, &c->reg->r)
49614
49615@@ -296,7 +300,7 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
49616 u64 ns = 0;
49617 struct cpts_event *event;
49618 struct list_head *this, *next;
49619- unsigned int class = ptp_classify_raw(skb);
49620+ unsigned int class = sk_run_filter(skb, ptp_filter);
49621 unsigned long flags;
49622 u16 seqid;
49623 u8 mtype;
49624@@ -367,6 +371,10 @@ int cpts_register(struct device *dev, struct cpts *cpts,
49625 int err, i;
49626 unsigned long flags;
49627
49628+ if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
49629+ pr_err("cpts: bad ptp filter\n");
49630+ return -EINVAL;
49631+ }
49632 cpts->info = cpts_info;
49633 cpts->clock = ptp_clock_register(&cpts->info, dev);
49634 if (IS_ERR(cpts->clock)) {
49635diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig
49636index b81bc9f..3f43101 100644
49637--- a/drivers/net/ethernet/xscale/Kconfig
49638+++ b/drivers/net/ethernet/xscale/Kconfig
49639@@ -23,7 +23,6 @@ config IXP4XX_ETH
49640 tristate "Intel IXP4xx Ethernet support"
49641 depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
49642 select PHYLIB
49643- select NET_PTP_CLASSIFY
49644 ---help---
49645 Say Y here if you want to use built-in Ethernet ports
49646 on IXP4xx processor.
49647diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
49648index f7e0f0f..25283f1 100644
49649--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
49650+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
49651@@ -256,6 +256,10 @@ static int ports_open;
49652 static struct port *npe_port_tab[MAX_NPES];
49653 static struct dma_pool *dma_pool;
49654
49655+static struct sock_filter ptp_filter[] = {
49656+ PTP_FILTER
49657+};
49658+
49659 static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
49660 {
49661 u8 *data = skb->data;
49662@@ -263,7 +267,7 @@ static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
49663 u16 *hi, *id;
49664 u32 lo;
49665
49666- if (ptp_classify_raw(skb) != PTP_CLASS_V1_IPV4)
49667+ if (sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4)
49668 return 0;
49669
49670 offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
49671@@ -1409,6 +1413,11 @@ static int eth_init_one(struct platform_device *pdev)
49672 char phy_id[MII_BUS_ID_SIZE + 3];
49673 int err;
49674
49675+ if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
49676+ pr_err("ixp4xx_eth: bad ptp filter\n");
49677+ return -EINVAL;
49678+ }
49679+
49680 if (!(dev = alloc_etherdev(sizeof(struct port))))
49681 return -ENOMEM;
49682
49683diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
49684index 6cc37c1..fdd9d77 100644
49685--- a/drivers/net/hyperv/hyperv_net.h
49686+++ b/drivers/net/hyperv/hyperv_net.h
49687@@ -170,7 +170,7 @@ struct rndis_device {
49688
49689 enum rndis_device_state state;
49690 bool link_state;
49691- atomic_t new_req_id;
49692+ atomic_unchecked_t new_req_id;
49693
49694 spinlock_t request_lock;
49695 struct list_head req_list;
49696diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
49697index 99c527a..6a2ce38 100644
49698--- a/drivers/net/hyperv/rndis_filter.c
49699+++ b/drivers/net/hyperv/rndis_filter.c
49700@@ -102,7 +102,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
49701 * template
49702 */
49703 set = &rndis_msg->msg.set_req;
49704- set->req_id = atomic_inc_return(&dev->new_req_id);
49705+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
49706
49707 /* Add to the request list */
49708 spin_lock_irqsave(&dev->request_lock, flags);
49709@@ -930,7 +930,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
49710
49711 /* Setup the rndis set */
49712 halt = &request->request_msg.msg.halt_req;
49713- halt->req_id = atomic_inc_return(&dev->new_req_id);
49714+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
49715
49716 /* Ignore return since this msg is optional. */
49717 rndis_filter_send_request(dev, request);
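
new_req_id only needs to be unique, so wraparound is harmless; under PAX_REFCOUNT a plain atomic_t that overflows trips the saturation logic, and counters like this one (sierra_net's iface_counter and macvlan's rx_dropped below are the same case) are switched to atomic_unchecked_t with matching accessors. A sketch with stand-in types:

    /* Stand-ins: with PAX_REFCOUNT, atomic_inc_return() traps on
     * overflow, while the _unchecked variant deliberately wraps. */
    typedef struct { volatile int counter; } atomic_unchecked_t;

    static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
    {
            return __sync_add_and_fetch(&v->counter, 1);  /* wraps silently */
    }

    static atomic_unchecked_t new_req_id;

    static int next_req_id(void)
    {
            return atomic_inc_return_unchecked(&new_req_id);  /* uniqueness only */
    }
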
49718diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
49719index 78f18be..1d19c62 100644
49720--- a/drivers/net/ieee802154/fakehard.c
49721+++ b/drivers/net/ieee802154/fakehard.c
49722@@ -364,7 +364,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
49723 phy->transmit_power = 0xbf;
49724
49725 dev->netdev_ops = &fake_ops;
49726- dev->ml_priv = &fake_mlme;
49727+ dev->ml_priv = (void *)&fake_mlme;
49728
49729 priv = netdev_priv(dev);
49730 priv->phy = phy;
49731diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
49732index ef8a5c2..76877d6 100644
49733--- a/drivers/net/macvlan.c
49734+++ b/drivers/net/macvlan.c
49735@@ -264,7 +264,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
49736 free_nskb:
49737 kfree_skb(nskb);
49738 err:
49739- atomic_long_inc(&skb->dev->rx_dropped);
49740+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
49741 }
49742
49743 /* called under rcu_read_lock() from netif_receive_skb */
49744@@ -1134,13 +1134,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
49745 int macvlan_link_register(struct rtnl_link_ops *ops)
49746 {
49747 /* common fields */
49748- ops->priv_size = sizeof(struct macvlan_dev);
49749- ops->validate = macvlan_validate;
49750- ops->maxtype = IFLA_MACVLAN_MAX;
49751- ops->policy = macvlan_policy;
49752- ops->changelink = macvlan_changelink;
49753- ops->get_size = macvlan_get_size;
49754- ops->fill_info = macvlan_fill_info;
49755+ pax_open_kernel();
49756+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
49757+ *(void **)&ops->validate = macvlan_validate;
49758+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
49759+ *(const void **)&ops->policy = macvlan_policy;
49760+ *(void **)&ops->changelink = macvlan_changelink;
49761+ *(void **)&ops->get_size = macvlan_get_size;
49762+ *(void **)&ops->fill_info = macvlan_fill_info;
49763+ pax_close_kernel();
49764
49765 return rtnl_link_register(ops);
49766 };
49767@@ -1220,7 +1222,7 @@ static int macvlan_device_event(struct notifier_block *unused,
49768 return NOTIFY_DONE;
49769 }
49770
49771-static struct notifier_block macvlan_notifier_block __read_mostly = {
49772+static struct notifier_block macvlan_notifier_block = {
49773 .notifier_call = macvlan_device_event,
49774 };
49775
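
The notifier blocks in macvlan, macvtap, team, and vxlan lose __read_mostly rather than gaining const. The apparent rationale, consistent with the systematic removals across this patch, is that grsecurity places __read_mostly data in a region that becomes read-only after init, and a notifier_block cannot live there because registration links blocks together through their ->next field at runtime. A sketch of the conflict, under that assumption:

    #include <stddef.h>

    /* Minimal notifier shape; the real one lives in <linux/notifier.h>. */
    struct notifier_block {
            int (*notifier_call)(struct notifier_block *nb,
                                 unsigned long ev, void *p);
            struct notifier_block *next;   /* written on register/unregister */
            int priority;
    };

    static int my_event(struct notifier_block *nb, unsigned long ev, void *p)
    {
            (void)nb; (void)ev; (void)p;
            return 0;
    }

    /* No __read_mostly: registration must be able to write ->next. */
    static struct notifier_block my_nb = {
            .notifier_call = my_event,
    };
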
49776diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
49777index 3381c4f..dea5fd5 100644
49778--- a/drivers/net/macvtap.c
49779+++ b/drivers/net/macvtap.c
49780@@ -1020,7 +1020,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
49781 }
49782
49783 ret = 0;
49784- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
49785+ if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
49786 put_user(q->flags, &ifr->ifr_flags))
49787 ret = -EFAULT;
49788 macvtap_put_vlan(vlan);
49789@@ -1190,7 +1190,7 @@ static int macvtap_device_event(struct notifier_block *unused,
49790 return NOTIFY_DONE;
49791 }
49792
49793-static struct notifier_block macvtap_notifier_block __read_mostly = {
49794+static struct notifier_block macvtap_notifier_block = {
49795 .notifier_call = macvtap_device_event,
49796 };
49797
49798diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
49799index 9408157..d53b924 100644
49800--- a/drivers/net/phy/dp83640.c
49801+++ b/drivers/net/phy/dp83640.c
49802@@ -27,7 +27,6 @@
49803 #include <linux/module.h>
49804 #include <linux/net_tstamp.h>
49805 #include <linux/netdevice.h>
49806-#include <linux/if_vlan.h>
49807 #include <linux/phy.h>
49808 #include <linux/ptp_classify.h>
49809 #include <linux/ptp_clock_kernel.h>
49810diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
49811index d5b77ef..72ff14b 100644
49812--- a/drivers/net/ppp/ppp_generic.c
49813+++ b/drivers/net/ppp/ppp_generic.c
49814@@ -143,8 +143,9 @@ struct ppp {
49815 struct sk_buff_head mrq; /* MP: receive reconstruction queue */
49816 #endif /* CONFIG_PPP_MULTILINK */
49817 #ifdef CONFIG_PPP_FILTER
49818- struct sk_filter *pass_filter; /* filter for packets to pass */
49819- struct sk_filter *active_filter;/* filter for pkts to reset idle */
49820+ struct sock_filter *pass_filter; /* filter for packets to pass */
49821+ struct sock_filter *active_filter;/* filter for pkts to reset idle */
49822+ unsigned pass_len, active_len;
49823 #endif /* CONFIG_PPP_FILTER */
49824 struct net *ppp_net; /* the net we belong to */
49825 struct ppp_link_stats stats64; /* 64 bit network stats */
49826@@ -539,7 +540,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
49827 {
49828 struct sock_fprog uprog;
49829 struct sock_filter *code = NULL;
49830- int len;
49831+ int len, err;
49832
49833 if (copy_from_user(&uprog, arg, sizeof(uprog)))
49834 return -EFAULT;
49835@@ -554,6 +555,12 @@ static int get_filter(void __user *arg, struct sock_filter **p)
49836 if (IS_ERR(code))
49837 return PTR_ERR(code);
49838
49839+ err = sk_chk_filter(code, uprog.len);
49840+ if (err) {
49841+ kfree(code);
49842+ return err;
49843+ }
49844+
49845 *p = code;
49846 return uprog.len;
49847 }
49848@@ -748,52 +755,28 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
49849 case PPPIOCSPASS:
49850 {
49851 struct sock_filter *code;
49852-
49853 err = get_filter(argp, &code);
49854 if (err >= 0) {
49855- struct sock_fprog_kern fprog = {
49856- .len = err,
49857- .filter = code,
49858- };
49859-
49860 ppp_lock(ppp);
49861- if (ppp->pass_filter) {
49862- sk_unattached_filter_destroy(ppp->pass_filter);
49863- ppp->pass_filter = NULL;
49864- }
49865- if (fprog.filter != NULL)
49866- err = sk_unattached_filter_create(&ppp->pass_filter,
49867- &fprog);
49868- else
49869- err = 0;
49870- kfree(code);
49871+ kfree(ppp->pass_filter);
49872+ ppp->pass_filter = code;
49873+ ppp->pass_len = err;
49874 ppp_unlock(ppp);
49875+ err = 0;
49876 }
49877 break;
49878 }
49879 case PPPIOCSACTIVE:
49880 {
49881 struct sock_filter *code;
49882-
49883 err = get_filter(argp, &code);
49884 if (err >= 0) {
49885- struct sock_fprog_kern fprog = {
49886- .len = err,
49887- .filter = code,
49888- };
49889-
49890 ppp_lock(ppp);
49891- if (ppp->active_filter) {
49892- sk_unattached_filter_destroy(ppp->active_filter);
49893- ppp->active_filter = NULL;
49894- }
49895- if (fprog.filter != NULL)
49896- err = sk_unattached_filter_create(&ppp->active_filter,
49897- &fprog);
49898- else
49899- err = 0;
49900- kfree(code);
49901+ kfree(ppp->active_filter);
49902+ ppp->active_filter = code;
49903+ ppp->active_len = err;
49904 ppp_unlock(ppp);
49905+ err = 0;
49906 }
49907 break;
49908 }
49909@@ -1201,7 +1184,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
49910 a four-byte PPP header on each packet */
49911 *skb_push(skb, 2) = 1;
49912 if (ppp->pass_filter &&
49913- SK_RUN_FILTER(ppp->pass_filter, skb) == 0) {
49914+ sk_run_filter(skb, ppp->pass_filter) == 0) {
49915 if (ppp->debug & 1)
49916 netdev_printk(KERN_DEBUG, ppp->dev,
49917 "PPP: outbound frame "
49918@@ -1211,7 +1194,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
49919 }
49920 /* if this packet passes the active filter, record the time */
49921 if (!(ppp->active_filter &&
49922- SK_RUN_FILTER(ppp->active_filter, skb) == 0))
49923+ sk_run_filter(skb, ppp->active_filter) == 0))
49924 ppp->last_xmit = jiffies;
49925 skb_pull(skb, 2);
49926 #else
49927@@ -1835,7 +1818,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
49928
49929 *skb_push(skb, 2) = 0;
49930 if (ppp->pass_filter &&
49931- SK_RUN_FILTER(ppp->pass_filter, skb) == 0) {
49932+ sk_run_filter(skb, ppp->pass_filter) == 0) {
49933 if (ppp->debug & 1)
49934 netdev_printk(KERN_DEBUG, ppp->dev,
49935 "PPP: inbound frame "
49936@@ -1844,7 +1827,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
49937 return;
49938 }
49939 if (!(ppp->active_filter &&
49940- SK_RUN_FILTER(ppp->active_filter, skb) == 0))
49941+ sk_run_filter(skb, ppp->active_filter) == 0))
49942 ppp->last_recv = jiffies;
49943 __skb_pull(skb, 2);
49944 } else
49945@@ -2689,10 +2672,6 @@ ppp_create_interface(struct net *net, int unit, int *retp)
49946 ppp->minseq = -1;
49947 skb_queue_head_init(&ppp->mrq);
49948 #endif /* CONFIG_PPP_MULTILINK */
49949-#ifdef CONFIG_PPP_FILTER
49950- ppp->pass_filter = NULL;
49951- ppp->active_filter = NULL;
49952-#endif /* CONFIG_PPP_FILTER */
49953
49954 /*
49955 * drum roll: don't forget to set
49956@@ -2823,15 +2802,10 @@ static void ppp_destroy_interface(struct ppp *ppp)
49957 skb_queue_purge(&ppp->mrq);
49958 #endif /* CONFIG_PPP_MULTILINK */
49959 #ifdef CONFIG_PPP_FILTER
49960- if (ppp->pass_filter) {
49961- sk_unattached_filter_destroy(ppp->pass_filter);
49962- ppp->pass_filter = NULL;
49963- }
49964-
49965- if (ppp->active_filter) {
49966- sk_unattached_filter_destroy(ppp->active_filter);
49967- ppp->active_filter = NULL;
49968- }
49969+ kfree(ppp->pass_filter);
49970+ ppp->pass_filter = NULL;
49971+ kfree(ppp->active_filter);
49972+ ppp->active_filter = NULL;
49973 #endif /* CONFIG_PPP_FILTER */
49974
49975 kfree_skb(ppp->xmit_pending);
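
The ppp_generic hunks revert the 3.16 conversion to attached sk_filter objects and store the raw sock_filter array again, keeping two safeguards: get_filter() now runs sk_chk_filter() on the user-supplied program before it is accepted, and packets are classified with the classic sk_run_filter() interpreter. Condensed shape of the validating path (kernel API as used in the hunk):

    #include <linux/filter.h>
    #include <linux/slab.h>

    /* Validate a user-supplied classic BPF program before keeping it. */
    static int install_filter(struct sock_filter *code, unsigned int len,
                              struct sock_filter **slot, unsigned int *slot_len)
    {
            int err = sk_chk_filter(code, len);  /* reject malformed programs */
            if (err) {
                    kfree(code);
                    return err;
            }
            kfree(*slot);       /* drop the previous program, if any */
            *slot = code;
            *slot_len = len;
            return 0;
    }

The team_mode_loadbalance hunks below are the companion revert, swapping sock_fprog_kern back to sock_fprog.
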
49976diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
49977index 1252d9c..80e660b 100644
49978--- a/drivers/net/slip/slhc.c
49979+++ b/drivers/net/slip/slhc.c
49980@@ -488,7 +488,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
49981 register struct tcphdr *thp;
49982 register struct iphdr *ip;
49983 register struct cstate *cs;
49984- int len, hdrlen;
49985+ long len, hdrlen;
49986 unsigned char *cp = icp;
49987
49988 /* We've got a compressed packet; read the change byte */
49989diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
49990index b4958c7..277cb96 100644
49991--- a/drivers/net/team/team.c
49992+++ b/drivers/net/team/team.c
49993@@ -2868,7 +2868,7 @@ static int team_device_event(struct notifier_block *unused,
49994 return NOTIFY_DONE;
49995 }
49996
49997-static struct notifier_block team_notifier_block __read_mostly = {
49998+static struct notifier_block team_notifier_block = {
49999 .notifier_call = team_device_event,
50000 };
50001
50002diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
50003index a58dfeb..dbde341 100644
50004--- a/drivers/net/team/team_mode_loadbalance.c
50005+++ b/drivers/net/team/team_mode_loadbalance.c
50006@@ -49,7 +49,7 @@ struct lb_port_mapping {
50007 struct lb_priv_ex {
50008 struct team *team;
50009 struct lb_port_mapping tx_hash_to_port_mapping[LB_TX_HASHTABLE_SIZE];
50010- struct sock_fprog_kern *orig_fprog;
50011+ struct sock_fprog *orig_fprog;
50012 struct {
50013 unsigned int refresh_interval; /* in tenths of second */
50014 struct delayed_work refresh_dw;
50015@@ -241,15 +241,15 @@ static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx)
50016 return 0;
50017 }
50018
50019-static int __fprog_create(struct sock_fprog_kern **pfprog, u32 data_len,
50020+static int __fprog_create(struct sock_fprog **pfprog, u32 data_len,
50021 const void *data)
50022 {
50023- struct sock_fprog_kern *fprog;
50024+ struct sock_fprog *fprog;
50025 struct sock_filter *filter = (struct sock_filter *) data;
50026
50027 if (data_len % sizeof(struct sock_filter))
50028 return -EINVAL;
50029- fprog = kmalloc(sizeof(*fprog), GFP_KERNEL);
50030+ fprog = kmalloc(sizeof(struct sock_fprog), GFP_KERNEL);
50031 if (!fprog)
50032 return -ENOMEM;
50033 fprog->filter = kmemdup(filter, data_len, GFP_KERNEL);
50034@@ -262,7 +262,7 @@ static int __fprog_create(struct sock_fprog_kern **pfprog, u32 data_len,
50035 return 0;
50036 }
50037
50038-static void __fprog_destroy(struct sock_fprog_kern *fprog)
50039+static void __fprog_destroy(struct sock_fprog *fprog)
50040 {
50041 kfree(fprog->filter);
50042 kfree(fprog);
50043@@ -273,7 +273,7 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
50044 struct lb_priv *lb_priv = get_lb_priv(team);
50045 struct sk_filter *fp = NULL;
50046 struct sk_filter *orig_fp;
50047- struct sock_fprog_kern *fprog = NULL;
50048+ struct sock_fprog *fprog = NULL;
50049 int err;
50050
50051 if (ctx->data.bin_val.len) {
50052diff --git a/drivers/net/tun.c b/drivers/net/tun.c
50053index 98bad1f..f197d7a 100644
50054--- a/drivers/net/tun.c
50055+++ b/drivers/net/tun.c
50056@@ -1854,7 +1854,7 @@ unlock:
50057 }
50058
50059 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
50060- unsigned long arg, int ifreq_len)
50061+ unsigned long arg, size_t ifreq_len)
50062 {
50063 struct tun_file *tfile = file->private_data;
50064 struct tun_struct *tun;
50065@@ -1867,6 +1867,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
50066 unsigned int ifindex;
50067 int ret;
50068
50069+ if (ifreq_len > sizeof ifr)
50070+ return -EFAULT;
50071+
50072 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
50073 if (copy_from_user(&ifr, argp, ifreq_len))
50074 return -EFAULT;
50075diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
50076index a4272ed..cdd69ff 100644
50077--- a/drivers/net/usb/hso.c
50078+++ b/drivers/net/usb/hso.c
50079@@ -71,7 +71,7 @@
50080 #include <asm/byteorder.h>
50081 #include <linux/serial_core.h>
50082 #include <linux/serial.h>
50083-
50084+#include <asm/local.h>
50085
50086 #define MOD_AUTHOR "Option Wireless"
50087 #define MOD_DESCRIPTION "USB High Speed Option driver"
50088@@ -1177,7 +1177,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
50089 struct urb *urb;
50090
50091 urb = serial->rx_urb[0];
50092- if (serial->port.count > 0) {
50093+ if (atomic_read(&serial->port.count) > 0) {
50094 count = put_rxbuf_data(urb, serial);
50095 if (count == -1)
50096 return;
50097@@ -1215,7 +1215,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
50098 DUMP1(urb->transfer_buffer, urb->actual_length);
50099
50100 /* Anyone listening? */
50101- if (serial->port.count == 0)
50102+ if (atomic_read(&serial->port.count) == 0)
50103 return;
50104
50105 if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
50106@@ -1277,8 +1277,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
50107 tty_port_tty_set(&serial->port, tty);
50108
50109 /* check for port already opened, if not set the termios */
50110- serial->port.count++;
50111- if (serial->port.count == 1) {
50112+ if (atomic_inc_return(&serial->port.count) == 1) {
50113 serial->rx_state = RX_IDLE;
50114 /* Force default termio settings */
50115 _hso_serial_set_termios(tty, NULL);
50116@@ -1288,7 +1287,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
50117 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
50118 if (result) {
50119 hso_stop_serial_device(serial->parent);
50120- serial->port.count--;
50121+ atomic_dec(&serial->port.count);
50122 kref_put(&serial->parent->ref, hso_serial_ref_free);
50123 }
50124 } else {
50125@@ -1325,10 +1324,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
50126
50127 /* reset the rts and dtr */
50128 /* do the actual close */
50129- serial->port.count--;
50130+ atomic_dec(&serial->port.count);
50131
50132- if (serial->port.count <= 0) {
50133- serial->port.count = 0;
50134+ if (atomic_read(&serial->port.count) <= 0) {
50135+ atomic_set(&serial->port.count, 0);
50136 tty_port_tty_set(&serial->port, NULL);
50137 if (!usb_gone)
50138 hso_stop_serial_device(serial->parent);
50139@@ -1403,7 +1402,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
50140
50141 /* the actual setup */
50142 spin_lock_irqsave(&serial->serial_lock, flags);
50143- if (serial->port.count)
50144+ if (atomic_read(&serial->port.count))
50145 _hso_serial_set_termios(tty, old);
50146 else
50147 tty->termios = *old;
50148@@ -1872,7 +1871,7 @@ static void intr_callback(struct urb *urb)
50149 D1("Pending read interrupt on port %d\n", i);
50150 spin_lock(&serial->serial_lock);
50151 if (serial->rx_state == RX_IDLE &&
50152- serial->port.count > 0) {
50153+ atomic_read(&serial->port.count) > 0) {
50154 /* Setup and send a ctrl req read on
50155 * port i */
50156 if (!serial->rx_urb_filled[0]) {
50157@@ -3045,7 +3044,7 @@ static int hso_resume(struct usb_interface *iface)
50158 /* Start all serial ports */
50159 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
50160 if (serial_table[i] && (serial_table[i]->interface == iface)) {
50161- if (dev2ser(serial_table[i])->port.count) {
50162+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
50163 result =
50164 hso_start_serial_device(serial_table[i], GFP_NOIO);
50165 hso_kick_transmit(dev2ser(serial_table[i]));
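
The hso hunks convert the tty port's open count from plain integer arithmetic to atomic operations, so the open, close, and resume paths cannot race the count into inconsistency. The open side is the instructive part: increment first, then treat a result of exactly 1 as "first opener". Sketch with stand-in atomics:

    /* Stand-in atomics; the kernel's atomic_t behaves the same here. */
    typedef struct { volatile int counter; } atomic_t;
    static inline int atomic_inc_return(atomic_t *v)
    { return __sync_add_and_fetch(&v->counter, 1); }
    static inline void atomic_dec(atomic_t *v)
    { __sync_sub_and_fetch(&v->counter, 1); }

    static atomic_t port_count;

    static int serial_open(void)
    {
            /* First opener does the one-time device setup. */
            if (atomic_inc_return(&port_count) == 1) {
                    /* ... start hardware; on failure: atomic_dec(&port_count); */
            }
            return 0;
    }
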
50166diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
50167index 3eab74c..fb6097c 100644
50168--- a/drivers/net/usb/r8152.c
50169+++ b/drivers/net/usb/r8152.c
50170@@ -567,7 +567,7 @@ struct r8152 {
50171 void (*up)(struct r8152 *);
50172 void (*down)(struct r8152 *);
50173 void (*unload)(struct r8152 *);
50174- } rtl_ops;
50175+ } __no_const rtl_ops;
50176
50177 int intr_interval;
50178 u32 saved_wolopts;
50179diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
50180index a2515887..6d13233 100644
50181--- a/drivers/net/usb/sierra_net.c
50182+++ b/drivers/net/usb/sierra_net.c
50183@@ -51,7 +51,7 @@ static const char driver_name[] = "sierra_net";
50184 /* atomic counter partially included in MAC address to make sure 2 devices
50185 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
50186 */
50187-static atomic_t iface_counter = ATOMIC_INIT(0);
50188+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
50189
50190 /*
50191 * SYNC Timer Delay definition used to set the expiry time
50192@@ -697,7 +697,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
50193 dev->net->netdev_ops = &sierra_net_device_ops;
50194
50195 /* change MAC addr to include, ifacenum, and to be unique */
50196- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
50197+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
50198 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
50199
50200 /* we will have to manufacture ethernet headers, prepare template */
50201diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
50202index 7d9f84a..7f690da 100644
50203--- a/drivers/net/virtio_net.c
50204+++ b/drivers/net/virtio_net.c
50205@@ -47,7 +47,7 @@ module_param(gso, bool, 0444);
50206 #define RECEIVE_AVG_WEIGHT 64
50207
50208 /* Minimum alignment for mergeable packet buffers. */
50209-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
50210+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256UL)
50211
50212 #define VIRTNET_DRIVER_VERSION "1.0.0"
50213
50214diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
50215index 9f79192..838cf95 100644
50216--- a/drivers/net/vxlan.c
50217+++ b/drivers/net/vxlan.c
50218@@ -2838,7 +2838,7 @@ nla_put_failure:
50219 return -EMSGSIZE;
50220 }
50221
50222-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
50223+static struct rtnl_link_ops vxlan_link_ops = {
50224 .kind = "vxlan",
50225 .maxtype = IFLA_VXLAN_MAX,
50226 .policy = vxlan_policy,
50227@@ -2885,7 +2885,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
50228 return NOTIFY_DONE;
50229 }
50230
50231-static struct notifier_block vxlan_notifier_block __read_mostly = {
50232+static struct notifier_block vxlan_notifier_block = {
50233 .notifier_call = vxlan_lowerdev_event,
50234 };
50235
50236diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
50237index 5920c99..ff2e4a5 100644
50238--- a/drivers/net/wan/lmc/lmc_media.c
50239+++ b/drivers/net/wan/lmc/lmc_media.c
50240@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
50241 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
50242
50243 lmc_media_t lmc_ds3_media = {
50244- lmc_ds3_init, /* special media init stuff */
50245- lmc_ds3_default, /* reset to default state */
50246- lmc_ds3_set_status, /* reset status to state provided */
50247- lmc_dummy_set_1, /* set clock source */
50248- lmc_dummy_set2_1, /* set line speed */
50249- lmc_ds3_set_100ft, /* set cable length */
50250- lmc_ds3_set_scram, /* set scrambler */
50251- lmc_ds3_get_link_status, /* get link status */
50252- lmc_dummy_set_1, /* set link status */
50253- lmc_ds3_set_crc_length, /* set CRC length */
50254- lmc_dummy_set_1, /* set T1 or E1 circuit type */
50255- lmc_ds3_watchdog
50256+ .init = lmc_ds3_init, /* special media init stuff */
50257+ .defaults = lmc_ds3_default, /* reset to default state */
50258+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
50259+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
50260+ .set_speed = lmc_dummy_set2_1, /* set line speed */
50261+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
50262+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
50263+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
50264+ .set_link_status = lmc_dummy_set_1, /* set link status */
50265+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
50266+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
50267+ .watchdog = lmc_ds3_watchdog
50268 };
50269
50270 lmc_media_t lmc_hssi_media = {
50271- lmc_hssi_init, /* special media init stuff */
50272- lmc_hssi_default, /* reset to default state */
50273- lmc_hssi_set_status, /* reset status to state provided */
50274- lmc_hssi_set_clock, /* set clock source */
50275- lmc_dummy_set2_1, /* set line speed */
50276- lmc_dummy_set_1, /* set cable length */
50277- lmc_dummy_set_1, /* set scrambler */
50278- lmc_hssi_get_link_status, /* get link status */
50279- lmc_hssi_set_link_status, /* set link status */
50280- lmc_hssi_set_crc_length, /* set CRC length */
50281- lmc_dummy_set_1, /* set T1 or E1 circuit type */
50282- lmc_hssi_watchdog
50283+ .init = lmc_hssi_init, /* special media init stuff */
50284+ .defaults = lmc_hssi_default, /* reset to default state */
50285+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
50286+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
50287+ .set_speed = lmc_dummy_set2_1, /* set line speed */
50288+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
50289+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
50290+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
50291+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
50292+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
50293+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
50294+ .watchdog = lmc_hssi_watchdog
50295 };
50296
50297-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
50298- lmc_ssi_default, /* reset to default state */
50299- lmc_ssi_set_status, /* reset status to state provided */
50300- lmc_ssi_set_clock, /* set clock source */
50301- lmc_ssi_set_speed, /* set line speed */
50302- lmc_dummy_set_1, /* set cable length */
50303- lmc_dummy_set_1, /* set scrambler */
50304- lmc_ssi_get_link_status, /* get link status */
50305- lmc_ssi_set_link_status, /* set link status */
50306- lmc_ssi_set_crc_length, /* set CRC length */
50307- lmc_dummy_set_1, /* set T1 or E1 circuit type */
50308- lmc_ssi_watchdog
50309+lmc_media_t lmc_ssi_media = {
50310+ .init = lmc_ssi_init, /* special media init stuff */
50311+ .defaults = lmc_ssi_default, /* reset to default state */
50312+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
50313+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
50314+ .set_speed = lmc_ssi_set_speed, /* set line speed */
50315+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
50316+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
50317+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
50318+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
50319+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
50320+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
50321+ .watchdog = lmc_ssi_watchdog
50322 };
50323
50324 lmc_media_t lmc_t1_media = {
50325- lmc_t1_init, /* special media init stuff */
50326- lmc_t1_default, /* reset to default state */
50327- lmc_t1_set_status, /* reset status to state provided */
50328- lmc_t1_set_clock, /* set clock source */
50329- lmc_dummy_set2_1, /* set line speed */
50330- lmc_dummy_set_1, /* set cable length */
50331- lmc_dummy_set_1, /* set scrambler */
50332- lmc_t1_get_link_status, /* get link status */
50333- lmc_dummy_set_1, /* set link status */
50334- lmc_t1_set_crc_length, /* set CRC length */
50335- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
50336- lmc_t1_watchdog
50337+ .init = lmc_t1_init, /* special media init stuff */
50338+ .defaults = lmc_t1_default, /* reset to default state */
50339+ .set_status = lmc_t1_set_status, /* reset status to state provided */
50340+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
50341+ .set_speed = lmc_dummy_set2_1, /* set line speed */
50342+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
50343+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
50344+ .get_link_status = lmc_t1_get_link_status, /* get link status */
50345+ .set_link_status = lmc_dummy_set_1, /* set link status */
50346+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
50347+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
50348+ .watchdog = lmc_t1_watchdog
50349 };
50350
50351 static void
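Note on the pattern above, for context only: the hunks convert positional struct initializers to C99 designated initializers, so that the callback-to-slot mapping survives field reordering and the ops tables can be made read-only by the constify plugin. A minimal standalone sketch of the idiom; struct media_ops, my_init and my_watchdog are hypothetical names, not the driver's real API:

	/* Sketch only -- hypothetical names, not lmc_media_t itself. */
	#include <stdio.h>

	struct media_ops {
		void (*init)(void);
		void (*watchdog)(void);
	};

	static void my_init(void)     { puts("init"); }
	static void my_watchdog(void) { puts("watchdog"); }

	/* Designated initializers stay correct even if the struct's
	 * fields are reordered, and each slot is named explicitly. */
	static const struct media_ops my_media = {
		.init     = my_init,
		.watchdog = my_watchdog,
	};

	int main(void)
	{
		my_media.init();
		my_media.watchdog();
		return 0;
	}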
50352diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
50353index feacc3b..5bac0de 100644
50354--- a/drivers/net/wan/z85230.c
50355+++ b/drivers/net/wan/z85230.c
50356@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
50357
50358 struct z8530_irqhandler z8530_sync =
50359 {
50360- z8530_rx,
50361- z8530_tx,
50362- z8530_status
50363+ .rx = z8530_rx,
50364+ .tx = z8530_tx,
50365+ .status = z8530_status
50366 };
50367
50368 EXPORT_SYMBOL(z8530_sync);
50369@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
50370 }
50371
50372 static struct z8530_irqhandler z8530_dma_sync = {
50373- z8530_dma_rx,
50374- z8530_dma_tx,
50375- z8530_dma_status
50376+ .rx = z8530_dma_rx,
50377+ .tx = z8530_dma_tx,
50378+ .status = z8530_dma_status
50379 };
50380
50381 static struct z8530_irqhandler z8530_txdma_sync = {
50382- z8530_rx,
50383- z8530_dma_tx,
50384- z8530_dma_status
50385+ .rx = z8530_rx,
50386+ .tx = z8530_dma_tx,
50387+ .status = z8530_dma_status
50388 };
50389
50390 /**
50391@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
50392
50393 struct z8530_irqhandler z8530_nop=
50394 {
50395- z8530_rx_clear,
50396- z8530_tx_clear,
50397- z8530_status_clear
50398+ .rx = z8530_rx_clear,
50399+ .tx = z8530_tx_clear,
50400+ .status = z8530_status_clear
50401 };
50402
50403
50404diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
50405index 0b60295..b8bfa5b 100644
50406--- a/drivers/net/wimax/i2400m/rx.c
50407+++ b/drivers/net/wimax/i2400m/rx.c
50408@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
50409 if (i2400m->rx_roq == NULL)
50410 goto error_roq_alloc;
50411
50412- rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
50413+ rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
50414 GFP_KERNEL);
50415 if (rd == NULL) {
50416 result = -ENOMEM;
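For context: kcalloc(n, size, flags) multiplies its first two arguments with overflow checking, so swapping them does not change the allocation; my reading is that the reordering exists to satisfy the grsecurity size_overflow instrumentation, which treats the two parameters asymmetrically. A userspace sketch of why the swap is harmless; xcalloc is a hypothetical helper:

	/* Sketch only: an overflow-checked "calloc-like" multiply. */
	#include <stdint.h>
	#include <stdlib.h>

	static void *xcalloc(size_t n, size_t size)
	{
		/* n * size must not wrap; the check is symmetric in n and
		 * size, which is why the arguments can be swapped safely. */
		if (size != 0 && n > SIZE_MAX / size)
			return NULL;
		return calloc(n, size);
	}

	int main(void)
	{
		void *p = xcalloc(16, sizeof(int)); /* == xcalloc(sizeof(int), 16) */
		free(p);
		return 0;
	}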
50417diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
50418index 64747d4..17c4cf3 100644
50419--- a/drivers/net/wireless/airo.c
50420+++ b/drivers/net/wireless/airo.c
50421@@ -7846,7 +7846,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
50422 struct airo_info *ai = dev->ml_priv;
50423 int ridcode;
50424 int enabled;
50425- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
50426+ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
50427 unsigned char *iobuf;
50428
50429 /* Only super-user can write RIDs */
50430diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
50431index d48776e..373d049 100644
50432--- a/drivers/net/wireless/at76c50x-usb.c
50433+++ b/drivers/net/wireless/at76c50x-usb.c
50434@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
50435 }
50436
50437 /* Convert timeout from the DFU status to jiffies */
50438-static inline unsigned long at76_get_timeout(struct dfu_status *s)
50439+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
50440 {
50441 return msecs_to_jiffies((s->poll_timeout[2] << 16)
50442 | (s->poll_timeout[1] << 8)
50443diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
50444index e493db4..2c1853a 100644
50445--- a/drivers/net/wireless/ath/ath10k/htc.c
50446+++ b/drivers/net/wireless/ath/ath10k/htc.c
50447@@ -840,7 +840,10 @@ void ath10k_htc_stop(struct ath10k_htc *htc)
50448 /* registered target arrival callback from the HIF layer */
50449 int ath10k_htc_init(struct ath10k *ar)
50450 {
50451- struct ath10k_hif_cb htc_callbacks;
50452+ static struct ath10k_hif_cb htc_callbacks = {
50453+ .rx_completion = ath10k_htc_rx_completion_handler,
50454+ .tx_completion = ath10k_htc_tx_completion_handler,
50455+ };
50456 struct ath10k_htc_ep *ep = NULL;
50457 struct ath10k_htc *htc = &ar->htc;
50458
50459@@ -850,8 +853,6 @@ int ath10k_htc_init(struct ath10k *ar)
50460 ath10k_htc_reset_endpoint_states(htc);
50461
50462 /* setup HIF layer callbacks */
50463- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
50464- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
50465 htc->ar = ar;
50466
50467 /* Get HIF default pipe for HTC message exchange */
50468diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
50469index 4716d33..a688310 100644
50470--- a/drivers/net/wireless/ath/ath10k/htc.h
50471+++ b/drivers/net/wireless/ath/ath10k/htc.h
50472@@ -271,13 +271,13 @@ enum ath10k_htc_ep_id {
50473
50474 struct ath10k_htc_ops {
50475 void (*target_send_suspend_complete)(struct ath10k *ar);
50476-};
50477+} __no_const;
50478
50479 struct ath10k_htc_ep_ops {
50480 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
50481 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
50482 void (*ep_tx_credits)(struct ath10k *);
50483-};
50484+} __no_const;
50485
50486 /* service connection information */
50487 struct ath10k_htc_svc_conn_req {
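For context: __no_const is a grsecurity annotation telling the constify gcc plugin not to force an ops structure read-only, because the driver legitimately rewrites its callbacks at runtime. A sketch of how such an annotation typically degrades to nothing without the plugin; the CONSTIFY_PLUGIN guard and attribute spelling here are assumptions:

	/* Sketch: plugin-aware annotation that is a no-op elsewhere. */
	#ifdef CONSTIFY_PLUGIN
	#define __no_const __attribute__((no_const))
	#else
	#define __no_const
	#endif

	struct htc_ops {
		void (*target_send_suspend_complete)(void *ar);
	} __no_const;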
50488diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
50489index 741b38d..b7ae41b 100644
50490--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
50491+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
50492@@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50493 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
50494 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
50495
50496- ACCESS_ONCE(ads->ds_link) = i->link;
50497- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
50498+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
50499+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
50500
50501 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
50502 ctl6 = SM(i->keytype, AR_EncrType);
50503@@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50504
50505 if ((i->is_first || i->is_last) &&
50506 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
50507- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
50508+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
50509 | set11nTries(i->rates, 1)
50510 | set11nTries(i->rates, 2)
50511 | set11nTries(i->rates, 3)
50512 | (i->dur_update ? AR_DurUpdateEna : 0)
50513 | SM(0, AR_BurstDur);
50514
50515- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
50516+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
50517 | set11nRate(i->rates, 1)
50518 | set11nRate(i->rates, 2)
50519 | set11nRate(i->rates, 3);
50520 } else {
50521- ACCESS_ONCE(ads->ds_ctl2) = 0;
50522- ACCESS_ONCE(ads->ds_ctl3) = 0;
50523+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
50524+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
50525 }
50526
50527 if (!i->is_first) {
50528- ACCESS_ONCE(ads->ds_ctl0) = 0;
50529- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
50530- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
50531+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
50532+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
50533+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
50534 return;
50535 }
50536
50537@@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50538 break;
50539 }
50540
50541- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
50542+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
50543 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
50544 | SM(i->txpower, AR_XmitPower)
50545 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
50546@@ -289,19 +289,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50547 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
50548 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
50549
50550- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
50551- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
50552+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
50553+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
50554
50555 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
50556 return;
50557
50558- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
50559+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
50560 | set11nPktDurRTSCTS(i->rates, 1);
50561
50562- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
50563+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
50564 | set11nPktDurRTSCTS(i->rates, 3);
50565
50566- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
50567+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
50568 | set11nRateFlags(i->rates, 1)
50569 | set11nRateFlags(i->rates, 2)
50570 | set11nRateFlags(i->rates, 3)
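For context: ACCESS_ONCE goes through a volatile cast to stop the compiler merging or reordering the descriptor stores; in the PaX tree the plain form is additionally const-qualified so it can only read, and ACCESS_ONCE_RW is the store-capable variant. A sketch of that split, from my reading of the PaX definitions:

	/* Sketch: the plain form is made read-only by a const qualifier,
	 * so intentional stores must use the _RW spelling. */
	#define ACCESS_ONCE(x)    (*(volatile const __typeof__(x) *)&(x))
	#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))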
50571diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
50572index 729ffbf..49f50e3 100644
50573--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
50574+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
50575@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50576 (i->qcu << AR_TxQcuNum_S) | desc_len;
50577
50578 checksum += val;
50579- ACCESS_ONCE(ads->info) = val;
50580+ ACCESS_ONCE_RW(ads->info) = val;
50581
50582 checksum += i->link;
50583- ACCESS_ONCE(ads->link) = i->link;
50584+ ACCESS_ONCE_RW(ads->link) = i->link;
50585
50586 checksum += i->buf_addr[0];
50587- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
50588+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
50589 checksum += i->buf_addr[1];
50590- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
50591+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
50592 checksum += i->buf_addr[2];
50593- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
50594+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
50595 checksum += i->buf_addr[3];
50596- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
50597+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
50598
50599 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
50600- ACCESS_ONCE(ads->ctl3) = val;
50601+ ACCESS_ONCE_RW(ads->ctl3) = val;
50602 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
50603- ACCESS_ONCE(ads->ctl5) = val;
50604+ ACCESS_ONCE_RW(ads->ctl5) = val;
50605 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
50606- ACCESS_ONCE(ads->ctl7) = val;
50607+ ACCESS_ONCE_RW(ads->ctl7) = val;
50608 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
50609- ACCESS_ONCE(ads->ctl9) = val;
50610+ ACCESS_ONCE_RW(ads->ctl9) = val;
50611
50612 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
50613- ACCESS_ONCE(ads->ctl10) = checksum;
50614+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
50615
50616 if (i->is_first || i->is_last) {
50617- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
50618+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
50619 | set11nTries(i->rates, 1)
50620 | set11nTries(i->rates, 2)
50621 | set11nTries(i->rates, 3)
50622 | (i->dur_update ? AR_DurUpdateEna : 0)
50623 | SM(0, AR_BurstDur);
50624
50625- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
50626+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
50627 | set11nRate(i->rates, 1)
50628 | set11nRate(i->rates, 2)
50629 | set11nRate(i->rates, 3);
50630 } else {
50631- ACCESS_ONCE(ads->ctl13) = 0;
50632- ACCESS_ONCE(ads->ctl14) = 0;
50633+ ACCESS_ONCE_RW(ads->ctl13) = 0;
50634+ ACCESS_ONCE_RW(ads->ctl14) = 0;
50635 }
50636
50637 ads->ctl20 = 0;
50638@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50639
50640 ctl17 = SM(i->keytype, AR_EncrType);
50641 if (!i->is_first) {
50642- ACCESS_ONCE(ads->ctl11) = 0;
50643- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
50644- ACCESS_ONCE(ads->ctl15) = 0;
50645- ACCESS_ONCE(ads->ctl16) = 0;
50646- ACCESS_ONCE(ads->ctl17) = ctl17;
50647- ACCESS_ONCE(ads->ctl18) = 0;
50648- ACCESS_ONCE(ads->ctl19) = 0;
50649+ ACCESS_ONCE_RW(ads->ctl11) = 0;
50650+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
50651+ ACCESS_ONCE_RW(ads->ctl15) = 0;
50652+ ACCESS_ONCE_RW(ads->ctl16) = 0;
50653+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
50654+ ACCESS_ONCE_RW(ads->ctl18) = 0;
50655+ ACCESS_ONCE_RW(ads->ctl19) = 0;
50656 return;
50657 }
50658
50659- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
50660+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
50661 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
50662 | SM(i->txpower, AR_XmitPower)
50663 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
50664@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50665 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
50666 ctl12 |= SM(val, AR_PAPRDChainMask);
50667
50668- ACCESS_ONCE(ads->ctl12) = ctl12;
50669- ACCESS_ONCE(ads->ctl17) = ctl17;
50670+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
50671+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
50672
50673- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
50674+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
50675 | set11nPktDurRTSCTS(i->rates, 1);
50676
50677- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
50678+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
50679 | set11nPktDurRTSCTS(i->rates, 3);
50680
50681- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
50682+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
50683 | set11nRateFlags(i->rates, 1)
50684 | set11nRateFlags(i->rates, 2)
50685 | set11nRateFlags(i->rates, 3)
50686 | SM(i->rtscts_rate, AR_RTSCTSRate);
50687
50688- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
50689+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
50690 }
50691
50692 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
50693diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
50694index 0acd4b5..0591c91 100644
50695--- a/drivers/net/wireless/ath/ath9k/hw.h
50696+++ b/drivers/net/wireless/ath/ath9k/hw.h
50697@@ -629,7 +629,7 @@ struct ath_hw_private_ops {
50698
50699 /* ANI */
50700 void (*ani_cache_ini_regs)(struct ath_hw *ah);
50701-};
50702+} __no_const;
50703
50704 /**
50705 * struct ath_spec_scan - parameters for Atheros spectral scan
50706@@ -706,7 +706,7 @@ struct ath_hw_ops {
50707 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
50708 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
50709 #endif
50710-};
50711+} __no_const;
50712
50713 struct ath_nf_limits {
50714 s16 max;
50715diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
50716index 92190da..f3a4c4c 100644
50717--- a/drivers/net/wireless/b43/phy_lp.c
50718+++ b/drivers/net/wireless/b43/phy_lp.c
50719@@ -2514,7 +2514,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
50720 {
50721 struct ssb_bus *bus = dev->dev->sdev->bus;
50722
50723- static const struct b206x_channel *chandata = NULL;
50724+ const struct b206x_channel *chandata = NULL;
50725 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
50726 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
50727 u16 old_comm15, scale;
50728diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
50729index dc1d20c..f7a4f06 100644
50730--- a/drivers/net/wireless/iwlegacy/3945-mac.c
50731+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
50732@@ -3633,7 +3633,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
50733 */
50734 if (il3945_mod_params.disable_hw_scan) {
50735 D_INFO("Disabling hw_scan\n");
50736- il3945_mac_ops.hw_scan = NULL;
50737+ pax_open_kernel();
50738+ *(void **)&il3945_mac_ops.hw_scan = NULL;
50739+ pax_close_kernel();
50740 }
50741
50742 D_INFO("*** LOAD DRIVER ***\n");
50743diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50744index 0ffb6ff..c0b7f0e 100644
50745--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50746+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50747@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
50748 {
50749 struct iwl_priv *priv = file->private_data;
50750 char buf[64];
50751- int buf_size;
50752+ size_t buf_size;
50753 u32 offset, len;
50754
50755 memset(buf, 0, sizeof(buf));
50756@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
50757 struct iwl_priv *priv = file->private_data;
50758
50759 char buf[8];
50760- int buf_size;
50761+ size_t buf_size;
50762 u32 reset_flag;
50763
50764 memset(buf, 0, sizeof(buf));
50765@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
50766 {
50767 struct iwl_priv *priv = file->private_data;
50768 char buf[8];
50769- int buf_size;
50770+ size_t buf_size;
50771 int ht40;
50772
50773 memset(buf, 0, sizeof(buf));
50774@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
50775 {
50776 struct iwl_priv *priv = file->private_data;
50777 char buf[8];
50778- int buf_size;
50779+ size_t buf_size;
50780 int value;
50781
50782 memset(buf, 0, sizeof(buf));
50783@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
50784 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
50785 DEBUGFS_READ_FILE_OPS(current_sleep_command);
50786
50787-static const char *fmt_value = " %-30s %10u\n";
50788-static const char *fmt_hex = " %-30s 0x%02X\n";
50789-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
50790-static const char *fmt_header =
50791+static const char fmt_value[] = " %-30s %10u\n";
50792+static const char fmt_hex[] = " %-30s 0x%02X\n";
50793+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
50794+static const char fmt_header[] =
50795 "%-32s current cumulative delta max\n";
50796
50797 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
50798@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
50799 {
50800 struct iwl_priv *priv = file->private_data;
50801 char buf[8];
50802- int buf_size;
50803+ size_t buf_size;
50804 int clear;
50805
50806 memset(buf, 0, sizeof(buf));
50807@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
50808 {
50809 struct iwl_priv *priv = file->private_data;
50810 char buf[8];
50811- int buf_size;
50812+ size_t buf_size;
50813 int trace;
50814
50815 memset(buf, 0, sizeof(buf));
50816@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
50817 {
50818 struct iwl_priv *priv = file->private_data;
50819 char buf[8];
50820- int buf_size;
50821+ size_t buf_size;
50822 int missed;
50823
50824 memset(buf, 0, sizeof(buf));
50825@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
50826
50827 struct iwl_priv *priv = file->private_data;
50828 char buf[8];
50829- int buf_size;
50830+ size_t buf_size;
50831 int plcp;
50832
50833 memset(buf, 0, sizeof(buf));
50834@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
50835
50836 struct iwl_priv *priv = file->private_data;
50837 char buf[8];
50838- int buf_size;
50839+ size_t buf_size;
50840 int flush;
50841
50842 memset(buf, 0, sizeof(buf));
50843@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
50844
50845 struct iwl_priv *priv = file->private_data;
50846 char buf[8];
50847- int buf_size;
50848+ size_t buf_size;
50849 int rts;
50850
50851 if (!priv->cfg->ht_params)
50852@@ -2204,7 +2204,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
50853 {
50854 struct iwl_priv *priv = file->private_data;
50855 char buf[8];
50856- int buf_size;
50857+ size_t buf_size;
50858
50859 memset(buf, 0, sizeof(buf));
50860 buf_size = min(count, sizeof(buf) - 1);
50861@@ -2238,7 +2238,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
50862 struct iwl_priv *priv = file->private_data;
50863 u32 event_log_flag;
50864 char buf[8];
50865- int buf_size;
50866+ size_t buf_size;
50867
50868 /* check that the interface is up */
50869 if (!iwl_is_ready(priv))
50870@@ -2292,7 +2292,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
50871 struct iwl_priv *priv = file->private_data;
50872 char buf[8];
50873 u32 calib_disabled;
50874- int buf_size;
50875+ size_t buf_size;
50876
50877 memset(buf, 0, sizeof(buf));
50878 buf_size = min(count, sizeof(buf) - 1);
50879diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
50880index 788085b..0bc852a 100644
50881--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
50882+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
50883@@ -1598,7 +1598,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
50884 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
50885
50886 char buf[8];
50887- int buf_size;
50888+ size_t buf_size;
50889 u32 reset_flag;
50890
50891 memset(buf, 0, sizeof(buf));
50892@@ -1619,7 +1619,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
50893 {
50894 struct iwl_trans *trans = file->private_data;
50895 char buf[8];
50896- int buf_size;
50897+ size_t buf_size;
50898 int csr;
50899
50900 memset(buf, 0, sizeof(buf));
50901diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
50902index a312c65..162b13a 100644
50903--- a/drivers/net/wireless/mac80211_hwsim.c
50904+++ b/drivers/net/wireless/mac80211_hwsim.c
50905@@ -2573,20 +2573,20 @@ static int __init init_mac80211_hwsim(void)
50906 if (channels < 1)
50907 return -EINVAL;
50908
50909- mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
50910- mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
50911- mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
50912- mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
50913- mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
50914- mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
50915- mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
50916- mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
50917- mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
50918- mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
50919- mac80211_hwsim_mchan_ops.assign_vif_chanctx =
50920- mac80211_hwsim_assign_vif_chanctx;
50921- mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
50922- mac80211_hwsim_unassign_vif_chanctx;
50923+ pax_open_kernel();
50924+ memcpy((void *)&mac80211_hwsim_mchan_ops, &mac80211_hwsim_ops, sizeof mac80211_hwsim_mchan_ops);
50925+ *(void **)&mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
50926+ *(void **)&mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
50927+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
50928+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
50929+ *(void **)&mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
50930+ *(void **)&mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
50931+ *(void **)&mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
50932+ *(void **)&mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
50933+ *(void **)&mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
50934+ *(void **)&mac80211_hwsim_mchan_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
50935+ *(void **)&mac80211_hwsim_mchan_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
50936+ pax_close_kernel();
50937
50938 spin_lock_init(&hwsim_radio_lock);
50939 INIT_LIST_HEAD(&hwsim_radios);
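For context: under constify/KERNEXEC the ops tables live in read-only memory, so runtime patching has to be bracketed by pax_open_kernel()/pax_close_kernel(), which temporarily lift kernel write protection, with the stores cast through plain pointers to strip the const. A rough userspace analogue using mprotect; struct ops and new_hook are hypothetical:

	/* Sketch: userspace analogue of patching a read-only ops table. */
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	struct ops { void (*hook)(void); };

	static void new_hook(void) { puts("patched"); }

	int main(void)
	{
		long pg = sysconf(_SC_PAGESIZE);
		/* Stand-in for an ops struct placed in a read-only page. */
		struct ops *t = mmap(NULL, pg, PROT_READ | PROT_WRITE,
				     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (t == MAP_FAILED)
			return 1;
		t->hook = NULL;
		mprotect(t, pg, PROT_READ);              /* "constified" */

		mprotect(t, pg, PROT_READ | PROT_WRITE); /* pax_open_kernel() */
		t->hook = new_hook;
		mprotect(t, pg, PROT_READ);              /* pax_close_kernel() */

		t->hook();
		return 0;
	}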
50940diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
50941index d2a9a08..0cb175d 100644
50942--- a/drivers/net/wireless/rndis_wlan.c
50943+++ b/drivers/net/wireless/rndis_wlan.c
50944@@ -1236,7 +1236,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
50945
50946 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
50947
50948- if (rts_threshold < 0 || rts_threshold > 2347)
50949+ if (rts_threshold > 2347)
50950 rts_threshold = 2347;
50951
50952 tmp = cpu_to_le32(rts_threshold);
50953diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
50954index d13f25c..2573994 100644
50955--- a/drivers/net/wireless/rt2x00/rt2x00.h
50956+++ b/drivers/net/wireless/rt2x00/rt2x00.h
50957@@ -375,7 +375,7 @@ struct rt2x00_intf {
50958 * for hardware which doesn't support hardware
50959 * sequence counting.
50960 */
50961- atomic_t seqno;
50962+ atomic_unchecked_t seqno;
50963 };
50964
50965 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
50966diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
50967index 5642ccc..01f03eb 100644
50968--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
50969+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
50970@@ -250,9 +250,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
50971 * sequence counter given by mac80211.
50972 */
50973 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
50974- seqno = atomic_add_return(0x10, &intf->seqno);
50975+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
50976 else
50977- seqno = atomic_read(&intf->seqno);
50978+ seqno = atomic_read_unchecked(&intf->seqno);
50979
50980 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
50981 hdr->seq_ctrl |= cpu_to_le16(seqno);
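For context: PAX_REFCOUNT hardens atomic_t against overflow, but this sequence counter is expected to wrap, so it is switched to atomic_unchecked_t, which keeps plain wrapping semantics (my reading of the pattern). A standalone sketch of the distinction; add_unchecked and add_checked are hypothetical:

	/* Sketch: a wrapping counter vs. an overflow-trapping one. */
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	static uint32_t add_unchecked(uint32_t *v, uint32_t d)
	{
		return *v += d;          /* wraps silently, like _unchecked */
	}

	static uint32_t add_checked(uint32_t *v, uint32_t d)
	{
		uint32_t r;
		if (__builtin_add_overflow(*v, d, &r))
			abort();         /* refcount hardening traps here */
		return *v = r;
	}

	int main(void)
	{
		uint32_t seq = UINT32_MAX - 5;
		uint32_t ref = 1;
		printf("%u\n", add_unchecked(&seq, 0x10)); /* wraps: 10 */
		printf("%u\n", add_checked(&ref, 1));      /* fine: 2 */
		return 0;
	}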
50982diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
50983index b661f896..ddf7d2b 100644
50984--- a/drivers/net/wireless/ti/wl1251/sdio.c
50985+++ b/drivers/net/wireless/ti/wl1251/sdio.c
50986@@ -282,13 +282,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
50987
50988 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
50989
50990- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
50991- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
50992+ pax_open_kernel();
50993+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
50994+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
50995+ pax_close_kernel();
50996
50997 wl1251_info("using dedicated interrupt line");
50998 } else {
50999- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
51000- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
51001+ pax_open_kernel();
51002+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
51003+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
51004+ pax_close_kernel();
51005
51006 wl1251_info("using SDIO interrupt");
51007 }
51008diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
51009index d50dfac..0a6f5be3 100644
51010--- a/drivers/net/wireless/ti/wl12xx/main.c
51011+++ b/drivers/net/wireless/ti/wl12xx/main.c
51012@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
51013 sizeof(wl->conf.mem));
51014
51015 /* read data preparation is only needed by wl127x */
51016- wl->ops->prepare_read = wl127x_prepare_read;
51017+ pax_open_kernel();
51018+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
51019+ pax_close_kernel();
51020
51021 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
51022 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
51023@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
51024 sizeof(wl->conf.mem));
51025
51026 /* read data preparation is only needed by wl127x */
51027- wl->ops->prepare_read = wl127x_prepare_read;
51028+ pax_open_kernel();
51029+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
51030+ pax_close_kernel();
51031
51032 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
51033 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
51034diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
51035index de5b4fa..7996ec6 100644
51036--- a/drivers/net/wireless/ti/wl18xx/main.c
51037+++ b/drivers/net/wireless/ti/wl18xx/main.c
51038@@ -1900,8 +1900,10 @@ static int wl18xx_setup(struct wl1271 *wl)
51039 }
51040
51041 if (!checksum_param) {
51042- wl18xx_ops.set_rx_csum = NULL;
51043- wl18xx_ops.init_vif = NULL;
51044+ pax_open_kernel();
51045+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
51046+ *(void **)&wl18xx_ops.init_vif = NULL;
51047+ pax_close_kernel();
51048 }
51049
51050 /* Enable 11a Band only if we have 5G antennas */
51051diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
51052index a912dc0..a8225ba 100644
51053--- a/drivers/net/wireless/zd1211rw/zd_usb.c
51054+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
51055@@ -385,7 +385,7 @@ static inline void handle_regs_int(struct urb *urb)
51056 {
51057 struct zd_usb *usb = urb->context;
51058 struct zd_usb_interrupt *intr = &usb->intr;
51059- int len;
51060+ unsigned int len;
51061 u16 int_num;
51062
51063 ZD_ASSERT(in_interrupt());
51064diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
51065index 683671a..4519fc2 100644
51066--- a/drivers/nfc/nfcwilink.c
51067+++ b/drivers/nfc/nfcwilink.c
51068@@ -497,7 +497,7 @@ static struct nci_ops nfcwilink_ops = {
51069
51070 static int nfcwilink_probe(struct platform_device *pdev)
51071 {
51072- static struct nfcwilink *drv;
51073+ struct nfcwilink *drv;
51074 int rc;
51075 __u32 protocols;
51076
51077diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
51078index d93b2b6..ae50401 100644
51079--- a/drivers/oprofile/buffer_sync.c
51080+++ b/drivers/oprofile/buffer_sync.c
51081@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
51082 if (cookie == NO_COOKIE)
51083 offset = pc;
51084 if (cookie == INVALID_COOKIE) {
51085- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
51086+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
51087 offset = pc;
51088 }
51089 if (cookie != last_cookie) {
51090@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
51091 /* add userspace sample */
51092
51093 if (!mm) {
51094- atomic_inc(&oprofile_stats.sample_lost_no_mm);
51095+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
51096 return 0;
51097 }
51098
51099 cookie = lookup_dcookie(mm, s->eip, &offset);
51100
51101 if (cookie == INVALID_COOKIE) {
51102- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
51103+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
51104 return 0;
51105 }
51106
51107@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
51108 /* ignore backtraces if failed to add a sample */
51109 if (state == sb_bt_start) {
51110 state = sb_bt_ignore;
51111- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
51112+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
51113 }
51114 }
51115 release_mm(mm);
51116diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
51117index c0cc4e7..44d4e54 100644
51118--- a/drivers/oprofile/event_buffer.c
51119+++ b/drivers/oprofile/event_buffer.c
51120@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
51121 }
51122
51123 if (buffer_pos == buffer_size) {
51124- atomic_inc(&oprofile_stats.event_lost_overflow);
51125+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
51126 return;
51127 }
51128
51129diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
51130index ed2c3ec..deda85a 100644
51131--- a/drivers/oprofile/oprof.c
51132+++ b/drivers/oprofile/oprof.c
51133@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
51134 if (oprofile_ops.switch_events())
51135 return;
51136
51137- atomic_inc(&oprofile_stats.multiplex_counter);
51138+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
51139 start_switch_worker();
51140 }
51141
51142diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
51143index ee2cfce..7f8f699 100644
51144--- a/drivers/oprofile/oprofile_files.c
51145+++ b/drivers/oprofile/oprofile_files.c
51146@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
51147
51148 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
51149
51150-static ssize_t timeout_read(struct file *file, char __user *buf,
51151+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
51152 size_t count, loff_t *offset)
51153 {
51154 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
51155diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
51156index 59659ce..6c860a0 100644
51157--- a/drivers/oprofile/oprofile_stats.c
51158+++ b/drivers/oprofile/oprofile_stats.c
51159@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
51160 cpu_buf->sample_invalid_eip = 0;
51161 }
51162
51163- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
51164- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
51165- atomic_set(&oprofile_stats.event_lost_overflow, 0);
51166- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
51167- atomic_set(&oprofile_stats.multiplex_counter, 0);
51168+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
51169+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
51170+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
51171+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
51172+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
51173 }
51174
51175
51176diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
51177index 1fc622b..8c48fc3 100644
51178--- a/drivers/oprofile/oprofile_stats.h
51179+++ b/drivers/oprofile/oprofile_stats.h
51180@@ -13,11 +13,11 @@
51181 #include <linux/atomic.h>
51182
51183 struct oprofile_stat_struct {
51184- atomic_t sample_lost_no_mm;
51185- atomic_t sample_lost_no_mapping;
51186- atomic_t bt_lost_no_mapping;
51187- atomic_t event_lost_overflow;
51188- atomic_t multiplex_counter;
51189+ atomic_unchecked_t sample_lost_no_mm;
51190+ atomic_unchecked_t sample_lost_no_mapping;
51191+ atomic_unchecked_t bt_lost_no_mapping;
51192+ atomic_unchecked_t event_lost_overflow;
51193+ atomic_unchecked_t multiplex_counter;
51194 };
51195
51196 extern struct oprofile_stat_struct oprofile_stats;
51197diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
51198index 3f49345..c750d0b 100644
51199--- a/drivers/oprofile/oprofilefs.c
51200+++ b/drivers/oprofile/oprofilefs.c
51201@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
51202
51203 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
51204 {
51205- atomic_t *val = file->private_data;
51206- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
51207+ atomic_unchecked_t *val = file->private_data;
51208+ return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
51209 }
51210
51211
51212@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
51213
51214
51215 int oprofilefs_create_ro_atomic(struct dentry *root,
51216- char const *name, atomic_t *val)
51217+ char const *name, atomic_unchecked_t *val)
51218 {
51219 return __oprofilefs_create_file(root, name,
51220 &atomic_ro_fops, 0444, val);
51221diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
51222index 61be1d9..dec05d7 100644
51223--- a/drivers/oprofile/timer_int.c
51224+++ b/drivers/oprofile/timer_int.c
51225@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
51226 return NOTIFY_OK;
51227 }
51228
51229-static struct notifier_block __refdata oprofile_cpu_notifier = {
51230+static struct notifier_block oprofile_cpu_notifier = {
51231 .notifier_call = oprofile_cpu_notify,
51232 };
51233
51234diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
51235index 3b47080..6cd05dd 100644
51236--- a/drivers/parport/procfs.c
51237+++ b/drivers/parport/procfs.c
51238@@ -64,7 +64,7 @@ static int do_active_device(struct ctl_table *table, int write,
51239
51240 *ppos += len;
51241
51242- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
51243+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
51244 }
51245
51246 #ifdef CONFIG_PARPORT_1284
51247@@ -106,7 +106,7 @@ static int do_autoprobe(struct ctl_table *table, int write,
51248
51249 *ppos += len;
51250
51251- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
51252+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
51253 }
51254 #endif /* IEEE1284.3 support. */
51255
51256diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
51257index 8dcccff..35d701d 100644
51258--- a/drivers/pci/hotplug/acpiphp_ibm.c
51259+++ b/drivers/pci/hotplug/acpiphp_ibm.c
51260@@ -452,7 +452,9 @@ static int __init ibm_acpiphp_init(void)
51261 goto init_cleanup;
51262 }
51263
51264- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
51265+ pax_open_kernel();
51266+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
51267+ pax_close_kernel();
51268 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
51269
51270 return retval;
51271diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
51272index 04fcd78..39e83f1 100644
51273--- a/drivers/pci/hotplug/cpcihp_generic.c
51274+++ b/drivers/pci/hotplug/cpcihp_generic.c
51275@@ -73,7 +73,6 @@ static u16 port;
51276 static unsigned int enum_bit;
51277 static u8 enum_mask;
51278
51279-static struct cpci_hp_controller_ops generic_hpc_ops;
51280 static struct cpci_hp_controller generic_hpc;
51281
51282 static int __init validate_parameters(void)
51283@@ -139,6 +138,10 @@ static int query_enum(void)
51284 return ((value & enum_mask) == enum_mask);
51285 }
51286
51287+static struct cpci_hp_controller_ops generic_hpc_ops = {
51288+ .query_enum = query_enum,
51289+};
51290+
51291 static int __init cpcihp_generic_init(void)
51292 {
51293 int status;
51294@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
51295 pci_dev_put(dev);
51296
51297 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
51298- generic_hpc_ops.query_enum = query_enum;
51299 generic_hpc.ops = &generic_hpc_ops;
51300
51301 status = cpci_hp_register_controller(&generic_hpc);
51302diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
51303index 6757b3e..d3bad62 100644
51304--- a/drivers/pci/hotplug/cpcihp_zt5550.c
51305+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
51306@@ -59,7 +59,6 @@
51307 /* local variables */
51308 static bool debug;
51309 static bool poll;
51310-static struct cpci_hp_controller_ops zt5550_hpc_ops;
51311 static struct cpci_hp_controller zt5550_hpc;
51312
51313 /* Primary cPCI bus bridge device */
51314@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
51315 return 0;
51316 }
51317
51318+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
51319+ .query_enum = zt5550_hc_query_enum,
51320+};
51321+
51322 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
51323 {
51324 int status;
51325@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
51326 dbg("returned from zt5550_hc_config");
51327
51328 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
51329- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
51330 zt5550_hpc.ops = &zt5550_hpc_ops;
51331 if(!poll) {
51332 zt5550_hpc.irq = hc_dev->irq;
51333 zt5550_hpc.irq_flags = IRQF_SHARED;
51334 zt5550_hpc.dev_id = hc_dev;
51335
51336- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
51337- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
51338- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
51339+ pax_open_kernel();
51340+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
51341+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
51342+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
51343+ pax_close_kernel();
51344 } else {
51345 info("using ENUM# polling mode");
51346 }
51347diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
51348index 0968a9b..5a00edf 100644
51349--- a/drivers/pci/hotplug/cpqphp_nvram.c
51350+++ b/drivers/pci/hotplug/cpqphp_nvram.c
51351@@ -427,9 +427,13 @@ static u32 store_HRT (void __iomem *rom_start)
51352
51353 void compaq_nvram_init (void __iomem *rom_start)
51354 {
51355+
51356+#ifndef CONFIG_PAX_KERNEXEC
51357 if (rom_start) {
51358 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
51359 }
51360+#endif
51361+
51362 dbg("int15 entry = %p\n", compaq_int15_entry_point);
51363
51364 /* initialize our int15 lock */
51365diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
51366index 56d8486..f26113f 100644
51367--- a/drivers/pci/hotplug/pci_hotplug_core.c
51368+++ b/drivers/pci/hotplug/pci_hotplug_core.c
51369@@ -436,8 +436,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
51370 return -EINVAL;
51371 }
51372
51373- slot->ops->owner = owner;
51374- slot->ops->mod_name = mod_name;
51375+ pax_open_kernel();
51376+ *(struct module **)&slot->ops->owner = owner;
51377+ *(const char **)&slot->ops->mod_name = mod_name;
51378+ pax_close_kernel();
51379
51380 mutex_lock(&pci_hp_mutex);
51381 /*
51382diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
51383index a2297db..7c7d161 100644
51384--- a/drivers/pci/hotplug/pciehp_core.c
51385+++ b/drivers/pci/hotplug/pciehp_core.c
51386@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
51387 struct slot *slot = ctrl->slot;
51388 struct hotplug_slot *hotplug = NULL;
51389 struct hotplug_slot_info *info = NULL;
51390- struct hotplug_slot_ops *ops = NULL;
51391+ hotplug_slot_ops_no_const *ops = NULL;
51392 char name[SLOT_NAME_SIZE];
51393 int retval = -ENOMEM;
51394
51395diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
51396index 13f3d30..363cb44 100644
51397--- a/drivers/pci/msi.c
51398+++ b/drivers/pci/msi.c
51399@@ -523,8 +523,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
51400 {
51401 struct attribute **msi_attrs;
51402 struct attribute *msi_attr;
51403- struct device_attribute *msi_dev_attr;
51404- struct attribute_group *msi_irq_group;
51405+ device_attribute_no_const *msi_dev_attr;
51406+ attribute_group_no_const *msi_irq_group;
51407 const struct attribute_group **msi_irq_groups;
51408 struct msi_desc *entry;
51409 int ret = -ENOMEM;
51410@@ -584,7 +584,7 @@ error_attrs:
51411 count = 0;
51412 msi_attr = msi_attrs[count];
51413 while (msi_attr) {
51414- msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
51415+ msi_dev_attr = container_of(msi_attr, device_attribute_no_const, attr);
51416 kfree(msi_attr->name);
51417 kfree(msi_dev_attr);
51418 ++count;
51419diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
51420index 9ff0a90..e819dda 100644
51421--- a/drivers/pci/pci-sysfs.c
51422+++ b/drivers/pci/pci-sysfs.c
51423@@ -1134,7 +1134,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
51424 {
51425 /* allocate attribute structure, piggyback attribute name */
51426 int name_len = write_combine ? 13 : 10;
51427- struct bin_attribute *res_attr;
51428+ bin_attribute_no_const *res_attr;
51429 int retval;
51430
51431 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
51432@@ -1311,7 +1311,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
51433 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
51434 {
51435 int retval;
51436- struct bin_attribute *attr;
51437+ bin_attribute_no_const *attr;
51438
51439 /* If the device has VPD, try to expose it in sysfs. */
51440 if (dev->vpd) {
51441@@ -1358,7 +1358,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
51442 {
51443 int retval;
51444 int rom_size = 0;
51445- struct bin_attribute *attr;
51446+ bin_attribute_no_const *attr;
51447
51448 if (!sysfs_initialized)
51449 return -EACCES;
51450diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
51451index 0601890..dc15007 100644
51452--- a/drivers/pci/pci.h
51453+++ b/drivers/pci/pci.h
51454@@ -91,7 +91,7 @@ struct pci_vpd_ops {
51455 struct pci_vpd {
51456 unsigned int len;
51457 const struct pci_vpd_ops *ops;
51458- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
51459+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
51460 };
51461
51462 int pci_vpd_pci22_init(struct pci_dev *dev);
51463diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
51464index e1e7026..d28dd33 100644
51465--- a/drivers/pci/pcie/aspm.c
51466+++ b/drivers/pci/pcie/aspm.c
51467@@ -27,9 +27,9 @@
51468 #define MODULE_PARAM_PREFIX "pcie_aspm."
51469
51470 /* Note: those are not register definitions */
51471-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
51472-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
51473-#define ASPM_STATE_L1 (4) /* L1 state */
51474+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
51475+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
51476+#define ASPM_STATE_L1 (4U) /* L1 state */
51477 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
51478 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
51479
51480diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
51481index e3cf8a2..be1baf0 100644
51482--- a/drivers/pci/probe.c
51483+++ b/drivers/pci/probe.c
51484@@ -176,7 +176,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
51485 struct pci_bus_region region, inverted_region;
51486 bool bar_too_big = false, bar_too_high = false, bar_invalid = false;
51487
51488- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
51489+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
51490
51491 /* No printks while decoding is disabled! */
51492 if (!dev->mmio_always_on) {
51493diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
51494index 3f155e7..0f4b1f0 100644
51495--- a/drivers/pci/proc.c
51496+++ b/drivers/pci/proc.c
51497@@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
51498 static int __init pci_proc_init(void)
51499 {
51500 struct pci_dev *dev = NULL;
51501+
51502+#ifdef CONFIG_GRKERNSEC_PROC_ADD
51503+#ifdef CONFIG_GRKERNSEC_PROC_USER
51504+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
51505+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51506+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
51507+#endif
51508+#else
51509 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
51510+#endif
51511 proc_create("devices", 0, proc_bus_pci_dir,
51512 &proc_bus_pci_dev_operations);
51513 proc_initialized = 1;
51514diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
51515index 7f1a2e2..bc4b405 100644
51516--- a/drivers/platform/chrome/chromeos_laptop.c
51517+++ b/drivers/platform/chrome/chromeos_laptop.c
51518@@ -395,7 +395,7 @@ static struct chromeos_laptop cr48 = {
51519 .callback = chromeos_laptop_dmi_matched, \
51520 .driver_data = (void *)&board_
51521
51522-static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
51523+static struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
51524 {
51525 .ident = "Samsung Series 5 550",
51526 .matches = {
51527diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
51528index 297b664..ab91e39 100644
51529--- a/drivers/platform/x86/alienware-wmi.c
51530+++ b/drivers/platform/x86/alienware-wmi.c
51531@@ -133,7 +133,7 @@ struct wmax_led_args {
51532 } __packed;
51533
51534 static struct platform_device *platform_device;
51535-static struct device_attribute *zone_dev_attrs;
51536+static device_attribute_no_const *zone_dev_attrs;
51537 static struct attribute **zone_attrs;
51538 static struct platform_zone *zone_data;
51539
51540@@ -144,7 +144,7 @@ static struct platform_driver platform_driver = {
51541 }
51542 };
51543
51544-static struct attribute_group zone_attribute_group = {
51545+static attribute_group_no_const zone_attribute_group = {
51546 .name = "rgb_zones",
51547 };
51548
51549diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
51550index 3c6cced..12e0771 100644
51551--- a/drivers/platform/x86/asus-wmi.c
51552+++ b/drivers/platform/x86/asus-wmi.c
51553@@ -1592,6 +1592,10 @@ static int show_dsts(struct seq_file *m, void *data)
51554 int err;
51555 u32 retval = -1;
51556
51557+#ifdef CONFIG_GRKERNSEC_KMEM
51558+ return -EPERM;
51559+#endif
51560+
51561 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
51562
51563 if (err < 0)
51564@@ -1608,6 +1612,10 @@ static int show_devs(struct seq_file *m, void *data)
51565 int err;
51566 u32 retval = -1;
51567
51568+#ifdef CONFIG_GRKERNSEC_KMEM
51569+ return -EPERM;
51570+#endif
51571+
51572 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
51573 &retval);
51574
51575@@ -1632,6 +1640,10 @@ static int show_call(struct seq_file *m, void *data)
51576 union acpi_object *obj;
51577 acpi_status status;
51578
51579+#ifdef CONFIG_GRKERNSEC_KMEM
51580+ return -EPERM;
51581+#endif
51582+
51583 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
51584 1, asus->debug.method_id,
51585 &input, &output);
51586diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
51587index 62f8030..c7f2a45 100644
51588--- a/drivers/platform/x86/msi-laptop.c
51589+++ b/drivers/platform/x86/msi-laptop.c
51590@@ -1000,12 +1000,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
51591
51592 if (!quirks->ec_read_only) {
51593 /* allow userland write sysfs file */
51594- dev_attr_bluetooth.store = store_bluetooth;
51595- dev_attr_wlan.store = store_wlan;
51596- dev_attr_threeg.store = store_threeg;
51597- dev_attr_bluetooth.attr.mode |= S_IWUSR;
51598- dev_attr_wlan.attr.mode |= S_IWUSR;
51599- dev_attr_threeg.attr.mode |= S_IWUSR;
51600+ pax_open_kernel();
51601+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
51602+ *(void **)&dev_attr_wlan.store = store_wlan;
51603+ *(void **)&dev_attr_threeg.store = store_threeg;
51604+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
51605+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
51606+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
51607+ pax_close_kernel();
51608 }
51609
51610 /* disable hardware control by fn key */
51611diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
51612index 70222f2..8c8ce66 100644
51613--- a/drivers/platform/x86/msi-wmi.c
51614+++ b/drivers/platform/x86/msi-wmi.c
51615@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
51616 static void msi_wmi_notify(u32 value, void *context)
51617 {
51618 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
51619- static struct key_entry *key;
51620+ struct key_entry *key;
51621 union acpi_object *obj;
51622 acpi_status status;
51623
51624diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
51625index 9c5a074..06c976a 100644
51626--- a/drivers/platform/x86/sony-laptop.c
51627+++ b/drivers/platform/x86/sony-laptop.c
51628@@ -2527,7 +2527,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
51629 }
51630
51631 /* High speed charging function */
51632-static struct device_attribute *hsc_handle;
51633+static device_attribute_no_const *hsc_handle;
51634
51635 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
51636 struct device_attribute *attr,
51637@@ -2601,7 +2601,7 @@ static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
51638 }
51639
51640 /* low battery function */
51641-static struct device_attribute *lowbatt_handle;
51642+static device_attribute_no_const *lowbatt_handle;
51643
51644 static ssize_t sony_nc_lowbatt_store(struct device *dev,
51645 struct device_attribute *attr,
51646@@ -2667,7 +2667,7 @@ static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
51647 }
51648
51649 /* fan speed function */
51650-static struct device_attribute *fan_handle, *hsf_handle;
51651+static device_attribute_no_const *fan_handle, *hsf_handle;
51652
51653 static ssize_t sony_nc_hsfan_store(struct device *dev,
51654 struct device_attribute *attr,
51655@@ -2774,7 +2774,7 @@ static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
51656 }
51657
51658 /* USB charge function */
51659-static struct device_attribute *uc_handle;
51660+static device_attribute_no_const *uc_handle;
51661
51662 static ssize_t sony_nc_usb_charge_store(struct device *dev,
51663 struct device_attribute *attr,
51664@@ -2848,7 +2848,7 @@ static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
51665 }
51666
51667 /* Panel ID function */
51668-static struct device_attribute *panel_handle;
51669+static device_attribute_no_const *panel_handle;
51670
51671 static ssize_t sony_nc_panelid_show(struct device *dev,
51672 struct device_attribute *attr, char *buffer)
51673@@ -2895,7 +2895,7 @@ static void sony_nc_panelid_cleanup(struct platform_device *pd)
51674 }
51675
51676 /* smart connect function */
51677-static struct device_attribute *sc_handle;
51678+static device_attribute_no_const *sc_handle;
51679
51680 static ssize_t sony_nc_smart_conn_store(struct device *dev,
51681 struct device_attribute *attr,
51682diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
51683index d82f196..5458f34 100644
51684--- a/drivers/platform/x86/thinkpad_acpi.c
51685+++ b/drivers/platform/x86/thinkpad_acpi.c
51686@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
51687 return 0;
51688 }
51689
51690-void static hotkey_mask_warn_incomplete_mask(void)
51691+static void hotkey_mask_warn_incomplete_mask(void)
51692 {
51693 /* log only what the user can fix... */
51694 const u32 wantedmask = hotkey_driver_mask &
51695@@ -2438,10 +2438,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
51696 && !tp_features.bright_unkfw)
51697 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
51698 }
51699+}
51700
51701 #undef TPACPI_COMPARE_KEY
51702 #undef TPACPI_MAY_SEND_KEY
51703-}
51704
51705 /*
51706 * Polling driver
51707diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
51708index 438d4c7..ca8a2fb 100644
51709--- a/drivers/pnp/pnpbios/bioscalls.c
51710+++ b/drivers/pnp/pnpbios/bioscalls.c
51711@@ -59,7 +59,7 @@ do { \
51712 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
51713 } while(0)
51714
51715-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
51716+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
51717 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
51718
51719 /*
51720@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
51721
51722 cpu = get_cpu();
51723 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
51724+
51725+ pax_open_kernel();
51726 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
51727+ pax_close_kernel();
51728
51729 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
51730 spin_lock_irqsave(&pnp_bios_lock, flags);
51731@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
51732 :"memory");
51733 spin_unlock_irqrestore(&pnp_bios_lock, flags);
51734
51735+ pax_open_kernel();
51736 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
51737+ pax_close_kernel();
51738+
51739 put_cpu();
51740
51741 /* If we get here and this is set then the PnP BIOS faulted on us. */
51742@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
51743 return status;
51744 }
51745
51746-void pnpbios_calls_init(union pnp_bios_install_struct *header)
51747+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
51748 {
51749 int i;
51750
51751@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
51752 pnp_bios_callpoint.offset = header->fields.pm16offset;
51753 pnp_bios_callpoint.segment = PNP_CS16;
51754
51755+ pax_open_kernel();
51756+
51757 for_each_possible_cpu(i) {
51758 struct desc_struct *gdt = get_cpu_gdt_table(i);
51759 if (!gdt)
51760@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
51761 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
51762 (unsigned long)__va(header->fields.pm16dseg));
51763 }
51764+
51765+ pax_close_kernel();
51766 }
51767diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
51768index 0c52e2a..3421ab7 100644
51769--- a/drivers/power/pda_power.c
51770+++ b/drivers/power/pda_power.c
51771@@ -37,7 +37,11 @@ static int polling;
51772
51773 #if IS_ENABLED(CONFIG_USB_PHY)
51774 static struct usb_phy *transceiver;
51775-static struct notifier_block otg_nb;
51776+static int otg_handle_notification(struct notifier_block *nb,
51777+ unsigned long event, void *unused);
51778+static struct notifier_block otg_nb = {
51779+ .notifier_call = otg_handle_notification
51780+};
51781 #endif
51782
51783 static struct regulator *ac_draw;
51784@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
51785
51786 #if IS_ENABLED(CONFIG_USB_PHY)
51787 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
51788- otg_nb.notifier_call = otg_handle_notification;
51789 ret = usb_register_notifier(transceiver, &otg_nb);
51790 if (ret) {
51791 dev_err(dev, "failure to register otg notifier\n");
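The pda_power hunk converts otg_nb from a structure patched at probe time into one fully initialized at build time: a forward declaration of the callback lets the designated initializer reference it, and the runtime assignment of .notifier_call disappears. Once nothing writes the structure after compilation, the constify plugin can place it in read-only memory. The shape of the pattern, using the names from the hunk:

    static int otg_handle_notification(struct notifier_block *nb,
                                       unsigned long event, void *unused);
    static struct notifier_block otg_nb = {
            .notifier_call = otg_handle_notification,  /* bound at build time */
    };
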
51792diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
51793index cc439fd..8fa30df 100644
51794--- a/drivers/power/power_supply.h
51795+++ b/drivers/power/power_supply.h
51796@@ -16,12 +16,12 @@ struct power_supply;
51797
51798 #ifdef CONFIG_SYSFS
51799
51800-extern void power_supply_init_attrs(struct device_type *dev_type);
51801+extern void power_supply_init_attrs(void);
51802 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
51803
51804 #else
51805
51806-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
51807+static inline void power_supply_init_attrs(void) {}
51808 #define power_supply_uevent NULL
51809
51810 #endif /* CONFIG_SYSFS */
51811diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
51812index 5a5a24e..f7a3754 100644
51813--- a/drivers/power/power_supply_core.c
51814+++ b/drivers/power/power_supply_core.c
51815@@ -28,7 +28,10 @@ EXPORT_SYMBOL_GPL(power_supply_class);
51816 ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
51817 EXPORT_SYMBOL_GPL(power_supply_notifier);
51818
51819-static struct device_type power_supply_dev_type;
51820+extern const struct attribute_group *power_supply_attr_groups[];
51821+static struct device_type power_supply_dev_type = {
51822+ .groups = power_supply_attr_groups,
51823+};
51824
51825 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
51826 struct power_supply *supply)
51827@@ -639,7 +642,7 @@ static int __init power_supply_class_init(void)
51828 return PTR_ERR(power_supply_class);
51829
51830 power_supply_class->dev_uevent = power_supply_uevent;
51831- power_supply_init_attrs(&power_supply_dev_type);
51832+ power_supply_init_attrs();
51833
51834 return 0;
51835 }
51836diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
51837index 44420d1..967126e 100644
51838--- a/drivers/power/power_supply_sysfs.c
51839+++ b/drivers/power/power_supply_sysfs.c
51840@@ -230,17 +230,15 @@ static struct attribute_group power_supply_attr_group = {
51841 .is_visible = power_supply_attr_is_visible,
51842 };
51843
51844-static const struct attribute_group *power_supply_attr_groups[] = {
51845+const struct attribute_group *power_supply_attr_groups[] = {
51846 &power_supply_attr_group,
51847 NULL,
51848 };
51849
51850-void power_supply_init_attrs(struct device_type *dev_type)
51851+void power_supply_init_attrs(void)
51852 {
51853 int i;
51854
51855- dev_type->groups = power_supply_attr_groups;
51856-
51857 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
51858 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
51859 }
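The three power_supply hunks are one coordinated change: power_supply_attr_groups loses its static qualifier so the core file can see it, power_supply_dev_type gains a designated initializer that wires .groups up at build time, and power_supply_init_attrs() drops the dev_type parameter it used to write through. The net effect is that the struct device_type is never modified at runtime and becomes eligible for constification. In outline:

    /* Defined in the sysfs file, referenced here at build time: */
    extern const struct attribute_group *power_supply_attr_groups[];

    static struct device_type power_supply_dev_type = {
            .groups = power_supply_attr_groups,  /* no init-time write needed */
    };
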
51860diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
51861index 84419af..268ede8 100644
51862--- a/drivers/powercap/powercap_sys.c
51863+++ b/drivers/powercap/powercap_sys.c
51864@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
51865 struct device_attribute name_attr;
51866 };
51867
51868+static ssize_t show_constraint_name(struct device *dev,
51869+ struct device_attribute *dev_attr,
51870+ char *buf);
51871+
51872 static struct powercap_constraint_attr
51873- constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
51874+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
51875+ [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
51876+ .power_limit_attr = {
51877+ .attr = {
51878+ .name = NULL,
51879+ .mode = S_IWUSR | S_IRUGO
51880+ },
51881+ .show = show_constraint_power_limit_uw,
51882+ .store = store_constraint_power_limit_uw
51883+ },
51884+
51885+ .time_window_attr = {
51886+ .attr = {
51887+ .name = NULL,
51888+ .mode = S_IWUSR | S_IRUGO
51889+ },
51890+ .show = show_constraint_time_window_us,
51891+ .store = store_constraint_time_window_us
51892+ },
51893+
51894+ .max_power_attr = {
51895+ .attr = {
51896+ .name = NULL,
51897+ .mode = S_IRUGO
51898+ },
51899+ .show = show_constraint_max_power_uw,
51900+ .store = NULL
51901+ },
51902+
51903+ .min_power_attr = {
51904+ .attr = {
51905+ .name = NULL,
51906+ .mode = S_IRUGO
51907+ },
51908+ .show = show_constraint_min_power_uw,
51909+ .store = NULL
51910+ },
51911+
51912+ .max_time_window_attr = {
51913+ .attr = {
51914+ .name = NULL,
51915+ .mode = S_IRUGO
51916+ },
51917+ .show = show_constraint_max_time_window_us,
51918+ .store = NULL
51919+ },
51920+
51921+ .min_time_window_attr = {
51922+ .attr = {
51923+ .name = NULL,
51924+ .mode = S_IRUGO
51925+ },
51926+ .show = show_constraint_min_time_window_us,
51927+ .store = NULL
51928+ },
51929+
51930+ .name_attr = {
51931+ .attr = {
51932+ .name = NULL,
51933+ .mode = S_IRUGO
51934+ },
51935+ .show = show_constraint_name,
51936+ .store = NULL
51937+ }
51938+ }
51939+};
51940
51941 /* A list of powercap control_types */
51942 static LIST_HEAD(powercap_cntrl_list);
51943@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
51944 }
51945
51946 static int create_constraint_attribute(int id, const char *name,
51947- int mode,
51948- struct device_attribute *dev_attr,
51949- ssize_t (*show)(struct device *,
51950- struct device_attribute *, char *),
51951- ssize_t (*store)(struct device *,
51952- struct device_attribute *,
51953- const char *, size_t)
51954- )
51955+ struct device_attribute *dev_attr)
51956 {
51957+ name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
51958
51959- dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
51960- id, name);
51961- if (!dev_attr->attr.name)
51962+ if (!name)
51963 return -ENOMEM;
51964- dev_attr->attr.mode = mode;
51965- dev_attr->show = show;
51966- dev_attr->store = store;
51967+
51968+ pax_open_kernel();
51969+ *(const char **)&dev_attr->attr.name = name;
51970+ pax_close_kernel();
51971
51972 return 0;
51973 }
51974@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
51975
51976 for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
51977 ret = create_constraint_attribute(i, "power_limit_uw",
51978- S_IWUSR | S_IRUGO,
51979- &constraint_attrs[i].power_limit_attr,
51980- show_constraint_power_limit_uw,
51981- store_constraint_power_limit_uw);
51982+ &constraint_attrs[i].power_limit_attr);
51983 if (ret)
51984 goto err_alloc;
51985 ret = create_constraint_attribute(i, "time_window_us",
51986- S_IWUSR | S_IRUGO,
51987- &constraint_attrs[i].time_window_attr,
51988- show_constraint_time_window_us,
51989- store_constraint_time_window_us);
51990+ &constraint_attrs[i].time_window_attr);
51991 if (ret)
51992 goto err_alloc;
51993- ret = create_constraint_attribute(i, "name", S_IRUGO,
51994- &constraint_attrs[i].name_attr,
51995- show_constraint_name,
51996- NULL);
51997+ ret = create_constraint_attribute(i, "name",
51998+ &constraint_attrs[i].name_attr);
51999 if (ret)
52000 goto err_alloc;
52001- ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
52002- &constraint_attrs[i].max_power_attr,
52003- show_constraint_max_power_uw,
52004- NULL);
52005+ ret = create_constraint_attribute(i, "max_power_uw",
52006+ &constraint_attrs[i].max_power_attr);
52007 if (ret)
52008 goto err_alloc;
52009- ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
52010- &constraint_attrs[i].min_power_attr,
52011- show_constraint_min_power_uw,
52012- NULL);
52013+ ret = create_constraint_attribute(i, "min_power_uw",
52014+ &constraint_attrs[i].min_power_attr);
52015 if (ret)
52016 goto err_alloc;
52017 ret = create_constraint_attribute(i, "max_time_window_us",
52018- S_IRUGO,
52019- &constraint_attrs[i].max_time_window_attr,
52020- show_constraint_max_time_window_us,
52021- NULL);
52022+ &constraint_attrs[i].max_time_window_attr);
52023 if (ret)
52024 goto err_alloc;
52025 ret = create_constraint_attribute(i, "min_time_window_us",
52026- S_IRUGO,
52027- &constraint_attrs[i].min_time_window_attr,
52028- show_constraint_min_time_window_us,
52029- NULL);
52030+ &constraint_attrs[i].min_time_window_attr);
52031 if (ret)
52032 goto err_alloc;
52033
52034@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
52035 power_zone->zone_dev_attrs[count++] =
52036 &dev_attr_max_energy_range_uj.attr;
52037 if (power_zone->ops->get_energy_uj) {
52038+ pax_open_kernel();
52039 if (power_zone->ops->reset_energy_uj)
52040- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
52041+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
52042 else
52043- dev_attr_energy_uj.attr.mode = S_IRUGO;
52044+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
52045+ pax_close_kernel();
52046 power_zone->zone_dev_attrs[count++] =
52047 &dev_attr_energy_uj.attr;
52048 }
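Two techniques combine in the powercap hunks. First, the constraint attribute table is filled in entirely at build time with a GNU range designator, so each slot's mode, show and store members never change afterwards; only the kasprintf()-generated name still has to be installed, and that single store goes through a const-stripping cast inside a pax_open_kernel()/pax_close_kernel() window. Second, the mode of dev_attr_energy_uj, const after constification, is flipped the same way. The name-installation idiom, paraphrased from the hunk (base_name stands in for the reused parameter):

    /* The attribute structures are read-only; the freshly allocated name
       is the one field that must be written after boot. */
    name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, base_name);
    if (!name)
            return -ENOMEM;

    pax_open_kernel();
    *(const char **)&dev_attr->attr.name = name;  /* write through const */
    pax_close_kernel();
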
52049diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
52050index ee3de34..bec7285 100644
52051--- a/drivers/ptp/Kconfig
52052+++ b/drivers/ptp/Kconfig
52053@@ -8,7 +8,6 @@ config PTP_1588_CLOCK
52054 tristate "PTP clock support"
52055 depends on NET
52056 select PPS
52057- select NET_PTP_CLASSIFY
52058 help
52059 The IEEE 1588 standard defines a method to precisely
52060 synchronize distributed clocks over Ethernet networks. The
52061diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
52062index 9c5d414..c7900ce 100644
52063--- a/drivers/ptp/ptp_private.h
52064+++ b/drivers/ptp/ptp_private.h
52065@@ -51,7 +51,7 @@ struct ptp_clock {
52066 struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
52067 wait_queue_head_t tsev_wq;
52068 int defunct; /* tells readers to go away when clock is being removed */
52069- struct device_attribute *pin_dev_attr;
52070+ device_attribute_no_const *pin_dev_attr;
52071 struct attribute **pin_attr;
52072 struct attribute_group pin_attr_group;
52073 };
52074diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
52075index 302e626..12579af 100644
52076--- a/drivers/ptp/ptp_sysfs.c
52077+++ b/drivers/ptp/ptp_sysfs.c
52078@@ -280,7 +280,7 @@ static int ptp_populate_pins(struct ptp_clock *ptp)
52079 goto no_pin_attr;
52080
52081 for (i = 0; i < n_pins; i++) {
52082- struct device_attribute *da = &ptp->pin_dev_attr[i];
52083+ device_attribute_no_const *da = &ptp->pin_dev_attr[i];
52084 sysfs_attr_init(&da->attr);
52085 da->attr.name = info->pin_config[i].name;
52086 da->attr.mode = 0644;
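ptp_clock's per-pin attributes are built in a loop at clock-registration time, so they cannot live in read-only memory. Rather than exempting struct device_attribute globally, grsecurity introduces a _no_const typedef and uses it only at the writable sites; the plain struct stays eligible for constification everywhere else. The alias is defined along these lines in the patched headers (shown here as an assumption, not quoted from this excerpt):

    /* Same layout as struct device_attribute, but the __no_const marker
       tells the constify plugin to leave objects of this type writable. */
    typedef struct device_attribute __no_const device_attribute_no_const;
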
52087diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
52088index 4c1f999..11078c9 100644
52089--- a/drivers/regulator/core.c
52090+++ b/drivers/regulator/core.c
52091@@ -3391,7 +3391,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
52092 {
52093 const struct regulation_constraints *constraints = NULL;
52094 const struct regulator_init_data *init_data;
52095- static atomic_t regulator_no = ATOMIC_INIT(0);
52096+ static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);
52097 struct regulator_dev *rdev;
52098 struct device *dev;
52099 int ret, i;
52100@@ -3461,7 +3461,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
52101 rdev->dev.of_node = of_node_get(config->of_node);
52102 rdev->dev.parent = dev;
52103 dev_set_name(&rdev->dev, "regulator.%d",
52104- atomic_inc_return(&regulator_no) - 1);
52105+ atomic_inc_return_unchecked(&regulator_no) - 1);
52106 ret = device_register(&rdev->dev);
52107 if (ret != 0) {
52108 put_device(&rdev->dev);
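This is the first of many counter conversions in this section. PaX's REFCOUNT hardening makes atomic_t saturate and trap on overflow so reference-count wraps cannot be exploited; counters for which wrapping is harmless by design, like this device-numbering sequence, move to atomic_unchecked_t and the matching *_unchecked accessors to opt out of the check. The pattern, reduced to its core:

    static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);

    static int next_regulator_id(void)
    {
            /* _unchecked: exempt from the REFCOUNT overflow trap;
               wrap-around here is acceptable by design */
            return atomic_inc_return_unchecked(&regulator_no) - 1;
    }
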
52109diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
52110index 2fc4111..6aa88ca 100644
52111--- a/drivers/regulator/max8660.c
52112+++ b/drivers/regulator/max8660.c
52113@@ -424,8 +424,10 @@ static int max8660_probe(struct i2c_client *client,
52114 max8660->shadow_regs[MAX8660_OVER1] = 5;
52115 } else {
52116 /* Otherwise devices can be toggled via software */
52117- max8660_dcdc_ops.enable = max8660_dcdc_enable;
52118- max8660_dcdc_ops.disable = max8660_dcdc_disable;
52119+ pax_open_kernel();
52120+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
52121+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
52122+ pax_close_kernel();
52123 }
52124
52125 /*
52126diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
52127index dbedf17..18ff6b7 100644
52128--- a/drivers/regulator/max8973-regulator.c
52129+++ b/drivers/regulator/max8973-regulator.c
52130@@ -403,9 +403,11 @@ static int max8973_probe(struct i2c_client *client,
52131 if (!pdata || !pdata->enable_ext_control) {
52132 max->desc.enable_reg = MAX8973_VOUT;
52133 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
52134- max->ops.enable = regulator_enable_regmap;
52135- max->ops.disable = regulator_disable_regmap;
52136- max->ops.is_enabled = regulator_is_enabled_regmap;
52137+ pax_open_kernel();
52138+ *(void **)&max->ops.enable = regulator_enable_regmap;
52139+ *(void **)&max->ops.disable = regulator_disable_regmap;
52140+ *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
52141+ pax_close_kernel();
52142 }
52143
52144 if (pdata) {
52145diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
52146index f374fa5..26f0683 100644
52147--- a/drivers/regulator/mc13892-regulator.c
52148+++ b/drivers/regulator/mc13892-regulator.c
52149@@ -582,10 +582,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
52150 }
52151 mc13xxx_unlock(mc13892);
52152
52153- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
52154+ pax_open_kernel();
52155+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
52156 = mc13892_vcam_set_mode;
52157- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
52158+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
52159 = mc13892_vcam_get_mode;
52160+ pax_close_kernel();
52161
52162 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
52163 ARRAY_SIZE(mc13892_regulators));
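max8660, max8973 and mc13892 all select regulator callbacks at probe time based on platform data. With the ops structures constified, those stores would fault, so each assignment is rewritten to cast away const on the individual member and run inside a pax_open_kernel()/pax_close_kernel() window:

    /* Patching individual callbacks in an otherwise read-only ops table
       (regulator_enable_regmap and friends are stock kernel helpers): */
    pax_open_kernel();
    *(void **)&max->ops.enable     = regulator_enable_regmap;
    *(void **)&max->ops.disable    = regulator_disable_regmap;
    *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
    pax_close_kernel();
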
52164diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
52165index b0e4a3e..e5dc11e 100644
52166--- a/drivers/rtc/rtc-cmos.c
52167+++ b/drivers/rtc/rtc-cmos.c
52168@@ -789,7 +789,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
52169 hpet_rtc_timer_init();
52170
52171 /* export at least the first block of NVRAM */
52172- nvram.size = address_space - NVRAM_OFFSET;
52173+ pax_open_kernel();
52174+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
52175+ pax_close_kernel();
52176 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
52177 if (retval < 0) {
52178 dev_dbg(dev, "can't create nvram file? %d\n", retval);
52179diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
52180index d049393..bb20be0 100644
52181--- a/drivers/rtc/rtc-dev.c
52182+++ b/drivers/rtc/rtc-dev.c
52183@@ -16,6 +16,7 @@
52184 #include <linux/module.h>
52185 #include <linux/rtc.h>
52186 #include <linux/sched.h>
52187+#include <linux/grsecurity.h>
52188 #include "rtc-core.h"
52189
52190 static dev_t rtc_devt;
52191@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
52192 if (copy_from_user(&tm, uarg, sizeof(tm)))
52193 return -EFAULT;
52194
52195+ gr_log_timechange();
52196+
52197 return rtc_set_time(rtc, &tm);
52198
52199 case RTC_PIE_ON:
52200diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
52201index f03d5ba..8325bf6 100644
52202--- a/drivers/rtc/rtc-ds1307.c
52203+++ b/drivers/rtc/rtc-ds1307.c
52204@@ -107,7 +107,7 @@ struct ds1307 {
52205 u8 offset; /* register's offset */
52206 u8 regs[11];
52207 u16 nvram_offset;
52208- struct bin_attribute *nvram;
52209+ bin_attribute_no_const *nvram;
52210 enum ds_type type;
52211 unsigned long flags;
52212 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
52213diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
52214index 11880c1..b823aa4 100644
52215--- a/drivers/rtc/rtc-m48t59.c
52216+++ b/drivers/rtc/rtc-m48t59.c
52217@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
52218 if (IS_ERR(m48t59->rtc))
52219 return PTR_ERR(m48t59->rtc);
52220
52221- m48t59_nvram_attr.size = pdata->offset;
52222+ pax_open_kernel();
52223+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
52224+ pax_close_kernel();
52225
52226 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
52227 if (ret)
52228diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
52229index e693af6..2e525b6 100644
52230--- a/drivers/scsi/bfa/bfa_fcpim.h
52231+++ b/drivers/scsi/bfa/bfa_fcpim.h
52232@@ -36,7 +36,7 @@ struct bfa_iotag_s {
52233
52234 struct bfa_itn_s {
52235 bfa_isr_func_t isr;
52236-};
52237+} __no_const;
52238
52239 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
52240 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
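Where a structure consists of function pointers that drivers fill in at runtime, as bfa_itn_s does via bfa_itn_create(), the constify plugin's blanket treatment of ops-like structures would break it. The __no_const type attribute, supplied by the plugin, marks such structures as intentionally writable:

    struct bfa_itn_s {
            bfa_isr_func_t isr;     /* installed per-rport at runtime */
    } __no_const;                   /* keep writable despite constify */
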
52241diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
52242index a3ab5cc..8143622 100644
52243--- a/drivers/scsi/bfa/bfa_fcs.c
52244+++ b/drivers/scsi/bfa/bfa_fcs.c
52245@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
52246 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
52247
52248 static struct bfa_fcs_mod_s fcs_modules[] = {
52249- { bfa_fcs_port_attach, NULL, NULL },
52250- { bfa_fcs_uf_attach, NULL, NULL },
52251- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
52252- bfa_fcs_fabric_modexit },
52253+ {
52254+ .attach = bfa_fcs_port_attach,
52255+ .modinit = NULL,
52256+ .modexit = NULL
52257+ },
52258+ {
52259+ .attach = bfa_fcs_uf_attach,
52260+ .modinit = NULL,
52261+ .modexit = NULL
52262+ },
52263+ {
52264+ .attach = bfa_fcs_fabric_attach,
52265+ .modinit = bfa_fcs_fabric_modinit,
52266+ .modexit = bfa_fcs_fabric_modexit
52267+ },
52268 };
52269
52270 /*
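The fcs_modules rewrite is purely a change of initializer style, but it matters under this patch: positional initializer lists bind by member order, which structure-layout randomization and future field insertions both invalidate silently, while designated initializers bind by name and survive any reordering. Compare:

    /* Positional: breaks silently if bfa_fcs_mod_s is ever reordered. */
    /*   { bfa_fcs_port_attach, NULL, NULL },                          */

    /* Designated: order-independent and self-documenting. */
    static struct bfa_fcs_mod_s fcs_modules[] = {
            {
                    .attach  = bfa_fcs_port_attach,
                    .modinit = NULL,
                    .modexit = NULL,
            },
    };
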
52271diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
52272index ff75ef8..2dfe00a 100644
52273--- a/drivers/scsi/bfa/bfa_fcs_lport.c
52274+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
52275@@ -89,15 +89,26 @@ static struct {
52276 void (*offline) (struct bfa_fcs_lport_s *port);
52277 } __port_action[] = {
52278 {
52279- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
52280- bfa_fcs_lport_unknown_offline}, {
52281- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
52282- bfa_fcs_lport_fab_offline}, {
52283- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
52284- bfa_fcs_lport_n2n_offline}, {
52285- bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
52286- bfa_fcs_lport_loop_offline},
52287- };
52288+ .init = bfa_fcs_lport_unknown_init,
52289+ .online = bfa_fcs_lport_unknown_online,
52290+ .offline = bfa_fcs_lport_unknown_offline
52291+ },
52292+ {
52293+ .init = bfa_fcs_lport_fab_init,
52294+ .online = bfa_fcs_lport_fab_online,
52295+ .offline = bfa_fcs_lport_fab_offline
52296+ },
52297+ {
52298+ .init = bfa_fcs_lport_n2n_init,
52299+ .online = bfa_fcs_lport_n2n_online,
52300+ .offline = bfa_fcs_lport_n2n_offline
52301+ },
52302+ {
52303+ .init = bfa_fcs_lport_loop_init,
52304+ .online = bfa_fcs_lport_loop_online,
52305+ .offline = bfa_fcs_lport_loop_offline
52306+ },
52307+};
52308
52309 /*
52310 * fcs_port_sm FCS logical port state machine
52311diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
52312index 2e28392..9d865b6 100644
52313--- a/drivers/scsi/bfa/bfa_ioc.h
52314+++ b/drivers/scsi/bfa/bfa_ioc.h
52315@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
52316 bfa_ioc_disable_cbfn_t disable_cbfn;
52317 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
52318 bfa_ioc_reset_cbfn_t reset_cbfn;
52319-};
52320+} __no_const;
52321
52322 /*
52323 * IOC event notification mechanism.
52324@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
52325 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
52326 enum bfi_ioc_state fwstate);
52327 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
52328-};
52329+} __no_const;
52330
52331 /*
52332 * Queue element to wait for room in request queue. FIFO order is
52333diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
52334index a14c784..6de6790 100644
52335--- a/drivers/scsi/bfa/bfa_modules.h
52336+++ b/drivers/scsi/bfa/bfa_modules.h
52337@@ -78,12 +78,12 @@ enum {
52338 \
52339 extern struct bfa_module_s hal_mod_ ## __mod; \
52340 struct bfa_module_s hal_mod_ ## __mod = { \
52341- bfa_ ## __mod ## _meminfo, \
52342- bfa_ ## __mod ## _attach, \
52343- bfa_ ## __mod ## _detach, \
52344- bfa_ ## __mod ## _start, \
52345- bfa_ ## __mod ## _stop, \
52346- bfa_ ## __mod ## _iocdisable, \
52347+ .meminfo = bfa_ ## __mod ## _meminfo, \
52348+ .attach = bfa_ ## __mod ## _attach, \
52349+ .detach = bfa_ ## __mod ## _detach, \
52350+ .start = bfa_ ## __mod ## _start, \
52351+ .stop = bfa_ ## __mod ## _stop, \
52352+ .iocdisable = bfa_ ## __mod ## _iocdisable, \
52353 }
52354
52355 #define BFA_CACHELINE_SZ (256)
52356diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
52357index 045c4e1..13de803 100644
52358--- a/drivers/scsi/fcoe/fcoe_sysfs.c
52359+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
52360@@ -33,8 +33,8 @@
52361 */
52362 #include "libfcoe.h"
52363
52364-static atomic_t ctlr_num;
52365-static atomic_t fcf_num;
52366+static atomic_unchecked_t ctlr_num;
52367+static atomic_unchecked_t fcf_num;
52368
52369 /*
52370 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
52371@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
52372 if (!ctlr)
52373 goto out;
52374
52375- ctlr->id = atomic_inc_return(&ctlr_num) - 1;
52376+ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
52377 ctlr->f = f;
52378 ctlr->mode = FIP_CONN_TYPE_FABRIC;
52379 INIT_LIST_HEAD(&ctlr->fcfs);
52380@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
52381 fcf->dev.parent = &ctlr->dev;
52382 fcf->dev.bus = &fcoe_bus_type;
52383 fcf->dev.type = &fcoe_fcf_device_type;
52384- fcf->id = atomic_inc_return(&fcf_num) - 1;
52385+ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
52386 fcf->state = FCOE_FCF_STATE_UNKNOWN;
52387
52388 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
52389@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
52390 {
52391 int error;
52392
52393- atomic_set(&ctlr_num, 0);
52394- atomic_set(&fcf_num, 0);
52395+ atomic_set_unchecked(&ctlr_num, 0);
52396+ atomic_set_unchecked(&fcf_num, 0);
52397
52398 error = bus_register(&fcoe_bus_type);
52399 if (error)
52400diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
52401index 3cbb57a..95e47a3 100644
52402--- a/drivers/scsi/hosts.c
52403+++ b/drivers/scsi/hosts.c
52404@@ -42,7 +42,7 @@
52405 #include "scsi_logging.h"
52406
52407
52408-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
52409+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
52410
52411
52412 static void scsi_host_cls_release(struct device *dev)
52413@@ -369,7 +369,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
52414 * subtract one because we increment first then return, but we need to
52415 * know what the next host number was before increment
52416 */
52417- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
52418+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
52419 shost->dma_channel = 0xff;
52420
52421 /* These three are default values which can be overridden */
52422diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
52423index 489e83b..193815b 100644
52424--- a/drivers/scsi/hpsa.c
52425+++ b/drivers/scsi/hpsa.c
52426@@ -701,10 +701,10 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
52427 unsigned long flags;
52428
52429 if (h->transMethod & CFGTBL_Trans_io_accel1)
52430- return h->access.command_completed(h, q);
52431+ return h->access->command_completed(h, q);
52432
52433 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
52434- return h->access.command_completed(h, q);
52435+ return h->access->command_completed(h, q);
52436
52437 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
52438 a = rq->head[rq->current_entry];
52439@@ -5455,7 +5455,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
52440 while (!list_empty(&h->reqQ)) {
52441 c = list_entry(h->reqQ.next, struct CommandList, list);
52442 /* can't do anything if fifo is full */
52443- if ((h->access.fifo_full(h))) {
52444+ if ((h->access->fifo_full(h))) {
52445 h->fifo_recently_full = 1;
52446 dev_warn(&h->pdev->dev, "fifo full\n");
52447 break;
52448@@ -5477,7 +5477,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
52449
52450 /* Tell the controller execute command */
52451 spin_unlock_irqrestore(&h->lock, *flags);
52452- h->access.submit_command(h, c);
52453+ h->access->submit_command(h, c);
52454 spin_lock_irqsave(&h->lock, *flags);
52455 }
52456 }
52457@@ -5493,17 +5493,17 @@ static void lock_and_start_io(struct ctlr_info *h)
52458
52459 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
52460 {
52461- return h->access.command_completed(h, q);
52462+ return h->access->command_completed(h, q);
52463 }
52464
52465 static inline bool interrupt_pending(struct ctlr_info *h)
52466 {
52467- return h->access.intr_pending(h);
52468+ return h->access->intr_pending(h);
52469 }
52470
52471 static inline long interrupt_not_for_us(struct ctlr_info *h)
52472 {
52473- return (h->access.intr_pending(h) == 0) ||
52474+ return (h->access->intr_pending(h) == 0) ||
52475 (h->interrupts_enabled == 0);
52476 }
52477
52478@@ -6459,7 +6459,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
52479 if (prod_index < 0)
52480 return -ENODEV;
52481 h->product_name = products[prod_index].product_name;
52482- h->access = *(products[prod_index].access);
52483+ h->access = products[prod_index].access;
52484
52485 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
52486 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
52487@@ -6781,7 +6781,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
52488 unsigned long flags;
52489 u32 lockup_detected;
52490
52491- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52492+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52493 spin_lock_irqsave(&h->lock, flags);
52494 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
52495 if (!lockup_detected) {
52496@@ -7022,7 +7022,7 @@ reinit_after_soft_reset:
52497 }
52498
52499 /* make sure the board interrupts are off */
52500- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52501+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52502
52503 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
52504 goto clean2;
52505@@ -7057,7 +7057,7 @@ reinit_after_soft_reset:
52506 * fake ones to scoop up any residual completions.
52507 */
52508 spin_lock_irqsave(&h->lock, flags);
52509- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52510+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52511 spin_unlock_irqrestore(&h->lock, flags);
52512 free_irqs(h);
52513 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
52514@@ -7076,9 +7076,9 @@ reinit_after_soft_reset:
52515 dev_info(&h->pdev->dev, "Board READY.\n");
52516 dev_info(&h->pdev->dev,
52517 "Waiting for stale completions to drain.\n");
52518- h->access.set_intr_mask(h, HPSA_INTR_ON);
52519+ h->access->set_intr_mask(h, HPSA_INTR_ON);
52520 msleep(10000);
52521- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52522+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52523
52524 rc = controller_reset_failed(h->cfgtable);
52525 if (rc)
52526@@ -7104,7 +7104,7 @@ reinit_after_soft_reset:
52527 h->drv_req_rescan = 0;
52528
52529 /* Turn the interrupts on so we can service requests */
52530- h->access.set_intr_mask(h, HPSA_INTR_ON);
52531+ h->access->set_intr_mask(h, HPSA_INTR_ON);
52532
52533 hpsa_hba_inquiry(h);
52534 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
52535@@ -7169,7 +7169,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
52536 * To write all data in the battery backed cache to disks
52537 */
52538 hpsa_flush_cache(h);
52539- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52540+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52541 hpsa_free_irqs_and_disable_msix(h);
52542 }
52543
52544@@ -7287,7 +7287,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52545 CFGTBL_Trans_enable_directed_msix |
52546 (trans_support & (CFGTBL_Trans_io_accel1 |
52547 CFGTBL_Trans_io_accel2));
52548- struct access_method access = SA5_performant_access;
52549+ struct access_method *access = &SA5_performant_access;
52550
52551 /* This is a bit complicated. There are 8 registers on
52552 * the controller which we write to to tell it 8 different
52553@@ -7329,7 +7329,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52554 * perform the superfluous readl() after each command submission.
52555 */
52556 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
52557- access = SA5_performant_access_no_read;
52558+ access = &SA5_performant_access_no_read;
52559
52560 /* Controller spec: zero out this buffer. */
52561 for (i = 0; i < h->nreply_queues; i++)
52562@@ -7359,12 +7359,12 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52563 * enable outbound interrupt coalescing in accelerator mode;
52564 */
52565 if (trans_support & CFGTBL_Trans_io_accel1) {
52566- access = SA5_ioaccel_mode1_access;
52567+ access = &SA5_ioaccel_mode1_access;
52568 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
52569 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
52570 } else {
52571 if (trans_support & CFGTBL_Trans_io_accel2) {
52572- access = SA5_ioaccel_mode2_access;
52573+ access = &SA5_ioaccel_mode2_access;
52574 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
52575 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
52576 }
52577diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
52578index 24472ce..8782caf 100644
52579--- a/drivers/scsi/hpsa.h
52580+++ b/drivers/scsi/hpsa.h
52581@@ -127,7 +127,7 @@ struct ctlr_info {
52582 unsigned int msix_vector;
52583 unsigned int msi_vector;
52584 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
52585- struct access_method access;
52586+ struct access_method *access;
52587 char hba_mode_enabled;
52588
52589 /* queue and queue Info */
52590@@ -536,43 +536,43 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
52591 }
52592
52593 static struct access_method SA5_access = {
52594- SA5_submit_command,
52595- SA5_intr_mask,
52596- SA5_fifo_full,
52597- SA5_intr_pending,
52598- SA5_completed,
52599+ .submit_command = SA5_submit_command,
52600+ .set_intr_mask = SA5_intr_mask,
52601+ .fifo_full = SA5_fifo_full,
52602+ .intr_pending = SA5_intr_pending,
52603+ .command_completed = SA5_completed,
52604 };
52605
52606 static struct access_method SA5_ioaccel_mode1_access = {
52607- SA5_submit_command,
52608- SA5_performant_intr_mask,
52609- SA5_fifo_full,
52610- SA5_ioaccel_mode1_intr_pending,
52611- SA5_ioaccel_mode1_completed,
52612+ .submit_command = SA5_submit_command,
52613+ .set_intr_mask = SA5_performant_intr_mask,
52614+ .fifo_full = SA5_fifo_full,
52615+ .intr_pending = SA5_ioaccel_mode1_intr_pending,
52616+ .command_completed = SA5_ioaccel_mode1_completed,
52617 };
52618
52619 static struct access_method SA5_ioaccel_mode2_access = {
52620- SA5_submit_command_ioaccel2,
52621- SA5_performant_intr_mask,
52622- SA5_fifo_full,
52623- SA5_performant_intr_pending,
52624- SA5_performant_completed,
52625+ .submit_command = SA5_submit_command_ioaccel2,
52626+ .set_intr_mask = SA5_performant_intr_mask,
52627+ .fifo_full = SA5_fifo_full,
52628+ .intr_pending = SA5_performant_intr_pending,
52629+ .command_completed = SA5_performant_completed,
52630 };
52631
52632 static struct access_method SA5_performant_access = {
52633- SA5_submit_command,
52634- SA5_performant_intr_mask,
52635- SA5_fifo_full,
52636- SA5_performant_intr_pending,
52637- SA5_performant_completed,
52638+ .submit_command = SA5_submit_command,
52639+ .set_intr_mask = SA5_performant_intr_mask,
52640+ .fifo_full = SA5_fifo_full,
52641+ .intr_pending = SA5_performant_intr_pending,
52642+ .command_completed = SA5_performant_completed,
52643 };
52644
52645 static struct access_method SA5_performant_access_no_read = {
52646- SA5_submit_command_no_read,
52647- SA5_performant_intr_mask,
52648- SA5_fifo_full,
52649- SA5_performant_intr_pending,
52650- SA5_performant_completed,
52651+ .submit_command = SA5_submit_command_no_read,
52652+ .set_intr_mask = SA5_performant_intr_mask,
52653+ .fifo_full = SA5_fifo_full,
52654+ .intr_pending = SA5_performant_intr_pending,
52655+ .command_completed = SA5_performant_completed,
52656 };
52657
52658 struct board_type {
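The hpsa conversion is larger than most because it changes a data-structure relationship: ctlr_info previously embedded a writable copy of struct access_method, and hpsa_pci_init() copied the chosen table into it, so the method tables could never be read-only. Making the member a pointer to the shared static table removes the copy, turns every call from h->access.fn() into h->access->fn(), and lets mode selection assign &SA5_..._access instead of duplicating the struct. In miniature:

    struct ctlr_info {
            /* was: struct access_method access; (per-HBA writable copy) */
            struct access_method *access;   /* points at one shared table */
    };

    /* selection becomes pointer assignment ...   */
    /*   h->access = products[prod_index].access; */
    /* ... and dispatch becomes indirection:      */
    /*   h->access->submit_command(h, c);         */
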
52659diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
52660index 1b3a094..068e683 100644
52661--- a/drivers/scsi/libfc/fc_exch.c
52662+++ b/drivers/scsi/libfc/fc_exch.c
52663@@ -101,12 +101,12 @@ struct fc_exch_mgr {
52664 u16 pool_max_index;
52665
52666 struct {
52667- atomic_t no_free_exch;
52668- atomic_t no_free_exch_xid;
52669- atomic_t xid_not_found;
52670- atomic_t xid_busy;
52671- atomic_t seq_not_found;
52672- atomic_t non_bls_resp;
52673+ atomic_unchecked_t no_free_exch;
52674+ atomic_unchecked_t no_free_exch_xid;
52675+ atomic_unchecked_t xid_not_found;
52676+ atomic_unchecked_t xid_busy;
52677+ atomic_unchecked_t seq_not_found;
52678+ atomic_unchecked_t non_bls_resp;
52679 } stats;
52680 };
52681
52682@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
52683 /* allocate memory for exchange */
52684 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
52685 if (!ep) {
52686- atomic_inc(&mp->stats.no_free_exch);
52687+ atomic_inc_unchecked(&mp->stats.no_free_exch);
52688 goto out;
52689 }
52690 memset(ep, 0, sizeof(*ep));
52691@@ -874,7 +874,7 @@ out:
52692 return ep;
52693 err:
52694 spin_unlock_bh(&pool->lock);
52695- atomic_inc(&mp->stats.no_free_exch_xid);
52696+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
52697 mempool_free(ep, mp->ep_pool);
52698 return NULL;
52699 }
52700@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52701 xid = ntohs(fh->fh_ox_id); /* we originated exch */
52702 ep = fc_exch_find(mp, xid);
52703 if (!ep) {
52704- atomic_inc(&mp->stats.xid_not_found);
52705+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52706 reject = FC_RJT_OX_ID;
52707 goto out;
52708 }
52709@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52710 ep = fc_exch_find(mp, xid);
52711 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
52712 if (ep) {
52713- atomic_inc(&mp->stats.xid_busy);
52714+ atomic_inc_unchecked(&mp->stats.xid_busy);
52715 reject = FC_RJT_RX_ID;
52716 goto rel;
52717 }
52718@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52719 }
52720 xid = ep->xid; /* get our XID */
52721 } else if (!ep) {
52722- atomic_inc(&mp->stats.xid_not_found);
52723+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52724 reject = FC_RJT_RX_ID; /* XID not found */
52725 goto out;
52726 }
52727@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52728 } else {
52729 sp = &ep->seq;
52730 if (sp->id != fh->fh_seq_id) {
52731- atomic_inc(&mp->stats.seq_not_found);
52732+ atomic_inc_unchecked(&mp->stats.seq_not_found);
52733 if (f_ctl & FC_FC_END_SEQ) {
52734 /*
52735 * Update sequence_id based on incoming last
52736@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52737
52738 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
52739 if (!ep) {
52740- atomic_inc(&mp->stats.xid_not_found);
52741+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52742 goto out;
52743 }
52744 if (ep->esb_stat & ESB_ST_COMPLETE) {
52745- atomic_inc(&mp->stats.xid_not_found);
52746+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52747 goto rel;
52748 }
52749 if (ep->rxid == FC_XID_UNKNOWN)
52750 ep->rxid = ntohs(fh->fh_rx_id);
52751 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
52752- atomic_inc(&mp->stats.xid_not_found);
52753+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52754 goto rel;
52755 }
52756 if (ep->did != ntoh24(fh->fh_s_id) &&
52757 ep->did != FC_FID_FLOGI) {
52758- atomic_inc(&mp->stats.xid_not_found);
52759+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52760 goto rel;
52761 }
52762 sof = fr_sof(fp);
52763@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52764 sp->ssb_stat |= SSB_ST_RESP;
52765 sp->id = fh->fh_seq_id;
52766 } else if (sp->id != fh->fh_seq_id) {
52767- atomic_inc(&mp->stats.seq_not_found);
52768+ atomic_inc_unchecked(&mp->stats.seq_not_found);
52769 goto rel;
52770 }
52771
52772@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52773 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
52774
52775 if (!sp)
52776- atomic_inc(&mp->stats.xid_not_found);
52777+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52778 else
52779- atomic_inc(&mp->stats.non_bls_resp);
52780+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
52781
52782 fc_frame_free(fp);
52783 }
52784@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
52785
52786 list_for_each_entry(ema, &lport->ema_list, ema_list) {
52787 mp = ema->mp;
52788- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
52789+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
52790 st->fc_no_free_exch_xid +=
52791- atomic_read(&mp->stats.no_free_exch_xid);
52792- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
52793- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
52794- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
52795- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
52796+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
52797+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
52798+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
52799+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
52800+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
52801 }
52802 }
52803 EXPORT_SYMBOL(fc_exch_update_stats);
52804diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
52805index 766098a..1c6c971 100644
52806--- a/drivers/scsi/libsas/sas_ata.c
52807+++ b/drivers/scsi/libsas/sas_ata.c
52808@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
52809 .postreset = ata_std_postreset,
52810 .error_handler = ata_std_error_handler,
52811 .post_internal_cmd = sas_ata_post_internal,
52812- .qc_defer = ata_std_qc_defer,
52813+ .qc_defer = ata_std_qc_defer,
52814 .qc_prep = ata_noop_qc_prep,
52815 .qc_issue = sas_ata_qc_issue,
52816 .qc_fill_rtf = sas_ata_qc_fill_rtf,
52817diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
52818index 434e903..5a4a79b 100644
52819--- a/drivers/scsi/lpfc/lpfc.h
52820+++ b/drivers/scsi/lpfc/lpfc.h
52821@@ -430,7 +430,7 @@ struct lpfc_vport {
52822 struct dentry *debug_nodelist;
52823 struct dentry *vport_debugfs_root;
52824 struct lpfc_debugfs_trc *disc_trc;
52825- atomic_t disc_trc_cnt;
52826+ atomic_unchecked_t disc_trc_cnt;
52827 #endif
52828 uint8_t stat_data_enabled;
52829 uint8_t stat_data_blocked;
52830@@ -880,8 +880,8 @@ struct lpfc_hba {
52831 struct timer_list fabric_block_timer;
52832 unsigned long bit_flags;
52833 #define FABRIC_COMANDS_BLOCKED 0
52834- atomic_t num_rsrc_err;
52835- atomic_t num_cmd_success;
52836+ atomic_unchecked_t num_rsrc_err;
52837+ atomic_unchecked_t num_cmd_success;
52838 unsigned long last_rsrc_error_time;
52839 unsigned long last_ramp_down_time;
52840 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
52841@@ -916,7 +916,7 @@ struct lpfc_hba {
52842
52843 struct dentry *debug_slow_ring_trc;
52844 struct lpfc_debugfs_trc *slow_ring_trc;
52845- atomic_t slow_ring_trc_cnt;
52846+ atomic_unchecked_t slow_ring_trc_cnt;
52847 /* iDiag debugfs sub-directory */
52848 struct dentry *idiag_root;
52849 struct dentry *idiag_pci_cfg;
52850diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
52851index b0aedce..89c6ca6 100644
52852--- a/drivers/scsi/lpfc/lpfc_debugfs.c
52853+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
52854@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
52855
52856 #include <linux/debugfs.h>
52857
52858-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
52859+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
52860 static unsigned long lpfc_debugfs_start_time = 0L;
52861
52862 /* iDiag */
52863@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
52864 lpfc_debugfs_enable = 0;
52865
52866 len = 0;
52867- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
52868+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
52869 (lpfc_debugfs_max_disc_trc - 1);
52870 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
52871 dtp = vport->disc_trc + i;
52872@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
52873 lpfc_debugfs_enable = 0;
52874
52875 len = 0;
52876- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
52877+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
52878 (lpfc_debugfs_max_slow_ring_trc - 1);
52879 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
52880 dtp = phba->slow_ring_trc + i;
52881@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
52882 !vport || !vport->disc_trc)
52883 return;
52884
52885- index = atomic_inc_return(&vport->disc_trc_cnt) &
52886+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
52887 (lpfc_debugfs_max_disc_trc - 1);
52888 dtp = vport->disc_trc + index;
52889 dtp->fmt = fmt;
52890 dtp->data1 = data1;
52891 dtp->data2 = data2;
52892 dtp->data3 = data3;
52893- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
52894+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
52895 dtp->jif = jiffies;
52896 #endif
52897 return;
52898@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
52899 !phba || !phba->slow_ring_trc)
52900 return;
52901
52902- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
52903+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
52904 (lpfc_debugfs_max_slow_ring_trc - 1);
52905 dtp = phba->slow_ring_trc + index;
52906 dtp->fmt = fmt;
52907 dtp->data1 = data1;
52908 dtp->data2 = data2;
52909 dtp->data3 = data3;
52910- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
52911+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
52912 dtp->jif = jiffies;
52913 #endif
52914 return;
52915@@ -4268,7 +4268,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
52916 "slow_ring buffer\n");
52917 goto debug_failed;
52918 }
52919- atomic_set(&phba->slow_ring_trc_cnt, 0);
52920+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
52921 memset(phba->slow_ring_trc, 0,
52922 (sizeof(struct lpfc_debugfs_trc) *
52923 lpfc_debugfs_max_slow_ring_trc));
52924@@ -4314,7 +4314,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
52925 "buffer\n");
52926 goto debug_failed;
52927 }
52928- atomic_set(&vport->disc_trc_cnt, 0);
52929+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
52930
52931 snprintf(name, sizeof(name), "discovery_trace");
52932 vport->debug_disc_trc =
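The lpfc trace counters are the textbook case for the _unchecked opt-out: each one is an ever-incrementing ring-buffer cursor that gets masked down to a power-of-two depth, so overflow is not merely tolerated but expected. Reduced to its essentials:

    #define TRC_DEPTH 256   /* must be a power of two for the mask trick */

    static atomic_unchecked_t trc_cnt = ATOMIC_INIT(0);

    static unsigned int next_trc_slot(void)
    {
            /* the counter wraps by design; the mask folds it into the ring */
            return atomic_inc_return_unchecked(&trc_cnt) & (TRC_DEPTH - 1);
    }
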
52933diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
52934index 06f9a5b..82812092 100644
52935--- a/drivers/scsi/lpfc/lpfc_init.c
52936+++ b/drivers/scsi/lpfc/lpfc_init.c
52937@@ -11296,8 +11296,10 @@ lpfc_init(void)
52938 "misc_register returned with status %d", error);
52939
52940 if (lpfc_enable_npiv) {
52941- lpfc_transport_functions.vport_create = lpfc_vport_create;
52942- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
52943+ pax_open_kernel();
52944+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
52945+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
52946+ pax_close_kernel();
52947 }
52948 lpfc_transport_template =
52949 fc_attach_transport(&lpfc_transport_functions);
52950diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
52951index 2df11da..e660a2c 100644
52952--- a/drivers/scsi/lpfc/lpfc_scsi.c
52953+++ b/drivers/scsi/lpfc/lpfc_scsi.c
52954@@ -382,7 +382,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
52955 uint32_t evt_posted;
52956
52957 spin_lock_irqsave(&phba->hbalock, flags);
52958- atomic_inc(&phba->num_rsrc_err);
52959+ atomic_inc_unchecked(&phba->num_rsrc_err);
52960 phba->last_rsrc_error_time = jiffies;
52961
52962 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
52963@@ -423,8 +423,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
52964 unsigned long num_rsrc_err, num_cmd_success;
52965 int i;
52966
52967- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
52968- num_cmd_success = atomic_read(&phba->num_cmd_success);
52969+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
52970+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
52971
52972 /*
52973 * The error and success command counters are global per
52974@@ -452,8 +452,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
52975 }
52976 }
52977 lpfc_destroy_vport_work_array(phba, vports);
52978- atomic_set(&phba->num_rsrc_err, 0);
52979- atomic_set(&phba->num_cmd_success, 0);
52980+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
52981+ atomic_set_unchecked(&phba->num_cmd_success, 0);
52982 }
52983
52984 /**
52985diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52986index 5055f92..376cd98 100644
52987--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52988+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52989@@ -1557,7 +1557,7 @@ _scsih_get_resync(struct device *dev)
52990 {
52991 struct scsi_device *sdev = to_scsi_device(dev);
52992 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
52993- static struct _raid_device *raid_device;
52994+ struct _raid_device *raid_device;
52995 unsigned long flags;
52996 Mpi2RaidVolPage0_t vol_pg0;
52997 Mpi2ConfigReply_t mpi_reply;
52998@@ -1609,7 +1609,7 @@ _scsih_get_state(struct device *dev)
52999 {
53000 struct scsi_device *sdev = to_scsi_device(dev);
53001 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
53002- static struct _raid_device *raid_device;
53003+ struct _raid_device *raid_device;
53004 unsigned long flags;
53005 Mpi2RaidVolPage0_t vol_pg0;
53006 Mpi2ConfigReply_t mpi_reply;
53007@@ -6631,7 +6631,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
53008 struct fw_event_work *fw_event)
53009 {
53010 Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data;
53011- static struct _raid_device *raid_device;
53012+ struct _raid_device *raid_device;
53013 unsigned long flags;
53014 u16 handle;
53015
53016@@ -7102,7 +7102,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
53017 u64 sas_address;
53018 struct _sas_device *sas_device;
53019 struct _sas_node *expander_device;
53020- static struct _raid_device *raid_device;
53021+ struct _raid_device *raid_device;
53022 u8 retry_count;
53023 unsigned long flags;
53024
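The mpt2sas hunks fix an outright bug rather than harden anything: static on a block-scope pointer allocates one shared cell for all invocations on all CPUs, so concurrent callers could clobber each other's raid_device lookups. Dropping static restores an ordinary automatic variable. An illustrative sketch:

    void lookup_example(void)
    {
            /* was: static struct _raid_device *raid_device;
               one unsynchronized cell shared by every caller */
            struct _raid_device *raid_device;   /* per-call, as intended */

            (void)raid_device;
    }
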
53025diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
53026index be8ce54..94ed33a 100644
53027--- a/drivers/scsi/pmcraid.c
53028+++ b/drivers/scsi/pmcraid.c
53029@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
53030 res->scsi_dev = scsi_dev;
53031 scsi_dev->hostdata = res;
53032 res->change_detected = 0;
53033- atomic_set(&res->read_failures, 0);
53034- atomic_set(&res->write_failures, 0);
53035+ atomic_set_unchecked(&res->read_failures, 0);
53036+ atomic_set_unchecked(&res->write_failures, 0);
53037 rc = 0;
53038 }
53039 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
53040@@ -2687,9 +2687,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
53041
53042 /* If this was a SCSI read/write command keep count of errors */
53043 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
53044- atomic_inc(&res->read_failures);
53045+ atomic_inc_unchecked(&res->read_failures);
53046 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
53047- atomic_inc(&res->write_failures);
53048+ atomic_inc_unchecked(&res->write_failures);
53049
53050 if (!RES_IS_GSCSI(res->cfg_entry) &&
53051 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
53052@@ -3545,7 +3545,7 @@ static int pmcraid_queuecommand_lck(
53053 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
53054 * hrrq_id assigned here in queuecommand
53055 */
53056- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
53057+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
53058 pinstance->num_hrrq;
53059 cmd->cmd_done = pmcraid_io_done;
53060
53061@@ -3857,7 +3857,7 @@ static long pmcraid_ioctl_passthrough(
53062 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
53063 * hrrq_id assigned here in queuecommand
53064 */
53065- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
53066+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
53067 pinstance->num_hrrq;
53068
53069 if (request_size) {
53070@@ -4495,7 +4495,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
53071
53072 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
53073 /* add resources only after host is added into system */
53074- if (!atomic_read(&pinstance->expose_resources))
53075+ if (!atomic_read_unchecked(&pinstance->expose_resources))
53076 return;
53077
53078 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
53079@@ -5322,8 +5322,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
53080 init_waitqueue_head(&pinstance->reset_wait_q);
53081
53082 atomic_set(&pinstance->outstanding_cmds, 0);
53083- atomic_set(&pinstance->last_message_id, 0);
53084- atomic_set(&pinstance->expose_resources, 0);
53085+ atomic_set_unchecked(&pinstance->last_message_id, 0);
53086+ atomic_set_unchecked(&pinstance->expose_resources, 0);
53087
53088 INIT_LIST_HEAD(&pinstance->free_res_q);
53089 INIT_LIST_HEAD(&pinstance->used_res_q);
53090@@ -6036,7 +6036,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
53091 /* Schedule worker thread to handle CCN and take care of adding and
53092 * removing devices to OS
53093 */
53094- atomic_set(&pinstance->expose_resources, 1);
53095+ atomic_set_unchecked(&pinstance->expose_resources, 1);
53096 schedule_work(&pinstance->worker_q);
53097 return rc;
53098
53099diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
53100index e1d150f..6c6df44 100644
53101--- a/drivers/scsi/pmcraid.h
53102+++ b/drivers/scsi/pmcraid.h
53103@@ -748,7 +748,7 @@ struct pmcraid_instance {
53104 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
53105
53106 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
53107- atomic_t last_message_id;
53108+ atomic_unchecked_t last_message_id;
53109
53110 /* configuration table */
53111 struct pmcraid_config_table *cfg_table;
53112@@ -777,7 +777,7 @@ struct pmcraid_instance {
53113 atomic_t outstanding_cmds;
53114
53115 /* should add/delete resources to mid-layer now ?*/
53116- atomic_t expose_resources;
53117+ atomic_unchecked_t expose_resources;
53118
53119
53120
53121@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
53122 struct pmcraid_config_table_entry_ext cfg_entry_ext;
53123 };
53124 struct scsi_device *scsi_dev; /* Link scsi_device structure */
53125- atomic_t read_failures; /* count of failed READ commands */
53126- atomic_t write_failures; /* count of failed WRITE commands */
53127+ atomic_unchecked_t read_failures; /* count of failed READ commands */
53128+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
53129
53130 /* To indicate add/delete/modify during CCN */
53131 u8 change_detected;
53132diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
53133index 16fe519..3b1ec82 100644
53134--- a/drivers/scsi/qla2xxx/qla_attr.c
53135+++ b/drivers/scsi/qla2xxx/qla_attr.c
53136@@ -2188,7 +2188,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
53137 return 0;
53138 }
53139
53140-struct fc_function_template qla2xxx_transport_functions = {
53141+fc_function_template_no_const qla2xxx_transport_functions = {
53142
53143 .show_host_node_name = 1,
53144 .show_host_port_name = 1,
53145@@ -2236,7 +2236,7 @@ struct fc_function_template qla2xxx_transport_functions = {
53146 .bsg_timeout = qla24xx_bsg_timeout,
53147 };
53148
53149-struct fc_function_template qla2xxx_transport_vport_functions = {
53150+fc_function_template_no_const qla2xxx_transport_vport_functions = {
53151
53152 .show_host_node_name = 1,
53153 .show_host_port_name = 1,
53154diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
53155index d48dea8..0845f78 100644
53156--- a/drivers/scsi/qla2xxx/qla_gbl.h
53157+++ b/drivers/scsi/qla2xxx/qla_gbl.h
53158@@ -569,8 +569,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
53159 struct device_attribute;
53160 extern struct device_attribute *qla2x00_host_attrs[];
53161 struct fc_function_template;
53162-extern struct fc_function_template qla2xxx_transport_functions;
53163-extern struct fc_function_template qla2xxx_transport_vport_functions;
53164+extern fc_function_template_no_const qla2xxx_transport_functions;
53165+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
53166 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
53167 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
53168 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
53169diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
53170index d96bfb5..d7afe90 100644
53171--- a/drivers/scsi/qla2xxx/qla_os.c
53172+++ b/drivers/scsi/qla2xxx/qla_os.c
53173@@ -1490,8 +1490,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
53174 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
53175 /* Ok, a 64bit DMA mask is applicable. */
53176 ha->flags.enable_64bit_addressing = 1;
53177- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
53178- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
53179+ pax_open_kernel();
53180+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
53181+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
53182+ pax_close_kernel();
53183 return;
53184 }
53185 }
53186diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
53187index 8f6d0fb..1b21097 100644
53188--- a/drivers/scsi/qla4xxx/ql4_def.h
53189+++ b/drivers/scsi/qla4xxx/ql4_def.h
53190@@ -305,7 +305,7 @@ struct ddb_entry {
53191 * (4000 only) */
53192 atomic_t relogin_timer; /* Max Time to wait for
53193 * relogin to complete */
53194- atomic_t relogin_retry_count; /* Num of times relogin has been
53195+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
53196 * retried */
53197 uint32_t default_time2wait; /* Default Min time between
53198 * relogins (+aens) */
53199diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
53200index 3202063..f9f0ff6 100644
53201--- a/drivers/scsi/qla4xxx/ql4_os.c
53202+++ b/drivers/scsi/qla4xxx/ql4_os.c
53203@@ -4494,12 +4494,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
53204 */
53205 if (!iscsi_is_session_online(cls_sess)) {
53206 /* Reset retry relogin timer */
53207- atomic_inc(&ddb_entry->relogin_retry_count);
53208+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
53209 DEBUG2(ql4_printk(KERN_INFO, ha,
53210 "%s: index[%d] relogin timed out-retrying"
53211 " relogin (%d), retry (%d)\n", __func__,
53212 ddb_entry->fw_ddb_index,
53213- atomic_read(&ddb_entry->relogin_retry_count),
53214+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
53215 ddb_entry->default_time2wait + 4));
53216 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
53217 atomic_set(&ddb_entry->retry_relogin_timer,
53218@@ -6607,7 +6607,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
53219
53220 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
53221 atomic_set(&ddb_entry->relogin_timer, 0);
53222- atomic_set(&ddb_entry->relogin_retry_count, 0);
53223+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
53224 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
53225 ddb_entry->default_relogin_timeout =
53226 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
53227diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
53228index 88d46fe..7351be5 100644
53229--- a/drivers/scsi/scsi.c
53230+++ b/drivers/scsi/scsi.c
53231@@ -640,7 +640,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
53232 struct Scsi_Host *host = cmd->device->host;
53233 int rtn = 0;
53234
53235- atomic_inc(&cmd->device->iorequest_cnt);
53236+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
53237
53238 /* check if the device is still usable */
53239 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
53240diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
53241index 3f50dfc..86af487 100644
53242--- a/drivers/scsi/scsi_lib.c
53243+++ b/drivers/scsi/scsi_lib.c
53244@@ -1423,7 +1423,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
53245 shost = sdev->host;
53246 scsi_init_cmd_errh(cmd);
53247 cmd->result = DID_NO_CONNECT << 16;
53248- atomic_inc(&cmd->device->iorequest_cnt);
53249+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
53250
53251 /*
53252 * SCSI request completion path will do scsi_device_unbusy(),
53253@@ -1449,9 +1449,9 @@ static void scsi_softirq_done(struct request *rq)
53254
53255 INIT_LIST_HEAD(&cmd->eh_entry);
53256
53257- atomic_inc(&cmd->device->iodone_cnt);
53258+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
53259 if (cmd->result)
53260- atomic_inc(&cmd->device->ioerr_cnt);
53261+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
53262
53263 disposition = scsi_decide_disposition(cmd);
53264 if (disposition != SUCCESS &&
53265diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
53266index 074e8cc..f612e5c 100644
53267--- a/drivers/scsi/scsi_sysfs.c
53268+++ b/drivers/scsi/scsi_sysfs.c
53269@@ -780,7 +780,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
53270 char *buf) \
53271 { \
53272 struct scsi_device *sdev = to_scsi_device(dev); \
53273- unsigned long long count = atomic_read(&sdev->field); \
53274+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
53275 return snprintf(buf, 20, "0x%llx\n", count); \
53276 } \
53277 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
53278diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
53279index e51add0..1e06a96 100644
53280--- a/drivers/scsi/scsi_tgt_lib.c
53281+++ b/drivers/scsi/scsi_tgt_lib.c
53282@@ -363,7 +363,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
53283 int err;
53284
53285 dprintk("%lx %u\n", uaddr, len);
53286- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
53287+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
53288 if (err) {
53289 /*
53290 * TODO: need to fixup sg_tablesize, max_segment_size,
53291diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
53292index 521f583..6b15966 100644
53293--- a/drivers/scsi/scsi_transport_fc.c
53294+++ b/drivers/scsi/scsi_transport_fc.c
53295@@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
53296 * Netlink Infrastructure
53297 */
53298
53299-static atomic_t fc_event_seq;
53300+static atomic_unchecked_t fc_event_seq;
53301
53302 /**
53303 * fc_get_event_number - Obtain the next sequential FC event number
53304@@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
53305 u32
53306 fc_get_event_number(void)
53307 {
53308- return atomic_add_return(1, &fc_event_seq);
53309+ return atomic_add_return_unchecked(1, &fc_event_seq);
53310 }
53311 EXPORT_SYMBOL(fc_get_event_number);
53312
53313@@ -655,7 +655,7 @@ static __init int fc_transport_init(void)
53314 {
53315 int error;
53316
53317- atomic_set(&fc_event_seq, 0);
53318+ atomic_set_unchecked(&fc_event_seq, 0);
53319
53320 error = transport_class_register(&fc_host_class);
53321 if (error)
53322@@ -845,7 +845,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
53323 char *cp;
53324
53325 *val = simple_strtoul(buf, &cp, 0);
53326- if ((*cp && (*cp != '\n')) || (*val < 0))
53327+ if (*cp && (*cp != '\n'))
53328 return -EINVAL;
53329 /*
53330 * Check for overflow; dev_loss_tmo is u32
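
The fc_str_to_dev_loss hunk drops the (*val < 0) clause because *val is an unsigned long: the comparison can never be true, and the real bound is the u32 range check that follows. A small demonstration of why the dropped test was dead code:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        char *cp;
        unsigned long val = strtoul("-1", &cp, 0);  /* wraps to ULONG_MAX */

        if (val < 0)                /* always false: val is unsigned      */
            puts("unreachable");    /* (gcc -Wtype-limits flags the test) */

        printf("%lu\n", val);       /* overflow must be caught against    */
        return 0;                   /* the real maximum (here, a u32)     */
    }
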
53331diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
53332index 0102a2d..cc3f8e9 100644
53333--- a/drivers/scsi/scsi_transport_iscsi.c
53334+++ b/drivers/scsi/scsi_transport_iscsi.c
53335@@ -79,7 +79,7 @@ struct iscsi_internal {
53336 struct transport_container session_cont;
53337 };
53338
53339-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
53340+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
53341 static struct workqueue_struct *iscsi_eh_timer_workq;
53342
53343 static DEFINE_IDA(iscsi_sess_ida);
53344@@ -2071,7 +2071,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
53345 int err;
53346
53347 ihost = shost->shost_data;
53348- session->sid = atomic_add_return(1, &iscsi_session_nr);
53349+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
53350
53351 if (target_id == ISCSI_MAX_TARGET) {
53352 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
53353@@ -4511,7 +4511,7 @@ static __init int iscsi_transport_init(void)
53354 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
53355 ISCSI_TRANSPORT_VERSION);
53356
53357- atomic_set(&iscsi_session_nr, 0);
53358+ atomic_set_unchecked(&iscsi_session_nr, 0);
53359
53360 err = class_register(&iscsi_transport_class);
53361 if (err)
53362diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
53363index 13e8983..d306a68 100644
53364--- a/drivers/scsi/scsi_transport_srp.c
53365+++ b/drivers/scsi/scsi_transport_srp.c
53366@@ -36,7 +36,7 @@
53367 #include "scsi_transport_srp_internal.h"
53368
53369 struct srp_host_attrs {
53370- atomic_t next_port_id;
53371+ atomic_unchecked_t next_port_id;
53372 };
53373 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
53374
53375@@ -101,7 +101,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
53376 struct Scsi_Host *shost = dev_to_shost(dev);
53377 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
53378
53379- atomic_set(&srp_host->next_port_id, 0);
53380+ atomic_set_unchecked(&srp_host->next_port_id, 0);
53381 return 0;
53382 }
53383
53384@@ -734,7 +734,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
53385 rport_fast_io_fail_timedout);
53386 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
53387
53388- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
53389+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
53390 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
53391
53392 transport_setup_device(&rport->dev);
53393diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
53394index 6825eda..be470c4 100644
53395--- a/drivers/scsi/sd.c
53396+++ b/drivers/scsi/sd.c
53397@@ -2954,7 +2954,7 @@ static int sd_probe(struct device *dev)
53398 sdkp->disk = gd;
53399 sdkp->index = index;
53400 atomic_set(&sdkp->openers, 0);
53401- atomic_set(&sdkp->device->ioerr_cnt, 0);
53402+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
53403
53404 if (!sdp->request_queue->rq_timeout) {
53405 if (sdp->type != TYPE_MOD)
53406diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
53407index 53268aab..17c2764 100644
53408--- a/drivers/scsi/sg.c
53409+++ b/drivers/scsi/sg.c
53410@@ -1102,7 +1102,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
53411 sdp->disk->disk_name,
53412 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
53413 NULL,
53414- (char *)arg);
53415+ (char __user *)arg);
53416 case BLKTRACESTART:
53417 return blk_trace_startstop(sdp->device->request_queue, 1);
53418 case BLKTRACESTOP:
53419diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
53420index d4f9670..d37b662 100644
53421--- a/drivers/spi/spi.c
53422+++ b/drivers/spi/spi.c
53423@@ -2204,7 +2204,7 @@ int spi_bus_unlock(struct spi_master *master)
53424 EXPORT_SYMBOL_GPL(spi_bus_unlock);
53425
53426 /* portable code must never pass more than 32 bytes */
53427-#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
53428+#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES)
53429
53430 static u8 *buf;
53431
53432diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
53433index c341ac1..bf9799f 100644
53434--- a/drivers/staging/android/timed_output.c
53435+++ b/drivers/staging/android/timed_output.c
53436@@ -25,7 +25,7 @@
53437 #include "timed_output.h"
53438
53439 static struct class *timed_output_class;
53440-static atomic_t device_count;
53441+static atomic_unchecked_t device_count;
53442
53443 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
53444 char *buf)
53445@@ -63,7 +63,7 @@ static int create_timed_output_class(void)
53446 timed_output_class = class_create(THIS_MODULE, "timed_output");
53447 if (IS_ERR(timed_output_class))
53448 return PTR_ERR(timed_output_class);
53449- atomic_set(&device_count, 0);
53450+ atomic_set_unchecked(&device_count, 0);
53451 timed_output_class->dev_groups = timed_output_groups;
53452 }
53453
53454@@ -81,7 +81,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
53455 if (ret < 0)
53456 return ret;
53457
53458- tdev->index = atomic_inc_return(&device_count);
53459+ tdev->index = atomic_inc_return_unchecked(&device_count);
53460 tdev->dev = device_create(timed_output_class, NULL,
53461 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
53462 if (IS_ERR(tdev->dev))
53463diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
53464index fe47cd3..19a1bd1 100644
53465--- a/drivers/staging/gdm724x/gdm_tty.c
53466+++ b/drivers/staging/gdm724x/gdm_tty.c
53467@@ -44,7 +44,7 @@
53468 #define gdm_tty_send_control(n, r, v, d, l) (\
53469 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
53470
53471-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
53472+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
53473
53474 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
53475 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
53476diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
53477index def8280..e3fd96a 100644
53478--- a/drivers/staging/imx-drm/imx-drm-core.c
53479+++ b/drivers/staging/imx-drm/imx-drm-core.c
53480@@ -355,7 +355,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
53481 if (imxdrm->pipes >= MAX_CRTC)
53482 return -EINVAL;
53483
53484- if (imxdrm->drm->open_count)
53485+ if (local_read(&imxdrm->drm->open_count))
53486 return -EBUSY;
53487
53488 imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL);
53489diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
53490index 3f8020c..649fded 100644
53491--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
53492+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
53493@@ -488,13 +488,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
53494 return 0;
53495 }
53496
53497-sfw_test_client_ops_t brw_test_client;
53498-void brw_init_test_client(void)
53499-{
53500- brw_test_client.tso_init = brw_client_init;
53501- brw_test_client.tso_fini = brw_client_fini;
53502- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
53503- brw_test_client.tso_done_rpc = brw_client_done_rpc;
53504+sfw_test_client_ops_t brw_test_client = {
53505+ .tso_init = brw_client_init,
53506+ .tso_fini = brw_client_fini,
53507+ .tso_prep_rpc = brw_client_prep_rpc,
53508+ .tso_done_rpc = brw_client_done_rpc,
53509 };
53510
53511 srpc_service_t brw_test_service;
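
This lustre selftest hunk (and the matching one in ping_test.c below) replaces a helper that filled in brw_test_client field by field at startup with a static designated initializer, so the ops object becomes write-once data that the constification plugin can accept. The same transformation in miniature, with illustrative types:

    #include <stdio.h>

    typedef struct {
        void (*tso_init)(void);
        void (*tso_fini)(void);
    } test_client_ops_t;

    static void my_init(void) { puts("init"); }
    static void my_fini(void) { puts("fini"); }

    /* One static definition instead of a runtime *_init_test_client()
     * helper; members not named here are implicitly zero/NULL. */
    static const test_client_ops_t test_client = {
        .tso_init = my_init,
        .tso_fini = my_fini,
    };

    int main(void)
    {
        test_client.tso_init();
        test_client.tso_fini();
        return 0;
    }
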
53512diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
53513index 050723a..fa6fdf1 100644
53514--- a/drivers/staging/lustre/lnet/selftest/framework.c
53515+++ b/drivers/staging/lustre/lnet/selftest/framework.c
53516@@ -1635,12 +1635,10 @@ static srpc_service_t sfw_services[] =
53517
53518 extern sfw_test_client_ops_t ping_test_client;
53519 extern srpc_service_t ping_test_service;
53520-extern void ping_init_test_client(void);
53521 extern void ping_init_test_service(void);
53522
53523 extern sfw_test_client_ops_t brw_test_client;
53524 extern srpc_service_t brw_test_service;
53525-extern void brw_init_test_client(void);
53526 extern void brw_init_test_service(void);
53527
53528
53529@@ -1684,12 +1682,10 @@ sfw_startup (void)
53530 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
53531 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
53532
53533- brw_init_test_client();
53534 brw_init_test_service();
53535 rc = sfw_register_test(&brw_test_service, &brw_test_client);
53536 LASSERT (rc == 0);
53537
53538- ping_init_test_client();
53539 ping_init_test_service();
53540 rc = sfw_register_test(&ping_test_service, &ping_test_client);
53541 LASSERT (rc == 0);
53542diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
53543index 750cac4..e4d751f 100644
53544--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
53545+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
53546@@ -211,14 +211,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
53547 return 0;
53548 }
53549
53550-sfw_test_client_ops_t ping_test_client;
53551-void ping_init_test_client(void)
53552-{
53553- ping_test_client.tso_init = ping_client_init;
53554- ping_test_client.tso_fini = ping_client_fini;
53555- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
53556- ping_test_client.tso_done_rpc = ping_client_done_rpc;
53557-}
53558+sfw_test_client_ops_t ping_test_client = {
53559+ .tso_init = ping_client_init,
53560+ .tso_fini = ping_client_fini,
53561+ .tso_prep_rpc = ping_client_prep_rpc,
53562+ .tso_done_rpc = ping_client_done_rpc,
53563+};
53564
53565 srpc_service_t ping_test_service;
53566 void ping_init_test_service(void)
53567diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
53568index 0c6b784..c64235c 100644
53569--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
53570+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
53571@@ -1141,7 +1141,7 @@ struct ldlm_callback_suite {
53572 ldlm_completion_callback lcs_completion;
53573 ldlm_blocking_callback lcs_blocking;
53574 ldlm_glimpse_callback lcs_glimpse;
53575-};
53576+} __no_const;
53577
53578 /* ldlm_lockd.c */
53579 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
53580diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
53581index d5c4613..a341678 100644
53582--- a/drivers/staging/lustre/lustre/include/obd.h
53583+++ b/drivers/staging/lustre/lustre/include/obd.h
53584@@ -1439,7 +1439,7 @@ struct md_ops {
53585 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
53586 * wrapper function in include/linux/obd_class.h.
53587 */
53588-};
53589+} __no_const;
53590
53591 struct lsm_operations {
53592 void (*lsm_free)(struct lov_stripe_md *);
53593diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53594index 986bf38..eab2558f 100644
53595--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53596+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53597@@ -259,7 +259,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
53598 int added = (mode == LCK_NL);
53599 int overlaps = 0;
53600 int splitted = 0;
53601- const struct ldlm_callback_suite null_cbs = { NULL };
53602+ const struct ldlm_callback_suite null_cbs = { };
53603
53604 CDEBUG(D_DLMTRACE, "flags %#llx owner "LPU64" pid %u mode %u start "
53605 LPU64" end "LPU64"\n", *flags,
53606diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53607index e947b91..f408990 100644
53608--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53609+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53610@@ -217,7 +217,7 @@ DECLARE_PROC_HANDLER(proc_debug_mb)
53611 int LL_PROC_PROTO(proc_console_max_delay_cs)
53612 {
53613 int rc, max_delay_cs;
53614- ctl_table_t dummy = *table;
53615+ ctl_table_no_const dummy = *table;
53616 cfs_duration_t d;
53617
53618 dummy.data = &max_delay_cs;
53619@@ -248,7 +248,7 @@ int LL_PROC_PROTO(proc_console_max_delay_cs)
53620 int LL_PROC_PROTO(proc_console_min_delay_cs)
53621 {
53622 int rc, min_delay_cs;
53623- ctl_table_t dummy = *table;
53624+ ctl_table_no_const dummy = *table;
53625 cfs_duration_t d;
53626
53627 dummy.data = &min_delay_cs;
53628@@ -279,7 +279,7 @@ int LL_PROC_PROTO(proc_console_min_delay_cs)
53629 int LL_PROC_PROTO(proc_console_backoff)
53630 {
53631 int rc, backoff;
53632- ctl_table_t dummy = *table;
53633+ ctl_table_no_const dummy = *table;
53634
53635 dummy.data = &backoff;
53636 dummy.proc_handler = &proc_dointvec;
53637diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
53638index b16ee08..a3db5c6 100644
53639--- a/drivers/staging/lustre/lustre/libcfs/module.c
53640+++ b/drivers/staging/lustre/lustre/libcfs/module.c
53641@@ -314,11 +314,11 @@ out:
53642
53643
53644 struct cfs_psdev_ops libcfs_psdev_ops = {
53645- libcfs_psdev_open,
53646- libcfs_psdev_release,
53647- NULL,
53648- NULL,
53649- libcfs_ioctl
53650+ .p_open = libcfs_psdev_open,
53651+ .p_close = libcfs_psdev_release,
53652+ .p_read = NULL,
53653+ .p_write = NULL,
53654+ .p_ioctl = libcfs_ioctl
53655 };
53656
53657 extern int insert_proc(void);
53658diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
53659index ae6f61a..03c3d5d 100644
53660--- a/drivers/staging/lustre/lustre/llite/dir.c
53661+++ b/drivers/staging/lustre/lustre/llite/dir.c
53662@@ -660,7 +660,7 @@ int ll_dir_setdirstripe(struct inode *dir, struct lmv_user_md *lump,
53663 int mode;
53664 int err;
53665
53666- mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current->fs->umask) | S_IFDIR;
53667+ mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current_umask()) | S_IFDIR;
53668 op_data = ll_prep_md_op_data(NULL, dir, NULL, filename,
53669 strlen(filename), mode, LUSTRE_OPC_MKDIR,
53670 lump);
53671diff --git a/drivers/staging/media/solo6x10/solo6x10-core.c b/drivers/staging/media/solo6x10/solo6x10-core.c
53672index f670469..03b7438 100644
53673--- a/drivers/staging/media/solo6x10/solo6x10-core.c
53674+++ b/drivers/staging/media/solo6x10/solo6x10-core.c
53675@@ -434,7 +434,7 @@ static void solo_device_release(struct device *dev)
53676
53677 static int solo_sysfs_init(struct solo_dev *solo_dev)
53678 {
53679- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
53680+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
53681 struct device *dev = &solo_dev->dev;
53682 const char *driver;
53683 int i;
53684diff --git a/drivers/staging/media/solo6x10/solo6x10-g723.c b/drivers/staging/media/solo6x10/solo6x10-g723.c
53685index 74f037b..5b5bb76 100644
53686--- a/drivers/staging/media/solo6x10/solo6x10-g723.c
53687+++ b/drivers/staging/media/solo6x10/solo6x10-g723.c
53688@@ -355,7 +355,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
53689
53690 int solo_g723_init(struct solo_dev *solo_dev)
53691 {
53692- static struct snd_device_ops ops = { NULL };
53693+ static struct snd_device_ops ops = { };
53694 struct snd_card *card;
53695 struct snd_kcontrol_new kctl;
53696 char name[32];
53697diff --git a/drivers/staging/media/solo6x10/solo6x10-p2m.c b/drivers/staging/media/solo6x10/solo6x10-p2m.c
53698index 7f2f247..d999137 100644
53699--- a/drivers/staging/media/solo6x10/solo6x10-p2m.c
53700+++ b/drivers/staging/media/solo6x10/solo6x10-p2m.c
53701@@ -77,7 +77,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
53702
53703 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
53704 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
53705- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
53706+ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
53707 if (p2m_id < 0)
53708 p2m_id = -p2m_id;
53709 }
53710diff --git a/drivers/staging/media/solo6x10/solo6x10.h b/drivers/staging/media/solo6x10/solo6x10.h
53711index 8964f8b..36eb087 100644
53712--- a/drivers/staging/media/solo6x10/solo6x10.h
53713+++ b/drivers/staging/media/solo6x10/solo6x10.h
53714@@ -237,7 +237,7 @@ struct solo_dev {
53715
53716 /* P2M DMA Engine */
53717 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
53718- atomic_t p2m_count;
53719+ atomic_unchecked_t p2m_count;
53720 int p2m_jiffies;
53721 unsigned int p2m_timeouts;
53722
53723diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
53724index a0f4868..139f1fb 100644
53725--- a/drivers/staging/octeon/ethernet-rx.c
53726+++ b/drivers/staging/octeon/ethernet-rx.c
53727@@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
53728 /* Increment RX stats for virtual ports */
53729 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
53730 #ifdef CONFIG_64BIT
53731- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
53732- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
53733+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
53734+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
53735 #else
53736- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
53737- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
53738+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
53739+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
53740 #endif
53741 }
53742 netif_receive_skb(skb);
53743@@ -432,9 +432,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
53744 dev->name);
53745 */
53746 #ifdef CONFIG_64BIT
53747- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
53748+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
53749 #else
53750- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
53751+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
53752 #endif
53753 dev_kfree_skb_irq(skb);
53754 }
53755diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
53756index da9dd6b..8e3e0f5 100644
53757--- a/drivers/staging/octeon/ethernet.c
53758+++ b/drivers/staging/octeon/ethernet.c
53759@@ -247,11 +247,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
53760 * since the RX tasklet also increments it.
53761 */
53762 #ifdef CONFIG_64BIT
53763- atomic64_add(rx_status.dropped_packets,
53764- (atomic64_t *)&priv->stats.rx_dropped);
53765+ atomic64_add_unchecked(rx_status.dropped_packets,
53766+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
53767 #else
53768- atomic_add(rx_status.dropped_packets,
53769- (atomic_t *)&priv->stats.rx_dropped);
53770+ atomic_add_unchecked(rx_status.dropped_packets,
53771+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
53772 #endif
53773 }
53774
53775diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
53776index c59fccd..79f8fc2 100644
53777--- a/drivers/staging/rtl8188eu/include/hal_intf.h
53778+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
53779@@ -267,7 +267,7 @@ struct hal_ops {
53780 s32 (*c2h_handler)(struct adapter *padapter,
53781 struct c2h_evt_hdr *c2h_evt);
53782 c2h_id_filter c2h_id_filter_ccx;
53783-};
53784+} __no_const;
53785
53786 enum rt_eeprom_type {
53787 EEPROM_93C46,
53788diff --git a/drivers/staging/rtl8188eu/include/rtw_io.h b/drivers/staging/rtl8188eu/include/rtw_io.h
53789index e8790f8..b4a5980 100644
53790--- a/drivers/staging/rtl8188eu/include/rtw_io.h
53791+++ b/drivers/staging/rtl8188eu/include/rtw_io.h
53792@@ -124,7 +124,7 @@ struct _io_ops {
53793 u32 (*_write_scsi)(struct intf_hdl *pintfhdl, u32 cnt, u8 *pmem);
53794 void (*_read_port_cancel)(struct intf_hdl *pintfhdl);
53795 void (*_write_port_cancel)(struct intf_hdl *pintfhdl);
53796-};
53797+} __no_const;
53798
53799 struct io_req {
53800 struct list_head list;
53801diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
53802index dc23395..cf7e9b1 100644
53803--- a/drivers/staging/rtl8712/rtl871x_io.h
53804+++ b/drivers/staging/rtl8712/rtl871x_io.h
53805@@ -108,7 +108,7 @@ struct _io_ops {
53806 u8 *pmem);
53807 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
53808 u8 *pmem);
53809-};
53810+} __no_const;
53811
53812 struct io_req {
53813 struct list_head list;
53814diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
53815index a863a98..d272795 100644
53816--- a/drivers/staging/usbip/vhci.h
53817+++ b/drivers/staging/usbip/vhci.h
53818@@ -83,7 +83,7 @@ struct vhci_hcd {
53819 unsigned resuming:1;
53820 unsigned long re_timeout;
53821
53822- atomic_t seqnum;
53823+ atomic_unchecked_t seqnum;
53824
53825 /*
53826 * NOTE:
53827diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
53828index 0007d30..c06a693 100644
53829--- a/drivers/staging/usbip/vhci_hcd.c
53830+++ b/drivers/staging/usbip/vhci_hcd.c
53831@@ -439,7 +439,7 @@ static void vhci_tx_urb(struct urb *urb)
53832
53833 spin_lock(&vdev->priv_lock);
53834
53835- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
53836+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
53837 if (priv->seqnum == 0xffff)
53838 dev_info(&urb->dev->dev, "seqnum max\n");
53839
53840@@ -686,7 +686,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
53841 return -ENOMEM;
53842 }
53843
53844- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
53845+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
53846 if (unlink->seqnum == 0xffff)
53847 pr_info("seqnum max\n");
53848
53849@@ -891,7 +891,7 @@ static int vhci_start(struct usb_hcd *hcd)
53850 vdev->rhport = rhport;
53851 }
53852
53853- atomic_set(&vhci->seqnum, 0);
53854+ atomic_set_unchecked(&vhci->seqnum, 0);
53855 spin_lock_init(&vhci->lock);
53856
53857 hcd->power_budget = 0; /* no limit */
53858diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
53859index d07fcb5..358e1e1 100644
53860--- a/drivers/staging/usbip/vhci_rx.c
53861+++ b/drivers/staging/usbip/vhci_rx.c
53862@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
53863 if (!urb) {
53864 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
53865 pr_info("max seqnum %d\n",
53866- atomic_read(&the_controller->seqnum));
53867+ atomic_read_unchecked(&the_controller->seqnum));
53868 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
53869 return;
53870 }
53871diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
53872index 317c2a8..ffeb4ef 100644
53873--- a/drivers/staging/vt6655/hostap.c
53874+++ b/drivers/staging/vt6655/hostap.c
53875@@ -68,14 +68,13 @@ static int msglevel = MSG_LEVEL_INFO;
53876 *
53877 */
53878
53879+static net_device_ops_no_const apdev_netdev_ops;
53880+
53881 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
53882 {
53883 PSDevice apdev_priv;
53884 struct net_device *dev = pDevice->dev;
53885 int ret;
53886- const struct net_device_ops apdev_netdev_ops = {
53887- .ndo_start_xmit = pDevice->tx_80211,
53888- };
53889
53890 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
53891
53892@@ -87,6 +86,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
53893 *apdev_priv = *pDevice;
53894 eth_hw_addr_inherit(pDevice->apdev, dev);
53895
53896+ /* only half broken now */
53897+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
53898 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
53899
53900 pDevice->apdev->type = ARPHRD_IEEE80211;
53901diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
53902index e7e9372..161f530 100644
53903--- a/drivers/target/sbp/sbp_target.c
53904+++ b/drivers/target/sbp/sbp_target.c
53905@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
53906
53907 #define SESSION_MAINTENANCE_INTERVAL HZ
53908
53909-static atomic_t login_id = ATOMIC_INIT(0);
53910+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
53911
53912 static void session_maintenance_work(struct work_struct *);
53913 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
53914@@ -444,7 +444,7 @@ static void sbp_management_request_login(
53915 login->lun = se_lun;
53916 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
53917 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
53918- login->login_id = atomic_inc_return(&login_id);
53919+ login->login_id = atomic_inc_return_unchecked(&login_id);
53920
53921 login->tgt_agt = sbp_target_agent_register(login);
53922 if (IS_ERR(login->tgt_agt)) {
53923diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
53924index 98da901..bb443e8 100644
53925--- a/drivers/target/target_core_device.c
53926+++ b/drivers/target/target_core_device.c
53927@@ -1525,7 +1525,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
53928 spin_lock_init(&dev->se_tmr_lock);
53929 spin_lock_init(&dev->qf_cmd_lock);
53930 sema_init(&dev->caw_sem, 1);
53931- atomic_set(&dev->dev_ordered_id, 0);
53932+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
53933 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
53934 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
53935 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
53936diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
53937index 7fa62fc..abdd041 100644
53938--- a/drivers/target/target_core_transport.c
53939+++ b/drivers/target/target_core_transport.c
53940@@ -1165,7 +1165,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
53941 * Used to determine when ORDERED commands should go from
53942 * Dormant to Active status.
53943 */
53944- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
53945+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
53946 smp_mb__after_atomic();
53947 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
53948 cmd->se_ordered_id, cmd->sam_task_attr,
53949diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
53950index 4b2b999..cad9fa5 100644
53951--- a/drivers/thermal/of-thermal.c
53952+++ b/drivers/thermal/of-thermal.c
53953@@ -30,6 +30,7 @@
53954 #include <linux/err.h>
53955 #include <linux/export.h>
53956 #include <linux/string.h>
53957+#include <linux/mm.h>
53958
53959 #include "thermal_core.h"
53960
53961@@ -341,8 +342,10 @@ thermal_zone_of_add_sensor(struct device_node *zone,
53962 tz->get_trend = get_trend;
53963 tz->sensor_data = data;
53964
53965- tzd->ops->get_temp = of_thermal_get_temp;
53966- tzd->ops->get_trend = of_thermal_get_trend;
53967+ pax_open_kernel();
53968+ *(void **)&tzd->ops->get_temp = of_thermal_get_temp;
53969+ *(void **)&tzd->ops->get_trend = of_thermal_get_trend;
53970+ pax_close_kernel();
53971 mutex_unlock(&tzd->lock);
53972
53973 return tzd;
53974@@ -461,8 +464,10 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
53975 return;
53976
53977 mutex_lock(&tzd->lock);
53978- tzd->ops->get_temp = NULL;
53979- tzd->ops->get_trend = NULL;
53980+ pax_open_kernel();
53981+ *(void **)&tzd->ops->get_temp = NULL;
53982+ *(void **)&tzd->ops->get_trend = NULL;
53983+ pax_close_kernel();
53984
53985 tz->get_temp = NULL;
53986 tz->get_trend = NULL;
53987diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
53988index a57bb5a..1f727d33 100644
53989--- a/drivers/tty/cyclades.c
53990+++ b/drivers/tty/cyclades.c
53991@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
53992 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
53993- info->port.count);
53993+ atomic_read(&info->port.count));
53994 #endif
53995- info->port.count++;
53996+ atomic_inc(&info->port.count);
53997 #ifdef CY_DEBUG_COUNT
53998 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
53999- current->pid, info->port.count);
54000+ current->pid, atomic_read(&info->port.count));
54001 #endif
54002
54003 /*
54004@@ -3974,7 +3974,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
54005 for (j = 0; j < cy_card[i].nports; j++) {
54006 info = &cy_card[i].ports[j];
54007
54008- if (info->port.count) {
54009+ if (atomic_read(&info->port.count)) {
54010 /* XXX is the ldisc num worth this? */
54011 struct tty_struct *tty;
54012 struct tty_ldisc *ld;
54013diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
54014index 4fcec1d..5a036f7 100644
54015--- a/drivers/tty/hvc/hvc_console.c
54016+++ b/drivers/tty/hvc/hvc_console.c
54017@@ -342,7 +342,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
54018
54019 spin_lock_irqsave(&hp->port.lock, flags);
54020 /* Check and then increment for fast path open. */
54021- if (hp->port.count++ > 0) {
54022+ if (atomic_inc_return(&hp->port.count) > 1) {
54023 spin_unlock_irqrestore(&hp->port.lock, flags);
54024 hvc_kick();
54025 return 0;
54026@@ -397,7 +397,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
54027
54028 spin_lock_irqsave(&hp->port.lock, flags);
54029
54030- if (--hp->port.count == 0) {
54031+ if (atomic_dec_return(&hp->port.count) == 0) {
54032 spin_unlock_irqrestore(&hp->port.lock, flags);
54033 /* We are done with the tty pointer now. */
54034 tty_port_tty_set(&hp->port, NULL);
54035@@ -419,9 +419,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
54036 */
54037 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
54038 } else {
54039- if (hp->port.count < 0)
54040+ if (atomic_read(&hp->port.count) < 0)
54041 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
54042- hp->vtermno, hp->port.count);
54043+ hp->vtermno, atomic_read(&hp->port.count));
54044 spin_unlock_irqrestore(&hp->port.lock, flags);
54045 }
54046 }
54047@@ -451,12 +451,12 @@ static void hvc_hangup(struct tty_struct *tty)
54048 * open->hangup case this can be called after the final close so prevent
54049 * that from happening for now.
54050 */
54051- if (hp->port.count <= 0) {
54052+ if (atomic_read(&hp->port.count) <= 0) {
54053 spin_unlock_irqrestore(&hp->port.lock, flags);
54054 return;
54055 }
54056
54057- hp->port.count = 0;
54058+ atomic_set(&hp->port.count, 0);
54059 spin_unlock_irqrestore(&hp->port.lock, flags);
54060 tty_port_tty_set(&hp->port, NULL);
54061
54062@@ -504,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
54063 return -EPIPE;
54064
54065 /* FIXME what's this (unprotected) check for? */
54066- if (hp->port.count <= 0)
54067+ if (atomic_read(&hp->port.count) <= 0)
54068 return -EIO;
54069
54070 spin_lock_irqsave(&hp->lock, flags);
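
Starting with cyclades.c, the patch converts tty_port.count from a plain int to atomic_t so the open/close fast paths stop doing unlocked read-modify-write. Each predicate is translated rather than copied: count++ > 0 becomes atomic_inc_return(&count) > 1 (post-increment tested the old value, inc_return yields the new one) and --count == 0 becomes atomic_dec_return(&count) == 0. A userspace C11 sketch of those equivalences, with illustrative names:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int port_count;   /* stands in for tty_port.count */

    static int port_open(void)
    {
        /* old: if (count++ > 0)  ->  new: atomic_inc_return(&count) > 1 */
        if (atomic_fetch_add(&port_count, 1) + 1 > 1)
            return 1;   /* port already open: fast path */
        return 0;       /* first opener performs the real setup */
    }

    static int port_close(void)
    {
        /* old: if (--count == 0)  ->  new: atomic_dec_return(&count) == 0 */
        return atomic_fetch_sub(&port_count, 1) - 1 == 0;
    }

    int main(void)
    {
        int a = port_open(), b = port_open();
        printf("open:  %d %d\n", a, b);     /* 0 1 */
        int c = port_close(), d = port_close();
        printf("close: %d %d\n", c, d);     /* 0 1: last close hits zero */
        return 0;
    }
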
54071diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
54072index 81e939e..95ead10 100644
54073--- a/drivers/tty/hvc/hvcs.c
54074+++ b/drivers/tty/hvc/hvcs.c
54075@@ -83,6 +83,7 @@
54076 #include <asm/hvcserver.h>
54077 #include <asm/uaccess.h>
54078 #include <asm/vio.h>
54079+#include <asm/local.h>
54080
54081 /*
54082 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
54083@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
54084
54085 spin_lock_irqsave(&hvcsd->lock, flags);
54086
54087- if (hvcsd->port.count > 0) {
54088+ if (atomic_read(&hvcsd->port.count) > 0) {
54089 spin_unlock_irqrestore(&hvcsd->lock, flags);
54090 printk(KERN_INFO "HVCS: vterm state unchanged. "
54091 "The hvcs device node is still in use.\n");
54092@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
54093 }
54094 }
54095
54096- hvcsd->port.count = 0;
54097+ atomic_set(&hvcsd->port.count, 0);
54098 hvcsd->port.tty = tty;
54099 tty->driver_data = hvcsd;
54100
54101@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
54102 unsigned long flags;
54103
54104 spin_lock_irqsave(&hvcsd->lock, flags);
54105- hvcsd->port.count++;
54106+ atomic_inc(&hvcsd->port.count);
54107 hvcsd->todo_mask |= HVCS_SCHED_READ;
54108 spin_unlock_irqrestore(&hvcsd->lock, flags);
54109
54110@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
54111 hvcsd = tty->driver_data;
54112
54113 spin_lock_irqsave(&hvcsd->lock, flags);
54114- if (--hvcsd->port.count == 0) {
54115+ if (atomic_dec_and_test(&hvcsd->port.count)) {
54116
54117 vio_disable_interrupts(hvcsd->vdev);
54118
54119@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
54120
54121 free_irq(irq, hvcsd);
54122 return;
54123- } else if (hvcsd->port.count < 0) {
54124+ } else if (atomic_read(&hvcsd->port.count) < 0) {
54125 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
54126 " is missmanaged.\n",
54127- hvcsd->vdev->unit_address, hvcsd->port.count);
54128+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
54129 }
54130
54131 spin_unlock_irqrestore(&hvcsd->lock, flags);
54132@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
54133
54134 spin_lock_irqsave(&hvcsd->lock, flags);
54135 /* Preserve this so that we know how many kref refs to put */
54136- temp_open_count = hvcsd->port.count;
54137+ temp_open_count = atomic_read(&hvcsd->port.count);
54138
54139 /*
54140 * Don't kref put inside the spinlock because the destruction
54141@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
54142 tty->driver_data = NULL;
54143 hvcsd->port.tty = NULL;
54144
54145- hvcsd->port.count = 0;
54146+ atomic_set(&hvcsd->port.count, 0);
54147
54148 /* This will drop any buffered data on the floor which is OK in a hangup
54149 * scenario. */
54150@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
54151 * the middle of a write operation? This is a crummy place to do this
54152 * but we want to keep it all in the spinlock.
54153 */
54154- if (hvcsd->port.count <= 0) {
54155+ if (atomic_read(&hvcsd->port.count) <= 0) {
54156 spin_unlock_irqrestore(&hvcsd->lock, flags);
54157 return -ENODEV;
54158 }
54159@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
54160 {
54161 struct hvcs_struct *hvcsd = tty->driver_data;
54162
54163- if (!hvcsd || hvcsd->port.count <= 0)
54164+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
54165 return 0;
54166
54167 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
54168diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
54169index 4190199..06d5bfa 100644
54170--- a/drivers/tty/hvc/hvsi.c
54171+++ b/drivers/tty/hvc/hvsi.c
54172@@ -85,7 +85,7 @@ struct hvsi_struct {
54173 int n_outbuf;
54174 uint32_t vtermno;
54175 uint32_t virq;
54176- atomic_t seqno; /* HVSI packet sequence number */
54177+ atomic_unchecked_t seqno; /* HVSI packet sequence number */
54178 uint16_t mctrl;
54179 uint8_t state; /* HVSI protocol state */
54180 uint8_t flags;
54181@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
54182
54183 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
54184 packet.hdr.len = sizeof(struct hvsi_query_response);
54185- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54186+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54187 packet.verb = VSV_SEND_VERSION_NUMBER;
54188 packet.u.version = HVSI_VERSION;
54189 packet.query_seqno = query_seqno+1;
54190@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
54191
54192 packet.hdr.type = VS_QUERY_PACKET_HEADER;
54193 packet.hdr.len = sizeof(struct hvsi_query);
54194- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54195+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54196 packet.verb = verb;
54197
54198 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
54199@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
54200 int wrote;
54201
54202 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
54203- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54204+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54205 packet.hdr.len = sizeof(struct hvsi_control);
54206 packet.verb = VSV_SET_MODEM_CTL;
54207 packet.mask = HVSI_TSDTR;
54208@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
54209 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
54210
54211 packet.hdr.type = VS_DATA_PACKET_HEADER;
54212- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54213+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54214 packet.hdr.len = count + sizeof(struct hvsi_header);
54215 memcpy(&packet.data, buf, count);
54216
54217@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
54218 struct hvsi_control packet __ALIGNED__;
54219
54220 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
54221- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54222+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54223 packet.hdr.len = 6;
54224 packet.verb = VSV_CLOSE_PROTOCOL;
54225
54226@@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
54227
54228 tty_port_tty_set(&hp->port, tty);
54229 spin_lock_irqsave(&hp->lock, flags);
54230- hp->port.count++;
54231+ atomic_inc(&hp->port.count);
54232- atomic_set(&hp->seqno, 0);
54232+ atomic_set_unchecked(&hp->seqno, 0);
54233 h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
54234 spin_unlock_irqrestore(&hp->lock, flags);
54235@@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
54236
54237 spin_lock_irqsave(&hp->lock, flags);
54238
54239- if (--hp->port.count == 0) {
54240+ if (atomic_dec_return(&hp->port.count) == 0) {
54241 tty_port_tty_set(&hp->port, NULL);
54242 hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
54243
54244@@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
54245
54246 spin_lock_irqsave(&hp->lock, flags);
54247 }
54248- } else if (hp->port.count < 0)
54249+ } else if (atomic_read(&hp->port.count) < 0)
54250 printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
54251- hp - hvsi_ports, hp->port.count);
54252+ hp - hvsi_ports, atomic_read(&hp->port.count));
54253
54254 spin_unlock_irqrestore(&hp->lock, flags);
54255 }
54256@@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struct *tty)
54257 tty_port_tty_set(&hp->port, NULL);
54258
54259 spin_lock_irqsave(&hp->lock, flags);
54260- hp->port.count = 0;
54261+ atomic_set(&hp->port.count, 0);
54262 hp->n_outbuf = 0;
54263 spin_unlock_irqrestore(&hp->lock, flags);
54264 }
54265diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
54266index 7ae6c29..05c6dba 100644
54267--- a/drivers/tty/hvc/hvsi_lib.c
54268+++ b/drivers/tty/hvc/hvsi_lib.c
54269@@ -8,7 +8,7 @@
54270
54271 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
54272 {
54273- packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
54274+ packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
54275
54276 /* Assumes that always succeeds, works in practice */
54277 return pv->put_chars(pv->termno, (char *)packet, packet->len);
54278@@ -20,7 +20,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
54279
54280 /* Reset state */
54281 pv->established = 0;
54282- atomic_set(&pv->seqno, 0);
54283+ atomic_set_unchecked(&pv->seqno, 0);
54284
54285 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
54286
54287diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
54288index 17ee3bf..8d2520d 100644
54289--- a/drivers/tty/ipwireless/tty.c
54290+++ b/drivers/tty/ipwireless/tty.c
54291@@ -28,6 +28,7 @@
54292 #include <linux/tty_driver.h>
54293 #include <linux/tty_flip.h>
54294 #include <linux/uaccess.h>
54295+#include <asm/local.h>
54296
54297 #include "tty.h"
54298 #include "network.h"
54299@@ -98,10 +99,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
54300 mutex_unlock(&tty->ipw_tty_mutex);
54301 return -ENODEV;
54302 }
54303- if (tty->port.count == 0)
54304+ if (atomic_read(&tty->port.count) == 0)
54305 tty->tx_bytes_queued = 0;
54306
54307- tty->port.count++;
54308+ atomic_inc(&tty->port.count);
54309
54310 tty->port.tty = linux_tty;
54311 linux_tty->driver_data = tty;
54312@@ -117,9 +118,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
54313
54314 static void do_ipw_close(struct ipw_tty *tty)
54315 {
54316- tty->port.count--;
54317-
54318- if (tty->port.count == 0) {
54319+ if (atomic_dec_return(&tty->port.count) == 0) {
54320 struct tty_struct *linux_tty = tty->port.tty;
54321
54322 if (linux_tty != NULL) {
54323@@ -140,7 +139,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
54324 return;
54325
54326 mutex_lock(&tty->ipw_tty_mutex);
54327- if (tty->port.count == 0) {
54328+ if (atomic_read(&tty->port.count) == 0) {
54329 mutex_unlock(&tty->ipw_tty_mutex);
54330 return;
54331 }
54332@@ -163,7 +162,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
54333
54334 mutex_lock(&tty->ipw_tty_mutex);
54335
54336- if (!tty->port.count) {
54337+ if (!atomic_read(&tty->port.count)) {
54338 mutex_unlock(&tty->ipw_tty_mutex);
54339 return;
54340 }
54341@@ -202,7 +201,7 @@ static int ipw_write(struct tty_struct *linux_tty,
54342 return -ENODEV;
54343
54344 mutex_lock(&tty->ipw_tty_mutex);
54345- if (!tty->port.count) {
54346+ if (!atomic_read(&tty->port.count)) {
54347 mutex_unlock(&tty->ipw_tty_mutex);
54348 return -EINVAL;
54349 }
54350@@ -242,7 +241,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
54351 if (!tty)
54352 return -ENODEV;
54353
54354- if (!tty->port.count)
54355+ if (!atomic_read(&tty->port.count))
54356 return -EINVAL;
54357
54358 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
54359@@ -284,7 +283,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
54360 if (!tty)
54361 return 0;
54362
54363- if (!tty->port.count)
54364+ if (!atomic_read(&tty->port.count))
54365 return 0;
54366
54367 return tty->tx_bytes_queued;
54368@@ -365,7 +364,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
54369 if (!tty)
54370 return -ENODEV;
54371
54372- if (!tty->port.count)
54373+ if (!atomic_read(&tty->port.count))
54374 return -EINVAL;
54375
54376 return get_control_lines(tty);
54377@@ -381,7 +380,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
54378 if (!tty)
54379 return -ENODEV;
54380
54381- if (!tty->port.count)
54382+ if (!atomic_read(&tty->port.count))
54383 return -EINVAL;
54384
54385 return set_control_lines(tty, set, clear);
54386@@ -395,7 +394,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
54387 if (!tty)
54388 return -ENODEV;
54389
54390- if (!tty->port.count)
54391+ if (!atomic_read(&tty->port.count))
54392 return -EINVAL;
54393
54394 /* FIXME: Exactly how is the tty object locked here .. */
54395@@ -551,7 +550,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
54396 * are gone */
54397 mutex_lock(&ttyj->ipw_tty_mutex);
54398 }
54399- while (ttyj->port.count)
54400+ while (atomic_read(&ttyj->port.count))
54401 do_ipw_close(ttyj);
54402 ipwireless_disassociate_network_ttys(network,
54403 ttyj->channel_idx);
54404diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
54405index 1deaca4..c8582d4 100644
54406--- a/drivers/tty/moxa.c
54407+++ b/drivers/tty/moxa.c
54408@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
54409 }
54410
54411 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
54412- ch->port.count++;
54413+ atomic_inc(&ch->port.count);
54414 tty->driver_data = ch;
54415 tty_port_tty_set(&ch->port, tty);
54416 mutex_lock(&ch->port.mutex);
54417diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
54418index 2ebe47b..3205833 100644
54419--- a/drivers/tty/n_gsm.c
54420+++ b/drivers/tty/n_gsm.c
54421@@ -1644,7 +1644,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
54422 spin_lock_init(&dlci->lock);
54423 mutex_init(&dlci->mutex);
54424 dlci->fifo = &dlci->_fifo;
54425- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
54426+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
54427 kfree(dlci);
54428 return NULL;
54429 }
54430@@ -2954,7 +2954,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
54431 struct gsm_dlci *dlci = tty->driver_data;
54432 struct tty_port *port = &dlci->port;
54433
54434- port->count++;
54435+ atomic_inc(&port->count);
54436 tty_port_tty_set(port, tty);
54437
54438 dlci->modem_rx = 0;
54439diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
54440index f44f1ba..a8d5915 100644
54441--- a/drivers/tty/n_tty.c
54442+++ b/drivers/tty/n_tty.c
54443@@ -115,7 +115,7 @@ struct n_tty_data {
54444 int minimum_to_wake;
54445
54446 /* consumer-published */
54447- size_t read_tail;
54448+ size_t read_tail __intentional_overflow(-1);
54449 size_t line_start;
54450
54451 /* protected by output lock */
54452@@ -2517,6 +2517,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
54453 {
54454 *ops = tty_ldisc_N_TTY;
54455 ops->owner = NULL;
54456- ops->refcount = ops->flags = 0;
54457+ atomic_set(&ops->refcount, 0);
54458+ ops->flags = 0;
54459 }
54460 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
54461diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
54462index 25c9bc7..24077b7 100644
54463--- a/drivers/tty/pty.c
54464+++ b/drivers/tty/pty.c
54465@@ -790,8 +790,10 @@ static void __init unix98_pty_init(void)
54466 panic("Couldn't register Unix98 pts driver");
54467
54468 /* Now create the /dev/ptmx special device */
54469+ pax_open_kernel();
54470 tty_default_fops(&ptmx_fops);
54471- ptmx_fops.open = ptmx_open;
54472+ *(void **)&ptmx_fops.open = ptmx_open;
54473+ pax_close_kernel();
54474
54475 cdev_init(&ptmx_cdev, &ptmx_fops);
54476 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
54477diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
54478index 383c4c7..d408e21 100644
54479--- a/drivers/tty/rocket.c
54480+++ b/drivers/tty/rocket.c
54481@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
54482 tty->driver_data = info;
54483 tty_port_tty_set(port, tty);
54484
54485- if (port->count++ == 0) {
54486+ if (atomic_inc_return(&port->count) == 1) {
54487 atomic_inc(&rp_num_ports_open);
54488
54489 #ifdef ROCKET_DEBUG_OPEN
54490@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
54491 #endif
54492 }
54493 #ifdef ROCKET_DEBUG_OPEN
54494- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
54495+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
54496 #endif
54497
54498 /*
54499@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
54500 spin_unlock_irqrestore(&info->port.lock, flags);
54501 return;
54502 }
54503- if (info->port.count)
54504+ if (atomic_read(&info->port.count))
54505 atomic_dec(&rp_num_ports_open);
54506 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
54507 spin_unlock_irqrestore(&info->port.lock, flags);
54508diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
54509index 1274499..f541382 100644
54510--- a/drivers/tty/serial/ioc4_serial.c
54511+++ b/drivers/tty/serial/ioc4_serial.c
54512@@ -437,7 +437,7 @@ struct ioc4_soft {
54513 } is_intr_info[MAX_IOC4_INTR_ENTS];
54514
54515 /* Number of entries active in the above array */
54516- atomic_t is_num_intrs;
54517+ atomic_unchecked_t is_num_intrs;
54518 } is_intr_type[IOC4_NUM_INTR_TYPES];
54519
54520 /* is_ir_lock must be held while
54521@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
54522 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
54523 || (type == IOC4_OTHER_INTR_TYPE)));
54524
54525- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
54526+ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
54527 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
54528
54529 /* Save off the lower level interrupt handler */
54530@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
54531
54532 soft = arg;
54533 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
54534- num_intrs = (int)atomic_read(
54535+ num_intrs = (int)atomic_read_unchecked(
54536 &soft->is_intr_type[intr_type].is_num_intrs);
54537
54538 this_mir = this_ir = pending_intrs(soft, intr_type);
54539diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
54540index cfadf29..8cf4595 100644
54541--- a/drivers/tty/serial/kgdb_nmi.c
54542+++ b/drivers/tty/serial/kgdb_nmi.c
54543@@ -51,7 +51,9 @@ static int kgdb_nmi_console_setup(struct console *co, char *options)
54544 * I/O utilities that messages sent to the console will automatically
54545 * be displayed on the dbg_io.
54546 */
54547- dbg_io_ops->is_console = true;
54548+ pax_open_kernel();
54549+ *(int *)&dbg_io_ops->is_console = true;
54550+ pax_close_kernel();
54551
54552 return 0;
54553 }
54554diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
54555index a260cde..6b2b5ce 100644
54556--- a/drivers/tty/serial/kgdboc.c
54557+++ b/drivers/tty/serial/kgdboc.c
54558@@ -24,8 +24,9 @@
54559 #define MAX_CONFIG_LEN 40
54560
54561 static struct kgdb_io kgdboc_io_ops;
54562+static struct kgdb_io kgdboc_io_ops_console;
54563
54564-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
54565+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
54566 static int configured = -1;
54567
54568 static char config[MAX_CONFIG_LEN];
54569@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
54570 kgdboc_unregister_kbd();
54571 if (configured == 1)
54572 kgdb_unregister_io_module(&kgdboc_io_ops);
54573+ else if (configured == 2)
54574+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
54575 }
54576
54577 static int configure_kgdboc(void)
54578@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
54579 int err;
54580 char *cptr = config;
54581 struct console *cons;
54582+ int is_console = 0;
54583
54584 err = kgdboc_option_setup(config);
54585 if (err || !strlen(config) || isspace(config[0]))
54586 goto noconfig;
54587
54588 err = -ENODEV;
54589- kgdboc_io_ops.is_console = 0;
54590 kgdb_tty_driver = NULL;
54591
54592 kgdboc_use_kms = 0;
54593@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
54594 int idx;
54595 if (cons->device && cons->device(cons, &idx) == p &&
54596 idx == tty_line) {
54597- kgdboc_io_ops.is_console = 1;
54598+ is_console = 1;
54599 break;
54600 }
54601 cons = cons->next;
54602@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
54603 kgdb_tty_line = tty_line;
54604
54605 do_register:
54606- err = kgdb_register_io_module(&kgdboc_io_ops);
54607+ if (is_console) {
54608+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
54609+ configured = 2;
54610+ } else {
54611+ err = kgdb_register_io_module(&kgdboc_io_ops);
54612+ configured = 1;
54613+ }
54614 if (err)
54615 goto noconfig;
54616
54617@@ -205,8 +214,6 @@ do_register:
54618 if (err)
54619 goto nmi_con_failed;
54620
54621- configured = 1;
54622-
54623 return 0;
54624
54625 nmi_con_failed:
54626@@ -223,7 +230,7 @@ noconfig:
54627 static int __init init_kgdboc(void)
54628 {
54629 /* Already configured? */
54630- if (configured == 1)
54631+ if (configured >= 1)
54632 return 0;
54633
54634 return configure_kgdboc();
54635@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
54636 if (config[len - 1] == '\n')
54637 config[len - 1] = '\0';
54638
54639- if (configured == 1)
54640+ if (configured >= 1)
54641 cleanup_kgdboc();
54642
54643 /* Go and configure with the new params. */
54644@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
54645 .post_exception = kgdboc_post_exp_handler,
54646 };
54647
54648+static struct kgdb_io kgdboc_io_ops_console = {
54649+ .name = "kgdboc",
54650+ .read_char = kgdboc_get_char,
54651+ .write_char = kgdboc_put_char,
54652+ .pre_exception = kgdboc_pre_exp_handler,
54653+ .post_exception = kgdboc_post_exp_handler,
54654+ .is_console = 1
54655+};
54656+
54657 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
54658 /* This is only available if kgdboc is a built in for early debugging */
54659 static int __init kgdboc_early_init(char *opt)
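
The kgdboc rework follows from constifying struct kgdb_io: instead of flipping is_console on one mutable object, the driver keeps two static variants, registers whichever applies, and records the choice in configured (1 = plain, 2 = console) so cleanup unregisters the right one. A plain-C sketch of the two-variant pattern:

    #include <stdio.h>

    struct kgdb_io_model { const char *name; int is_console; };

    /* Two const-able variants instead of one object whose is_console
     * field is flipped at configure time. */
    static const struct kgdb_io_model io_ops         = { "kgdboc", 0 };
    static const struct kgdb_io_model io_ops_console = { "kgdboc", 1 };

    static int configured;   /* 0 = none, 1 = plain, 2 = console */

    static void register_io(const struct kgdb_io_model *ops)
    {
        printf("registered %s (console=%d)\n", ops->name, ops->is_console);
    }

    int main(void)
    {
        int is_console = 1;                   /* decided during setup */
        if (is_console) {
            register_io(&io_ops_console);
            configured = 2;                   /* remember the variant */
        } else {
            register_io(&io_ops);
            configured = 1;
        }
        printf("configured=%d\n", configured);
        return 0;
    }
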
54660diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
54661index 72000a6..a190bc4 100644
54662--- a/drivers/tty/serial/msm_serial.c
54663+++ b/drivers/tty/serial/msm_serial.c
54664@@ -981,7 +981,7 @@ static struct uart_driver msm_uart_driver = {
54665 .cons = MSM_CONSOLE,
54666 };
54667
54668-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
54669+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
54670
54671 static const struct of_device_id msm_uartdm_table[] = {
54672 { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
54673@@ -1000,7 +1000,7 @@ static int msm_serial_probe(struct platform_device *pdev)
54674 int irq;
54675
54676 if (pdev->id == -1)
54677- pdev->id = atomic_inc_return(&msm_uart_next_id) - 1;
54678+ pdev->id = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
54679
54680 if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
54681 return -ENXIO;
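
The atomic_unchecked_t conversion in the msm_serial hunk follows the rule applied throughout this patch: counters whose wraparound is harmless (probe-order IDs, statistics, event counts) use the _unchecked variants so the PaX REFCOUNT overflow detector fires only on genuine reference counts. A sketch of the idiom, assuming a kernel carrying these grsecurity/PaX primitives (without them the _unchecked ops degrade to the plain atomic ops):

/* Wraparound of a probe-order ID is harmless by design, so the
 * counter opts out of reference-count overflow detection. */
static atomic_unchecked_t next_id = ATOMIC_INIT(0);

static int alloc_id(void)
{
        return atomic_inc_return_unchecked(&next_id) - 1;
}
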
54682diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
54683index c1d3ebd..f618a93 100644
54684--- a/drivers/tty/serial/samsung.c
54685+++ b/drivers/tty/serial/samsung.c
54686@@ -486,11 +486,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
54687 }
54688 }
54689
54690+static int s3c64xx_serial_startup(struct uart_port *port);
54691 static int s3c24xx_serial_startup(struct uart_port *port)
54692 {
54693 struct s3c24xx_uart_port *ourport = to_ourport(port);
54694 int ret;
54695
54696+ /* Startup sequence is different for s3c64xx and higher SoC's */
54697+ if (s3c24xx_serial_has_interrupt_mask(port))
54698+ return s3c64xx_serial_startup(port);
54699+
54700 dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n",
54701 port, (unsigned long long)port->mapbase, port->membase);
54702
54703@@ -1164,10 +1169,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
54704 /* setup info for port */
54705 port->dev = &platdev->dev;
54706
54707- /* Startup sequence is different for s3c64xx and higher SoC's */
54708- if (s3c24xx_serial_has_interrupt_mask(port))
54709- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
54710-
54711 port->uartclk = 1;
54712
54713 if (cfg->uart_flags & UPF_CONS_FLOW) {
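
The samsung hunk removes the probe-time write s3c24xx_serial_ops.startup = s3c64xx_serial_startup and instead dispatches at the top of s3c24xx_serial_startup(); behaviour is unchanged, but the shared ops table no longer needs to be writable. A standalone model of that dispatch (types and names are illustrative only):

#include <stdio.h>

struct port { int has_irq_mask; };

static int startup_v2(struct port *p) { (void)p; return 2; }

/* One generic entry point branches per-SoC instead of rewriting a
 * shared (ideally const) ops table during probe. */
static int startup(struct port *p)
{
        if (p->has_irq_mask)
                return startup_v2(p);
        return 1;       /* legacy path */
}

int main(void)
{
        struct port a = { 0 }, b = { 1 };
        printf("%d %d\n", startup(&a), startup(&b));    /* 1 2 */
        return 0;
}
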
54714diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
54715index ef2fb36..238d80c 100644
54716--- a/drivers/tty/serial/serial_core.c
54717+++ b/drivers/tty/serial/serial_core.c
54718@@ -1336,7 +1336,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
54719
54720 pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
54721
54722- if (!port->count || tty_port_close_start(port, tty, filp) == 0)
54723+ if (!atomic_read(&port->count) || tty_port_close_start(port, tty, filp) == 0)
54724 return;
54725
54726 /*
54727@@ -1463,7 +1463,7 @@ static void uart_hangup(struct tty_struct *tty)
54728 uart_flush_buffer(tty);
54729 uart_shutdown(tty, state);
54730 spin_lock_irqsave(&port->lock, flags);
54731- port->count = 0;
54732+ atomic_set(&port->count, 0);
54733 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
54734 spin_unlock_irqrestore(&port->lock, flags);
54735 tty_port_tty_set(port, NULL);
54736@@ -1561,7 +1561,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
54737 goto end;
54738 }
54739
54740- port->count++;
54741+ atomic_inc(&port->count);
54742 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
54743 retval = -ENXIO;
54744 goto err_dec_count;
54745@@ -1601,7 +1601,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
54746 end:
54747 return retval;
54748 err_dec_count:
54749- port->count--;
54750+	atomic_dec(&port->count);
54751 mutex_unlock(&port->mutex);
54752 goto end;
54753 }
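
In the serial_core hunk, port->count becomes an atomic_t so each increment and decrement is a single read-modify-write and readers outside port->lock cannot see a torn update; the error path (err_dec_count) must then undo its increment with atomic_dec(). A user-space model of that open/error shape in C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int port_count;

static int open_port(int fail)
{
        atomic_fetch_add(&port_count, 1);
        if (fail) {
                /* the error path must undo the increment */
                atomic_fetch_sub(&port_count, 1);
                return -1;
        }
        return 0;
}

int main(void)
{
        open_port(0);
        open_port(1);   /* fails; count is restored */
        printf("count=%d\n", atomic_load(&port_count)); /* 1 */
        return 0;
}
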
54754diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
54755index d48e040..0f52764 100644
54756--- a/drivers/tty/synclink.c
54757+++ b/drivers/tty/synclink.c
54758@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
54759
54760 if (debug_level >= DEBUG_LEVEL_INFO)
54761 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
54762- __FILE__,__LINE__, info->device_name, info->port.count);
54763+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
54764
54765 if (tty_port_close_start(&info->port, tty, filp) == 0)
54766 goto cleanup;
54767@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
54768 cleanup:
54769 if (debug_level >= DEBUG_LEVEL_INFO)
54770 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
54771- tty->driver->name, info->port.count);
54772+ tty->driver->name, atomic_read(&info->port.count));
54773
54774 } /* end of mgsl_close() */
54775
54776@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
54777
54778 mgsl_flush_buffer(tty);
54779 shutdown(info);
54780-
54781- info->port.count = 0;
54782+
54783+ atomic_set(&info->port.count, 0);
54784 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54785 info->port.tty = NULL;
54786
54787@@ -3297,12 +3297,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54788
54789 if (debug_level >= DEBUG_LEVEL_INFO)
54790 printk("%s(%d):block_til_ready before block on %s count=%d\n",
54791- __FILE__,__LINE__, tty->driver->name, port->count );
54792+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54793
54794 spin_lock_irqsave(&info->irq_spinlock, flags);
54795 if (!tty_hung_up_p(filp)) {
54796 extra_count = true;
54797- port->count--;
54798+ atomic_dec(&port->count);
54799 }
54800 spin_unlock_irqrestore(&info->irq_spinlock, flags);
54801 port->blocked_open++;
54802@@ -3331,7 +3331,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54803
54804 if (debug_level >= DEBUG_LEVEL_INFO)
54805 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
54806- __FILE__,__LINE__, tty->driver->name, port->count );
54807+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54808
54809 tty_unlock(tty);
54810 schedule();
54811@@ -3343,12 +3343,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54812
54813 /* FIXME: Racy on hangup during close wait */
54814 if (extra_count)
54815- port->count++;
54816+ atomic_inc(&port->count);
54817 port->blocked_open--;
54818
54819 if (debug_level >= DEBUG_LEVEL_INFO)
54820 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
54821- __FILE__,__LINE__, tty->driver->name, port->count );
54822+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54823
54824 if (!retval)
54825 port->flags |= ASYNC_NORMAL_ACTIVE;
54826@@ -3400,7 +3400,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
54827
54828 if (debug_level >= DEBUG_LEVEL_INFO)
54829 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
54830- __FILE__,__LINE__,tty->driver->name, info->port.count);
54831+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
54832
54833 /* If port is closing, signal caller to try again */
54834 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
54835@@ -3419,10 +3419,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
54836 spin_unlock_irqrestore(&info->netlock, flags);
54837 goto cleanup;
54838 }
54839- info->port.count++;
54840+ atomic_inc(&info->port.count);
54841 spin_unlock_irqrestore(&info->netlock, flags);
54842
54843- if (info->port.count == 1) {
54844+ if (atomic_read(&info->port.count) == 1) {
54845 /* 1st open on this device, init hardware */
54846 retval = startup(info);
54847 if (retval < 0)
54848@@ -3446,8 +3446,8 @@ cleanup:
54849 if (retval) {
54850 if (tty->count == 1)
54851 info->port.tty = NULL; /* tty layer will release tty struct */
54852- if(info->port.count)
54853- info->port.count--;
54854+ if (atomic_read(&info->port.count))
54855+ atomic_dec(&info->port.count);
54856 }
54857
54858 return retval;
54859@@ -7665,7 +7665,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
54860 unsigned short new_crctype;
54861
54862 /* return error if TTY interface open */
54863- if (info->port.count)
54864+ if (atomic_read(&info->port.count))
54865 return -EBUSY;
54866
54867 switch (encoding)
54868@@ -7760,7 +7760,7 @@ static int hdlcdev_open(struct net_device *dev)
54869
54870 /* arbitrate between network and tty opens */
54871 spin_lock_irqsave(&info->netlock, flags);
54872- if (info->port.count != 0 || info->netcount != 0) {
54873+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
54874 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
54875 spin_unlock_irqrestore(&info->netlock, flags);
54876 return -EBUSY;
54877@@ -7846,7 +7846,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
54878 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
54879
54880 /* return error if TTY interface open */
54881- if (info->port.count)
54882+ if (atomic_read(&info->port.count))
54883 return -EBUSY;
54884
54885 if (cmd != SIOCWANDEV)
54886diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
54887index c359a91..959fc26 100644
54888--- a/drivers/tty/synclink_gt.c
54889+++ b/drivers/tty/synclink_gt.c
54890@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
54891 tty->driver_data = info;
54892 info->port.tty = tty;
54893
54894- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
54895+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
54896
54897 /* If port is closing, signal caller to try again */
54898 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
54899@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
54900 mutex_unlock(&info->port.mutex);
54901 goto cleanup;
54902 }
54903- info->port.count++;
54904+ atomic_inc(&info->port.count);
54905 spin_unlock_irqrestore(&info->netlock, flags);
54906
54907- if (info->port.count == 1) {
54908+ if (atomic_read(&info->port.count) == 1) {
54909 /* 1st open on this device, init hardware */
54910 retval = startup(info);
54911 if (retval < 0) {
54912@@ -715,8 +715,8 @@ cleanup:
54913 if (retval) {
54914 if (tty->count == 1)
54915 info->port.tty = NULL; /* tty layer will release tty struct */
54916- if(info->port.count)
54917- info->port.count--;
54918+ if(atomic_read(&info->port.count))
54919+ atomic_dec(&info->port.count);
54920 }
54921
54922 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
54923@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54924
54925 if (sanity_check(info, tty->name, "close"))
54926 return;
54927- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
54928+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
54929
54930 if (tty_port_close_start(&info->port, tty, filp) == 0)
54931 goto cleanup;
54932@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54933 tty_port_close_end(&info->port, tty);
54934 info->port.tty = NULL;
54935 cleanup:
54936- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
54937+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
54938 }
54939
54940 static void hangup(struct tty_struct *tty)
54941@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
54942 shutdown(info);
54943
54944 spin_lock_irqsave(&info->port.lock, flags);
54945- info->port.count = 0;
54946+ atomic_set(&info->port.count, 0);
54947 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54948 info->port.tty = NULL;
54949 spin_unlock_irqrestore(&info->port.lock, flags);
54950@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
54951 unsigned short new_crctype;
54952
54953 /* return error if TTY interface open */
54954- if (info->port.count)
54955+ if (atomic_read(&info->port.count))
54956 return -EBUSY;
54957
54958 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
54959@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
54960
54961 /* arbitrate between network and tty opens */
54962 spin_lock_irqsave(&info->netlock, flags);
54963- if (info->port.count != 0 || info->netcount != 0) {
54964+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
54965 DBGINFO(("%s hdlc_open busy\n", dev->name));
54966 spin_unlock_irqrestore(&info->netlock, flags);
54967 return -EBUSY;
54968@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
54969 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
54970
54971 /* return error if TTY interface open */
54972- if (info->port.count)
54973+ if (atomic_read(&info->port.count))
54974 return -EBUSY;
54975
54976 if (cmd != SIOCWANDEV)
54977@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
54978 if (port == NULL)
54979 continue;
54980 spin_lock(&port->lock);
54981- if ((port->port.count || port->netcount) &&
54982+ if ((atomic_read(&port->port.count) || port->netcount) &&
54983 port->pending_bh && !port->bh_running &&
54984 !port->bh_requested) {
54985 DBGISR(("%s bh queued\n", port->device_name));
54986@@ -3302,7 +3302,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54987 spin_lock_irqsave(&info->lock, flags);
54988 if (!tty_hung_up_p(filp)) {
54989 extra_count = true;
54990- port->count--;
54991+ atomic_dec(&port->count);
54992 }
54993 spin_unlock_irqrestore(&info->lock, flags);
54994 port->blocked_open++;
54995@@ -3339,7 +3339,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54996 remove_wait_queue(&port->open_wait, &wait);
54997
54998 if (extra_count)
54999- port->count++;
55000+ atomic_inc(&port->count);
55001 port->blocked_open--;
55002
55003 if (!retval)
55004diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
55005index 53ba853..3c30f6d 100644
55006--- a/drivers/tty/synclinkmp.c
55007+++ b/drivers/tty/synclinkmp.c
55008@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
55009
55010 if (debug_level >= DEBUG_LEVEL_INFO)
55011 printk("%s(%d):%s open(), old ref count = %d\n",
55012- __FILE__,__LINE__,tty->driver->name, info->port.count);
55013+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
55014
55015 /* If port is closing, signal caller to try again */
55016 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
55017@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
55018 spin_unlock_irqrestore(&info->netlock, flags);
55019 goto cleanup;
55020 }
55021- info->port.count++;
55022+ atomic_inc(&info->port.count);
55023 spin_unlock_irqrestore(&info->netlock, flags);
55024
55025- if (info->port.count == 1) {
55026+ if (atomic_read(&info->port.count) == 1) {
55027 /* 1st open on this device, init hardware */
55028 retval = startup(info);
55029 if (retval < 0)
55030@@ -796,8 +796,8 @@ cleanup:
55031 if (retval) {
55032 if (tty->count == 1)
55033 info->port.tty = NULL; /* tty layer will release tty struct */
55034- if(info->port.count)
55035- info->port.count--;
55036+ if(atomic_read(&info->port.count))
55037+ atomic_dec(&info->port.count);
55038 }
55039
55040 return retval;
55041@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
55042
55043 if (debug_level >= DEBUG_LEVEL_INFO)
55044 printk("%s(%d):%s close() entry, count=%d\n",
55045- __FILE__,__LINE__, info->device_name, info->port.count);
55046+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
55047
55048 if (tty_port_close_start(&info->port, tty, filp) == 0)
55049 goto cleanup;
55050@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
55051 cleanup:
55052 if (debug_level >= DEBUG_LEVEL_INFO)
55053 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
55054- tty->driver->name, info->port.count);
55055+ tty->driver->name, atomic_read(&info->port.count));
55056 }
55057
55058 /* Called by tty_hangup() when a hangup is signaled.
55059@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
55060 shutdown(info);
55061
55062 spin_lock_irqsave(&info->port.lock, flags);
55063- info->port.count = 0;
55064+ atomic_set(&info->port.count, 0);
55065 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
55066 info->port.tty = NULL;
55067 spin_unlock_irqrestore(&info->port.lock, flags);
55068@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
55069 unsigned short new_crctype;
55070
55071 /* return error if TTY interface open */
55072- if (info->port.count)
55073+ if (atomic_read(&info->port.count))
55074 return -EBUSY;
55075
55076 switch (encoding)
55077@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
55078
55079 /* arbitrate between network and tty opens */
55080 spin_lock_irqsave(&info->netlock, flags);
55081- if (info->port.count != 0 || info->netcount != 0) {
55082+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
55083 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
55084 spin_unlock_irqrestore(&info->netlock, flags);
55085 return -EBUSY;
55086@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
55087 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
55088
55089 /* return error if TTY interface open */
55090- if (info->port.count)
55091+ if (atomic_read(&info->port.count))
55092 return -EBUSY;
55093
55094 if (cmd != SIOCWANDEV)
55095@@ -2621,7 +2621,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
55096 * do not request bottom half processing if the
55097 * device is not open in a normal mode.
55098 */
55099- if ( port && (port->port.count || port->netcount) &&
55100+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
55101 port->pending_bh && !port->bh_running &&
55102 !port->bh_requested ) {
55103 if ( debug_level >= DEBUG_LEVEL_ISR )
55104@@ -3319,12 +3319,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
55105
55106 if (debug_level >= DEBUG_LEVEL_INFO)
55107 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
55108- __FILE__,__LINE__, tty->driver->name, port->count );
55109+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
55110
55111 spin_lock_irqsave(&info->lock, flags);
55112 if (!tty_hung_up_p(filp)) {
55113 extra_count = true;
55114- port->count--;
55115+ atomic_dec(&port->count);
55116 }
55117 spin_unlock_irqrestore(&info->lock, flags);
55118 port->blocked_open++;
55119@@ -3353,7 +3353,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
55120
55121 if (debug_level >= DEBUG_LEVEL_INFO)
55122 printk("%s(%d):%s block_til_ready() count=%d\n",
55123- __FILE__,__LINE__, tty->driver->name, port->count );
55124+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
55125
55126 tty_unlock(tty);
55127 schedule();
55128@@ -3364,12 +3364,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
55129 remove_wait_queue(&port->open_wait, &wait);
55130
55131 if (extra_count)
55132- port->count++;
55133+ atomic_inc(&port->count);
55134 port->blocked_open--;
55135
55136 if (debug_level >= DEBUG_LEVEL_INFO)
55137 printk("%s(%d):%s block_til_ready() after, count=%d\n",
55138- __FILE__,__LINE__, tty->driver->name, port->count );
55139+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
55140
55141 if (!retval)
55142 port->flags |= ASYNC_NORMAL_ACTIVE;
55143diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
55144index 454b658..57b1430 100644
55145--- a/drivers/tty/sysrq.c
55146+++ b/drivers/tty/sysrq.c
55147@@ -1084,7 +1084,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
55148 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
55149 size_t count, loff_t *ppos)
55150 {
55151- if (count) {
55152+ if (count && capable(CAP_SYS_ADMIN)) {
55153 char c;
55154
55155 if (get_user(c, buf))
55156diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
55157index 3411071..86f2cf2 100644
55158--- a/drivers/tty/tty_io.c
55159+++ b/drivers/tty/tty_io.c
55160@@ -3475,7 +3475,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
55161
55162 void tty_default_fops(struct file_operations *fops)
55163 {
55164- *fops = tty_fops;
55165+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
55166 }
55167
55168 /*
55169diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
55170index 2d822aa..a566234 100644
55171--- a/drivers/tty/tty_ldisc.c
55172+++ b/drivers/tty/tty_ldisc.c
55173@@ -71,7 +71,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
55174 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
55175 tty_ldiscs[disc] = new_ldisc;
55176 new_ldisc->num = disc;
55177- new_ldisc->refcount = 0;
55178+ atomic_set(&new_ldisc->refcount, 0);
55179 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
55180
55181 return ret;
55182@@ -99,7 +99,7 @@ int tty_unregister_ldisc(int disc)
55183 return -EINVAL;
55184
55185 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
55186- if (tty_ldiscs[disc]->refcount)
55187+ if (atomic_read(&tty_ldiscs[disc]->refcount))
55188 ret = -EBUSY;
55189 else
55190 tty_ldiscs[disc] = NULL;
55191@@ -120,7 +120,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
55192 if (ldops) {
55193 ret = ERR_PTR(-EAGAIN);
55194 if (try_module_get(ldops->owner)) {
55195- ldops->refcount++;
55196+ atomic_inc(&ldops->refcount);
55197 ret = ldops;
55198 }
55199 }
55200@@ -133,7 +133,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
55201 unsigned long flags;
55202
55203 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
55204- ldops->refcount--;
55205+ atomic_dec(&ldops->refcount);
55206 module_put(ldops->owner);
55207 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
55208 }
55209diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
55210index 3f746c8..2f2fcaa 100644
55211--- a/drivers/tty/tty_port.c
55212+++ b/drivers/tty/tty_port.c
55213@@ -235,7 +235,7 @@ void tty_port_hangup(struct tty_port *port)
55214 unsigned long flags;
55215
55216 spin_lock_irqsave(&port->lock, flags);
55217- port->count = 0;
55218+ atomic_set(&port->count, 0);
55219 port->flags &= ~ASYNC_NORMAL_ACTIVE;
55220 tty = port->tty;
55221 if (tty)
55222@@ -393,7 +393,7 @@ int tty_port_block_til_ready(struct tty_port *port,
55223 /* The port lock protects the port counts */
55224 spin_lock_irqsave(&port->lock, flags);
55225 if (!tty_hung_up_p(filp))
55226- port->count--;
55227+ atomic_dec(&port->count);
55228 port->blocked_open++;
55229 spin_unlock_irqrestore(&port->lock, flags);
55230
55231@@ -435,7 +435,7 @@ int tty_port_block_til_ready(struct tty_port *port,
55232 we must not mess that up further */
55233 spin_lock_irqsave(&port->lock, flags);
55234 if (!tty_hung_up_p(filp))
55235- port->count++;
55236+ atomic_inc(&port->count);
55237 port->blocked_open--;
55238 if (retval == 0)
55239 port->flags |= ASYNC_NORMAL_ACTIVE;
55240@@ -469,19 +469,19 @@ int tty_port_close_start(struct tty_port *port,
55241 return 0;
55242 }
55243
55244- if (tty->count == 1 && port->count != 1) {
55245+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
55246 printk(KERN_WARNING
55247 "tty_port_close_start: tty->count = 1 port count = %d.\n",
55248- port->count);
55249- port->count = 1;
55250+ atomic_read(&port->count));
55251+ atomic_set(&port->count, 1);
55252 }
55253- if (--port->count < 0) {
55254+ if (atomic_dec_return(&port->count) < 0) {
55255 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
55256- port->count);
55257- port->count = 0;
55258+ atomic_read(&port->count));
55259+ atomic_set(&port->count, 0);
55260 }
55261
55262- if (port->count) {
55263+ if (atomic_read(&port->count)) {
55264 spin_unlock_irqrestore(&port->lock, flags);
55265 return 0;
55266 }
55267@@ -563,7 +563,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
55268 {
55269 spin_lock_irq(&port->lock);
55270 if (!tty_hung_up_p(filp))
55271- ++port->count;
55272+ atomic_inc(&port->count);
55273 spin_unlock_irq(&port->lock);
55274 tty_port_tty_set(port, tty);
55275
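
In the tty_port hunk, if (--port->count < 0) becomes if (atomic_dec_return(&port->count) < 0): decrement and test are one atomic operation, so no concurrent update can slide between them. A model of that decrement-and-clamp shape:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int count;

/* Returns 1 when the count underflowed and had to be clamped,
 * mirroring the warning branch in tty_port_close_start(). */
static int close_start(void)
{
        if (atomic_fetch_sub(&count, 1) - 1 < 0) {      /* dec_return */
                atomic_store(&count, 0);
                return 1;
        }
        return 0;
}

int main(void)
{
        printf("underflow=%d count=%d\n",
               close_start(), atomic_load(&count));     /* 1, 0 */
        return 0;
}
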
55276diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
55277index d0e3a44..5f8b754 100644
55278--- a/drivers/tty/vt/keyboard.c
55279+++ b/drivers/tty/vt/keyboard.c
55280@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
55281 kbd->kbdmode == VC_OFF) &&
55282 value != KVAL(K_SAK))
55283 return; /* SAK is allowed even in raw mode */
55284+
55285+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
55286+ {
55287+ void *func = fn_handler[value];
55288+ if (func == fn_show_state || func == fn_show_ptregs ||
55289+ func == fn_show_mem)
55290+ return;
55291+ }
55292+#endif
55293+
55294 fn_handler[value](vc);
55295 }
55296
55297@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
55298 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
55299 return -EFAULT;
55300
55301- if (!capable(CAP_SYS_TTY_CONFIG))
55302- perm = 0;
55303-
55304 switch (cmd) {
55305 case KDGKBENT:
55306 /* Ensure another thread doesn't free it under us */
55307@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
55308 spin_unlock_irqrestore(&kbd_event_lock, flags);
55309 return put_user(val, &user_kbe->kb_value);
55310 case KDSKBENT:
55311+ if (!capable(CAP_SYS_TTY_CONFIG))
55312+ perm = 0;
55313+
55314 if (!perm)
55315 return -EPERM;
55316 if (!i && v == K_NOSUCHMAP) {
55317@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
55318 int i, j, k;
55319 int ret;
55320
55321- if (!capable(CAP_SYS_TTY_CONFIG))
55322- perm = 0;
55323-
55324 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
55325 if (!kbs) {
55326 ret = -ENOMEM;
55327@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
55328 kfree(kbs);
55329 return ((p && *p) ? -EOVERFLOW : 0);
55330 case KDSKBSENT:
55331+ if (!capable(CAP_SYS_TTY_CONFIG))
55332+ perm = 0;
55333+
55334 if (!perm) {
55335 ret = -EPERM;
55336 goto reterr;
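
The keyboard.c hunks relocate the capable(CAP_SYS_TTY_CONFIG) test from the common entry path into the mutating subcommands (KDSKBENT, KDSKBSENT), so only writers are gated by the privilege check while the read-only KDGKB paths are untouched. A standalone model of per-command gating:

#include <stdbool.h>
#include <stdio.h>

enum { CMD_GET, CMD_SET };

static bool has_cap(void) { return false; }     /* stands in for capable() */

static int do_ioctl(int cmd)
{
        switch (cmd) {
        case CMD_GET:
                return 0;       /* read-only: no privilege required */
        case CMD_SET:
                if (!has_cap())
                        return -1;      /* -EPERM stand-in */
                return 0;
        }
        return -1;
}

int main(void)
{
        printf("get=%d set=%d\n", do_ioctl(CMD_GET), do_ioctl(CMD_SET));
        return 0;
}
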
55337diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
55338index a673e5b..36e5d32 100644
55339--- a/drivers/uio/uio.c
55340+++ b/drivers/uio/uio.c
55341@@ -25,6 +25,7 @@
55342 #include <linux/kobject.h>
55343 #include <linux/cdev.h>
55344 #include <linux/uio_driver.h>
55345+#include <asm/local.h>
55346
55347 #define UIO_MAX_DEVICES (1U << MINORBITS)
55348
55349@@ -32,7 +33,7 @@ struct uio_device {
55350 struct module *owner;
55351 struct device *dev;
55352 int minor;
55353- atomic_t event;
55354+ atomic_unchecked_t event;
55355 struct fasync_struct *async_queue;
55356 wait_queue_head_t wait;
55357 struct uio_info *info;
55358@@ -243,7 +244,7 @@ static ssize_t event_show(struct device *dev,
55359 struct device_attribute *attr, char *buf)
55360 {
55361 struct uio_device *idev = dev_get_drvdata(dev);
55362- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
55363+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
55364 }
55365 static DEVICE_ATTR_RO(event);
55366
55367@@ -405,7 +406,7 @@ void uio_event_notify(struct uio_info *info)
55368 {
55369 struct uio_device *idev = info->uio_dev;
55370
55371- atomic_inc(&idev->event);
55372+ atomic_inc_unchecked(&idev->event);
55373 wake_up_interruptible(&idev->wait);
55374 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
55375 }
55376@@ -458,7 +459,7 @@ static int uio_open(struct inode *inode, struct file *filep)
55377 }
55378
55379 listener->dev = idev;
55380- listener->event_count = atomic_read(&idev->event);
55381+ listener->event_count = atomic_read_unchecked(&idev->event);
55382 filep->private_data = listener;
55383
55384 if (idev->info->open) {
55385@@ -509,7 +510,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
55386 return -EIO;
55387
55388 poll_wait(filep, &idev->wait, wait);
55389- if (listener->event_count != atomic_read(&idev->event))
55390+ if (listener->event_count != atomic_read_unchecked(&idev->event))
55391 return POLLIN | POLLRDNORM;
55392 return 0;
55393 }
55394@@ -534,7 +535,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
55395 do {
55396 set_current_state(TASK_INTERRUPTIBLE);
55397
55398- event_count = atomic_read(&idev->event);
55399+ event_count = atomic_read_unchecked(&idev->event);
55400 if (event_count != listener->event_count) {
55401 if (copy_to_user(buf, &event_count, count))
55402 retval = -EFAULT;
55403@@ -591,9 +592,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
55404 static int uio_find_mem_index(struct vm_area_struct *vma)
55405 {
55406 struct uio_device *idev = vma->vm_private_data;
55407+ unsigned long size;
55408
55409 if (vma->vm_pgoff < MAX_UIO_MAPS) {
55410- if (idev->info->mem[vma->vm_pgoff].size == 0)
55411+ size = idev->info->mem[vma->vm_pgoff].size;
55412+ if (size == 0)
55413+ return -1;
55414+ if (vma->vm_end - vma->vm_start > size)
55415 return -1;
55416 return (int)vma->vm_pgoff;
55417 }
55418@@ -825,7 +830,7 @@ int __uio_register_device(struct module *owner,
55419 idev->owner = owner;
55420 idev->info = info;
55421 init_waitqueue_head(&idev->wait);
55422- atomic_set(&idev->event, 0);
55423+ atomic_set_unchecked(&idev->event, 0);
55424
55425 ret = uio_get_minor(idev);
55426 if (ret)
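
The uio hunk adds the missing upper bound in uio_find_mem_index(): a request is rejected not only when the backing region is empty but also when the requested span vma->vm_end - vma->vm_start exceeds the region size, closing an out-of-bounds mapping. A standalone model of the check:

#include <stdio.h>

/* Reject empty regions and requests larger than the region. */
static int find_mem_index(unsigned long req_len, unsigned long region_size)
{
        if (region_size == 0)
                return -1;
        if (req_len > region_size)
                return -1;
        return 0;
}

int main(void)
{
        printf("%d\n", find_mem_index(8192, 4096));     /* -1: too large */
        printf("%d\n", find_mem_index(4096, 4096));     /*  0: fits */
        return 0;
}
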
55427diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
55428index 813d4d3..a71934f 100644
55429--- a/drivers/usb/atm/cxacru.c
55430+++ b/drivers/usb/atm/cxacru.c
55431@@ -472,7 +472,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
55432 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
55433 if (ret < 2)
55434 return -EINVAL;
55435- if (index < 0 || index > 0x7f)
55436+ if (index > 0x7f)
55437 return -EINVAL;
55438 pos += tmp;
55439
55440diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
55441index dada014..1d0d517 100644
55442--- a/drivers/usb/atm/usbatm.c
55443+++ b/drivers/usb/atm/usbatm.c
55444@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55445 if (printk_ratelimit())
55446 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
55447 __func__, vpi, vci);
55448- atomic_inc(&vcc->stats->rx_err);
55449+ atomic_inc_unchecked(&vcc->stats->rx_err);
55450 return;
55451 }
55452
55453@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55454 if (length > ATM_MAX_AAL5_PDU) {
55455 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
55456 __func__, length, vcc);
55457- atomic_inc(&vcc->stats->rx_err);
55458+ atomic_inc_unchecked(&vcc->stats->rx_err);
55459 goto out;
55460 }
55461
55462@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55463 if (sarb->len < pdu_length) {
55464 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
55465 __func__, pdu_length, sarb->len, vcc);
55466- atomic_inc(&vcc->stats->rx_err);
55467+ atomic_inc_unchecked(&vcc->stats->rx_err);
55468 goto out;
55469 }
55470
55471 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
55472 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
55473 __func__, vcc);
55474- atomic_inc(&vcc->stats->rx_err);
55475+ atomic_inc_unchecked(&vcc->stats->rx_err);
55476 goto out;
55477 }
55478
55479@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55480 if (printk_ratelimit())
55481 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
55482 __func__, length);
55483- atomic_inc(&vcc->stats->rx_drop);
55484+ atomic_inc_unchecked(&vcc->stats->rx_drop);
55485 goto out;
55486 }
55487
55488@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55489
55490 vcc->push(vcc, skb);
55491
55492- atomic_inc(&vcc->stats->rx);
55493+ atomic_inc_unchecked(&vcc->stats->rx);
55494 out:
55495 skb_trim(sarb, 0);
55496 }
55497@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
55498 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
55499
55500 usbatm_pop(vcc, skb);
55501- atomic_inc(&vcc->stats->tx);
55502+ atomic_inc_unchecked(&vcc->stats->tx);
55503
55504 skb = skb_dequeue(&instance->sndqueue);
55505 }
55506@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page
55507 if (!left--)
55508 return sprintf(page,
55509 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
55510- atomic_read(&atm_dev->stats.aal5.tx),
55511- atomic_read(&atm_dev->stats.aal5.tx_err),
55512- atomic_read(&atm_dev->stats.aal5.rx),
55513- atomic_read(&atm_dev->stats.aal5.rx_err),
55514- atomic_read(&atm_dev->stats.aal5.rx_drop));
55515+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
55516+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
55517+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
55518+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
55519+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
55520
55521 if (!left--) {
55522 if (instance->disconnected)
55523diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
55524index 2a3bbdf..91d72cf 100644
55525--- a/drivers/usb/core/devices.c
55526+++ b/drivers/usb/core/devices.c
55527@@ -126,7 +126,7 @@ static const char format_endpt[] =
55528 * time it gets called.
55529 */
55530 static struct device_connect_event {
55531- atomic_t count;
55532+ atomic_unchecked_t count;
55533 wait_queue_head_t wait;
55534 } device_event = {
55535 .count = ATOMIC_INIT(1),
55536@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
55537
55538 void usbfs_conn_disc_event(void)
55539 {
55540- atomic_add(2, &device_event.count);
55541+ atomic_add_unchecked(2, &device_event.count);
55542 wake_up(&device_event.wait);
55543 }
55544
55545@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
55546
55547 poll_wait(file, &device_event.wait, wait);
55548
55549- event_count = atomic_read(&device_event.count);
55550+ event_count = atomic_read_unchecked(&device_event.count);
55551 if (file->f_version != event_count) {
55552 file->f_version = event_count;
55553 return POLLIN | POLLRDNORM;
55554diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
55555index 0b59731..46ee7d1 100644
55556--- a/drivers/usb/core/devio.c
55557+++ b/drivers/usb/core/devio.c
55558@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
55559 struct usb_dev_state *ps = file->private_data;
55560 struct usb_device *dev = ps->dev;
55561 ssize_t ret = 0;
55562- unsigned len;
55563+ size_t len;
55564 loff_t pos;
55565 int i;
55566
55567@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
55568 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
55569 struct usb_config_descriptor *config =
55570 (struct usb_config_descriptor *)dev->rawdescriptors[i];
55571- unsigned int length = le16_to_cpu(config->wTotalLength);
55572+ size_t length = le16_to_cpu(config->wTotalLength);
55573
55574 if (*ppos < pos + length) {
55575
55576 /* The descriptor may claim to be longer than it
55577 * really is. Here is the actual allocated length. */
55578- unsigned alloclen =
55579+ size_t alloclen =
55580 le16_to_cpu(dev->config[i].desc.wTotalLength);
55581
55582- len = length - (*ppos - pos);
55583+ len = length + pos - *ppos;
55584 if (len > nbytes)
55585 len = nbytes;
55586
55587 /* Simply don't write (skip over) unallocated parts */
55588 if (alloclen > (*ppos - pos)) {
55589- alloclen -= (*ppos - pos);
55590+ alloclen = alloclen + pos - *ppos;
55591 if (copy_to_user(buf,
55592 dev->rawdescriptors[i] + (*ppos - pos),
55593 min(len, alloclen))) {
55594diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
55595index bec31e2..b8091cd 100644
55596--- a/drivers/usb/core/hcd.c
55597+++ b/drivers/usb/core/hcd.c
55598@@ -1554,7 +1554,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
55599 */
55600 usb_get_urb(urb);
55601 atomic_inc(&urb->use_count);
55602- atomic_inc(&urb->dev->urbnum);
55603+ atomic_inc_unchecked(&urb->dev->urbnum);
55604 usbmon_urb_submit(&hcd->self, urb);
55605
55606 /* NOTE requirements on root-hub callers (usbfs and the hub
55607@@ -1581,7 +1581,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
55608 urb->hcpriv = NULL;
55609 INIT_LIST_HEAD(&urb->urb_list);
55610 atomic_dec(&urb->use_count);
55611- atomic_dec(&urb->dev->urbnum);
55612+ atomic_dec_unchecked(&urb->dev->urbnum);
55613 if (atomic_read(&urb->reject))
55614 wake_up(&usb_kill_urb_queue);
55615 usb_put_urb(urb);
55616diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
55617index 27f2171..e3dfc22 100644
55618--- a/drivers/usb/core/hub.c
55619+++ b/drivers/usb/core/hub.c
55620@@ -27,6 +27,7 @@
55621 #include <linux/freezer.h>
55622 #include <linux/random.h>
55623 #include <linux/pm_qos.h>
55624+#include <linux/grsecurity.h>
55625
55626 #include <asm/uaccess.h>
55627 #include <asm/byteorder.h>
55628@@ -4644,6 +4645,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
55629 goto done;
55630 return;
55631 }
55632+
55633+ if (gr_handle_new_usb())
55634+ goto done;
55635+
55636 if (hub_is_superspeed(hub->hdev))
55637 unit_load = 150;
55638 else
55639diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
55640index 0c8a7fc..c45b40a 100644
55641--- a/drivers/usb/core/message.c
55642+++ b/drivers/usb/core/message.c
55643@@ -128,7 +128,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
55644 * Return: If successful, the number of bytes transferred. Otherwise, a negative
55645 * error number.
55646 */
55647-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
55648+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
55649 __u8 requesttype, __u16 value, __u16 index, void *data,
55650 __u16 size, int timeout)
55651 {
55652@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
55653 * If successful, 0. Otherwise a negative error number. The number of actual
55654 * bytes transferred will be stored in the @actual_length parameter.
55655 */
55656-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
55657+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
55658 void *data, int len, int *actual_length, int timeout)
55659 {
55660 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
55661@@ -220,7 +220,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
55662 * bytes transferred will be stored in the @actual_length parameter.
55663 *
55664 */
55665-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
55666+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
55667 void *data, int len, int *actual_length, int timeout)
55668 {
55669 struct urb *urb;
55670diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
55671index 1236c60..d47a51c 100644
55672--- a/drivers/usb/core/sysfs.c
55673+++ b/drivers/usb/core/sysfs.c
55674@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
55675 struct usb_device *udev;
55676
55677 udev = to_usb_device(dev);
55678- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
55679+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
55680 }
55681 static DEVICE_ATTR_RO(urbnum);
55682
55683diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
55684index 4d11449..f4ccabf 100644
55685--- a/drivers/usb/core/usb.c
55686+++ b/drivers/usb/core/usb.c
55687@@ -433,7 +433,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
55688 set_dev_node(&dev->dev, dev_to_node(bus->controller));
55689 dev->state = USB_STATE_ATTACHED;
55690 dev->lpm_disable_count = 1;
55691- atomic_set(&dev->urbnum, 0);
55692+ atomic_set_unchecked(&dev->urbnum, 0);
55693
55694 INIT_LIST_HEAD(&dev->ep0.urb_list);
55695 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
55696diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
55697index dab7927..6f53afc 100644
55698--- a/drivers/usb/dwc3/gadget.c
55699+++ b/drivers/usb/dwc3/gadget.c
55700@@ -615,8 +615,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
55701 if (!usb_endpoint_xfer_isoc(desc))
55702 return 0;
55703
55704- memset(&trb_link, 0, sizeof(trb_link));
55705-
55706 /* Link TRB for ISOC. The HWO bit is never reset */
55707 trb_st_hw = &dep->trb_pool[0];
55708
55709diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
55710index 8cfc319..4868255 100644
55711--- a/drivers/usb/early/ehci-dbgp.c
55712+++ b/drivers/usb/early/ehci-dbgp.c
55713@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
55714
55715 #ifdef CONFIG_KGDB
55716 static struct kgdb_io kgdbdbgp_io_ops;
55717-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
55718+static struct kgdb_io kgdbdbgp_io_ops_console;
55719+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
55720 #else
55721 #define dbgp_kgdb_mode (0)
55722 #endif
55723@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
55724 .write_char = kgdbdbgp_write_char,
55725 };
55726
55727+static struct kgdb_io kgdbdbgp_io_ops_console = {
55728+ .name = "kgdbdbgp",
55729+ .read_char = kgdbdbgp_read_char,
55730+ .write_char = kgdbdbgp_write_char,
55731+ .is_console = 1
55732+};
55733+
55734 static int kgdbdbgp_wait_time;
55735
55736 static int __init kgdbdbgp_parse_config(char *str)
55737@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
55738 ptr++;
55739 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
55740 }
55741- kgdb_register_io_module(&kgdbdbgp_io_ops);
55742- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
55743+ if (early_dbgp_console.index != -1)
55744+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
55745+ else
55746+ kgdb_register_io_module(&kgdbdbgp_io_ops);
55747
55748 return 0;
55749 }
55750diff --git a/drivers/usb/gadget/f_uac1.c b/drivers/usb/gadget/f_uac1.c
55751index 2b4c82d..06a8ee6 100644
55752--- a/drivers/usb/gadget/f_uac1.c
55753+++ b/drivers/usb/gadget/f_uac1.c
55754@@ -13,6 +13,7 @@
55755 #include <linux/kernel.h>
55756 #include <linux/device.h>
55757 #include <linux/atomic.h>
55758+#include <linux/module.h>
55759
55760 #include "u_uac1.h"
55761
55762diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
55763index ad0aca8..8ff84865 100644
55764--- a/drivers/usb/gadget/u_serial.c
55765+++ b/drivers/usb/gadget/u_serial.c
55766@@ -733,9 +733,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
55767 spin_lock_irq(&port->port_lock);
55768
55769 /* already open? Great. */
55770- if (port->port.count) {
55771+ if (atomic_read(&port->port.count)) {
55772 status = 0;
55773- port->port.count++;
55774+ atomic_inc(&port->port.count);
55775
55776 /* currently opening/closing? wait ... */
55777 } else if (port->openclose) {
55778@@ -794,7 +794,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
55779 tty->driver_data = port;
55780 port->port.tty = tty;
55781
55782- port->port.count = 1;
55783+ atomic_set(&port->port.count, 1);
55784 port->openclose = false;
55785
55786 /* if connected, start the I/O stream */
55787@@ -836,11 +836,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
55788
55789 spin_lock_irq(&port->port_lock);
55790
55791- if (port->port.count != 1) {
55792- if (port->port.count == 0)
55793+ if (atomic_read(&port->port.count) != 1) {
55794+ if (atomic_read(&port->port.count) == 0)
55795 WARN_ON(1);
55796 else
55797- --port->port.count;
55798+ atomic_dec(&port->port.count);
55799 goto exit;
55800 }
55801
55802@@ -850,7 +850,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
55803 * and sleep if necessary
55804 */
55805 port->openclose = true;
55806- port->port.count = 0;
55807+ atomic_set(&port->port.count, 0);
55808
55809 gser = port->port_usb;
55810 if (gser && gser->disconnect)
55811@@ -1066,7 +1066,7 @@ static int gs_closed(struct gs_port *port)
55812 int cond;
55813
55814 spin_lock_irq(&port->port_lock);
55815- cond = (port->port.count == 0) && !port->openclose;
55816+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
55817 spin_unlock_irq(&port->port_lock);
55818 return cond;
55819 }
55820@@ -1209,7 +1209,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
55821 /* if it's already open, start I/O ... and notify the serial
55822 * protocol about open/close status (connect/disconnect).
55823 */
55824- if (port->port.count) {
55825+ if (atomic_read(&port->port.count)) {
55826 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
55827 gs_start_io(port);
55828 if (gser->connect)
55829@@ -1256,7 +1256,7 @@ void gserial_disconnect(struct gserial *gser)
55830
55831 port->port_usb = NULL;
55832 gser->ioport = NULL;
55833- if (port->port.count > 0 || port->openclose) {
55834+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
55835 wake_up_interruptible(&port->drain_wait);
55836 if (port->port.tty)
55837 tty_hangup(port->port.tty);
55838@@ -1272,7 +1272,7 @@ void gserial_disconnect(struct gserial *gser)
55839
55840 /* finally, free any unused/unusable I/O buffers */
55841 spin_lock_irqsave(&port->port_lock, flags);
55842- if (port->port.count == 0 && !port->openclose)
55843+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
55844 gs_buf_free(&port->port_write_buf);
55845 gs_free_requests(gser->out, &port->read_pool, NULL);
55846 gs_free_requests(gser->out, &port->read_queue, NULL);
55847diff --git a/drivers/usb/gadget/u_uac1.c b/drivers/usb/gadget/u_uac1.c
55848index 7a55fea..cc0ed4f 100644
55849--- a/drivers/usb/gadget/u_uac1.c
55850+++ b/drivers/usb/gadget/u_uac1.c
55851@@ -16,6 +16,7 @@
55852 #include <linux/ctype.h>
55853 #include <linux/random.h>
55854 #include <linux/syscalls.h>
55855+#include <linux/module.h>
55856
55857 #include "u_uac1.h"
55858
55859diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
55860index 6130b75..3b60008 100644
55861--- a/drivers/usb/host/ehci-hub.c
55862+++ b/drivers/usb/host/ehci-hub.c
55863@@ -771,7 +771,7 @@ static struct urb *request_single_step_set_feature_urb(
55864 urb->transfer_flags = URB_DIR_IN;
55865 usb_get_urb(urb);
55866 atomic_inc(&urb->use_count);
55867- atomic_inc(&urb->dev->urbnum);
55868+ atomic_inc_unchecked(&urb->dev->urbnum);
55869 urb->setup_dma = dma_map_single(
55870 hcd->self.controller,
55871 urb->setup_packet,
55872@@ -838,7 +838,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
55873 urb->status = -EINPROGRESS;
55874 usb_get_urb(urb);
55875 atomic_inc(&urb->use_count);
55876- atomic_inc(&urb->dev->urbnum);
55877+ atomic_inc_unchecked(&urb->dev->urbnum);
55878 retval = submit_single_step_set_feature(hcd, urb, 0);
55879 if (!retval && !wait_for_completion_timeout(&done,
55880 msecs_to_jiffies(2000))) {
55881diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
55882index d0d8fad..668ef7b 100644
55883--- a/drivers/usb/host/hwa-hc.c
55884+++ b/drivers/usb/host/hwa-hc.c
55885@@ -337,7 +337,10 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
55886 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
55887 struct wahc *wa = &hwahc->wa;
55888 struct device *dev = &wa->usb_iface->dev;
55889- u8 mas_le[UWB_NUM_MAS/8];
55890+ u8 *mas_le = kmalloc(UWB_NUM_MAS/8, GFP_KERNEL);
55891+
55892+ if (mas_le == NULL)
55893+ return -ENOMEM;
55894
55895 /* Set the stream index */
55896 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
55897@@ -356,10 +359,12 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
55898 WUSB_REQ_SET_WUSB_MAS,
55899 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
55900 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
55901- mas_le, 32, USB_CTRL_SET_TIMEOUT);
55902+ mas_le, UWB_NUM_MAS/8, USB_CTRL_SET_TIMEOUT);
55903 if (result < 0)
55904 dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
55905 out:
55906+ kfree(mas_le);
55907+
55908 return result;
55909 }
55910
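
The hwa-hc hunk moves mas_le from the stack to kmalloc(): buffers handed to usb_control_msg() may be DMA-mapped, and DMA to on-stack memory is unsafe, so the buffer is heap-allocated and freed on the shared out: path (the hunk also sends UWB_NUM_MAS/8 bytes instead of a hard-coded 32). A user-space model of that allocate/use/free lifetime:

#include <stdlib.h>
#include <string.h>

/* Model only: malloc/free stand in for kmalloc/kfree, and the comment
 * marks where the control transfer would occur. */
static int send_mas(const unsigned char *mas, size_t n)
{
        unsigned char *buf = malloc(n);
        int ret;

        if (buf == NULL)
                return -1;              /* -ENOMEM */
        memcpy(buf, mas, n);
        ret = 0;                        /* usb_control_msg(..., buf, n, ...) */
        free(buf);                      /* the out: path */
        return ret;
}

int main(void)
{
        unsigned char mas[32] = { 0 };
        return send_mas(mas, sizeof(mas));
}
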
55911diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
55912index b3d245e..99549ed 100644
55913--- a/drivers/usb/misc/appledisplay.c
55914+++ b/drivers/usb/misc/appledisplay.c
55915@@ -84,7 +84,7 @@ struct appledisplay {
55916 struct mutex sysfslock; /* concurrent read and write */
55917 };
55918
55919-static atomic_t count_displays = ATOMIC_INIT(0);
55920+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
55921 static struct workqueue_struct *wq;
55922
55923 static void appledisplay_complete(struct urb *urb)
55924@@ -288,7 +288,7 @@ static int appledisplay_probe(struct usb_interface *iface,
55925
55926 /* Register backlight device */
55927 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
55928- atomic_inc_return(&count_displays) - 1);
55929+ atomic_inc_return_unchecked(&count_displays) - 1);
55930 memset(&props, 0, sizeof(struct backlight_properties));
55931 props.type = BACKLIGHT_RAW;
55932 props.max_brightness = 0xff;
55933diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
55934index 8d7fc48..01c4986 100644
55935--- a/drivers/usb/serial/console.c
55936+++ b/drivers/usb/serial/console.c
55937@@ -123,7 +123,7 @@ static int usb_console_setup(struct console *co, char *options)
55938
55939 info->port = port;
55940
55941- ++port->port.count;
55942+ atomic_inc(&port->port.count);
55943 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
55944 if (serial->type->set_termios) {
55945 /*
55946@@ -167,7 +167,7 @@ static int usb_console_setup(struct console *co, char *options)
55947 }
55948 /* Now that any required fake tty operations are completed restore
55949 * the tty port count */
55950- --port->port.count;
55951+ atomic_dec(&port->port.count);
55952 /* The console is special in terms of closing the device so
55953 * indicate this port is now acting as a system console. */
55954 port->port.console = 1;
55955@@ -180,7 +180,7 @@ static int usb_console_setup(struct console *co, char *options)
55956 free_tty:
55957 kfree(tty);
55958 reset_open_count:
55959- port->port.count = 0;
55960+ atomic_set(&port->port.count, 0);
55961 usb_autopm_put_interface(serial->interface);
55962 error_get_interface:
55963 usb_serial_put(serial);
55964@@ -191,7 +191,7 @@ static int usb_console_setup(struct console *co, char *options)
55965 static void usb_console_write(struct console *co,
55966 const char *buf, unsigned count)
55967 {
55968- static struct usbcons_info *info = &usbcons_info;
55969+ struct usbcons_info *info = &usbcons_info;
55970 struct usb_serial_port *port = info->port;
55971 struct usb_serial *serial;
55972 int retval = -ENODEV;
55973diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
55974index 307e339..6aa97cb 100644
55975--- a/drivers/usb/storage/usb.h
55976+++ b/drivers/usb/storage/usb.h
55977@@ -63,7 +63,7 @@ struct us_unusual_dev {
55978 __u8 useProtocol;
55979 __u8 useTransport;
55980 int (*initFunction)(struct us_data *);
55981-};
55982+} __do_const;
55983
55984
55985 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
55986diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
55987index f2a8d29..7bc3fe7 100644
55988--- a/drivers/usb/wusbcore/wa-hc.h
55989+++ b/drivers/usb/wusbcore/wa-hc.h
55990@@ -240,7 +240,7 @@ struct wahc {
55991 spinlock_t xfer_list_lock;
55992 struct work_struct xfer_enqueue_work;
55993 struct work_struct xfer_error_work;
55994- atomic_t xfer_id_count;
55995+ atomic_unchecked_t xfer_id_count;
55996
55997 kernel_ulong_t quirks;
55998 };
55999@@ -305,7 +305,7 @@ static inline void wa_init(struct wahc *wa)
56000 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
56001 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
56002 wa->dto_in_use = 0;
56003- atomic_set(&wa->xfer_id_count, 1);
56004+ atomic_set_unchecked(&wa->xfer_id_count, 1);
56005 /* init the buf in URBs */
56006 for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
56007 usb_init_urb(&(wa->buf_in_urbs[index]));
56008diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
56009index 3e2e4ed..060c9b8 100644
56010--- a/drivers/usb/wusbcore/wa-xfer.c
56011+++ b/drivers/usb/wusbcore/wa-xfer.c
56012@@ -314,7 +314,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
56013 */
56014 static void wa_xfer_id_init(struct wa_xfer *xfer)
56015 {
56016- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
56017+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
56018 }
56019
56020 /* Return the xfer's ID. */
56021diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
56022index f018d8d..ccab63f 100644
56023--- a/drivers/vfio/vfio.c
56024+++ b/drivers/vfio/vfio.c
56025@@ -481,7 +481,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
56026 return 0;
56027
56028 /* TODO Prevent device auto probing */
56029- WARN("Device %s added to live group %d!\n", dev_name(dev),
56030+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
56031 iommu_group_id(group->iommu_group));
56032
56033 return 0;
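
The vfio hunk is a plain bug fix: WARN()'s first argument is the trigger condition, and the original call passed the format string in that slot. A minimal model of the macro's shape:

#include <stdio.h>

#define WARN(cond, fmt, ...) \
        do { if (cond) fprintf(stderr, fmt, __VA_ARGS__); } while (0)

int main(void)
{
        WARN(1, "Device %s added to live group %d!\n", "dev0", 3);
        return 0;
}
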
56034diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
56035index 5174eba..451e6bc 100644
56036--- a/drivers/vhost/vringh.c
56037+++ b/drivers/vhost/vringh.c
56038@@ -530,17 +530,17 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
56039 /* Userspace access helpers: in this case, addresses are really userspace. */
56040 static inline int getu16_user(u16 *val, const u16 *p)
56041 {
56042- return get_user(*val, (__force u16 __user *)p);
56043+ return get_user(*val, (u16 __force_user *)p);
56044 }
56045
56046 static inline int putu16_user(u16 *p, u16 val)
56047 {
56048- return put_user(val, (__force u16 __user *)p);
56049+ return put_user(val, (u16 __force_user *)p);
56050 }
56051
56052 static inline int copydesc_user(void *dst, const void *src, size_t len)
56053 {
56054- return copy_from_user(dst, (__force void __user *)src, len) ?
56055+ return copy_from_user(dst, (void __force_user *)src, len) ?
56056 -EFAULT : 0;
56057 }
56058
56059@@ -548,19 +548,19 @@ static inline int putused_user(struct vring_used_elem *dst,
56060 const struct vring_used_elem *src,
56061 unsigned int num)
56062 {
56063- return copy_to_user((__force void __user *)dst, src,
56064+ return copy_to_user((void __force_user *)dst, src,
56065 sizeof(*dst) * num) ? -EFAULT : 0;
56066 }
56067
56068 static inline int xfer_from_user(void *src, void *dst, size_t len)
56069 {
56070- return copy_from_user(dst, (__force void __user *)src, len) ?
56071+ return copy_from_user(dst, (void __force_user *)src, len) ?
56072 -EFAULT : 0;
56073 }
56074
56075 static inline int xfer_to_user(void *dst, void *src, size_t len)
56076 {
56077- return copy_to_user((__force void __user *)dst, src, len) ?
56078+ return copy_to_user((void __force_user *)dst, src, len) ?
56079 -EFAULT : 0;
56080 }
56081
56082@@ -596,9 +596,9 @@ int vringh_init_user(struct vringh *vrh, u32 features,
56083 vrh->last_used_idx = 0;
56084 vrh->vring.num = num;
56085 /* vring expects kernel addresses, but only used via accessors. */
56086- vrh->vring.desc = (__force struct vring_desc *)desc;
56087- vrh->vring.avail = (__force struct vring_avail *)avail;
56088- vrh->vring.used = (__force struct vring_used *)used;
56089+ vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
56090+ vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
56091+ vrh->vring.used = (__force_kernel struct vring_used *)used;
56092 return 0;
56093 }
56094 EXPORT_SYMBOL(vringh_init_user);
56095@@ -800,7 +800,7 @@ static inline int getu16_kern(u16 *val, const u16 *p)
56096
56097 static inline int putu16_kern(u16 *p, u16 val)
56098 {
56099- ACCESS_ONCE(*p) = val;
56100+ ACCESS_ONCE_RW(*p) = val;
56101 return 0;
56102 }
56103
56104diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
56105index 84a110a..96312c3 100644
56106--- a/drivers/video/backlight/kb3886_bl.c
56107+++ b/drivers/video/backlight/kb3886_bl.c
56108@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
56109 static unsigned long kb3886bl_flags;
56110 #define KB3886BL_SUSPENDED 0x01
56111
56112-static struct dmi_system_id kb3886bl_device_table[] __initdata = {
56113+static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
56114 {
56115 .ident = "Sahara Touch-iT",
56116 .matches = {
56117diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
56118index 1b0b233..6f34c2c 100644
56119--- a/drivers/video/fbdev/arcfb.c
56120+++ b/drivers/video/fbdev/arcfb.c
56121@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
56122 return -ENOSPC;
56123
56124 err = 0;
56125- if ((count + p) > fbmemlength) {
56126+ if (count > (fbmemlength - p)) {
56127 count = fbmemlength - p;
56128 err = -ENOSPC;
56129 }
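
The arcfb hunk rewrites the bounds test into its overflow-safe form: count + p > fbmemlength can wrap for a huge count and wrongly pass, whereas count > fbmemlength - p cannot, given that p <= fbmemlength has already been established. A standalone demonstration:

#include <stdio.h>

/* Safe form; precondition: p <= limit. */
static int fits(unsigned long count, unsigned long p, unsigned long limit)
{
        return count <= limit - p;
}

int main(void)
{
        unsigned long huge = ~0UL - 2;  /* count + p would wrap */
        printf("%d\n", fits(huge, 8, 4096));    /* 0: rejected */
        printf("%d\n", fits(100, 8, 4096));     /* 1: fits */
        return 0;
}
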
56130diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
56131index 52108be..c7c110d 100644
56132--- a/drivers/video/fbdev/aty/aty128fb.c
56133+++ b/drivers/video/fbdev/aty/aty128fb.c
56134@@ -149,7 +149,7 @@ enum {
56135 };
56136
56137 /* Must match above enum */
56138-static char * const r128_family[] = {
56139+static const char * const r128_family[] = {
56140 "AGP",
56141 "PCI",
56142 "PRO AGP",
56143diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
56144index c3d0074..0b9077e 100644
56145--- a/drivers/video/fbdev/aty/atyfb_base.c
56146+++ b/drivers/video/fbdev/aty/atyfb_base.c
56147@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
56148 par->accel_flags = var->accel_flags; /* hack */
56149
56150 if (var->accel_flags) {
56151- info->fbops->fb_sync = atyfb_sync;
56152+ pax_open_kernel();
56153+ *(void **)&info->fbops->fb_sync = atyfb_sync;
56154+ pax_close_kernel();
56155 info->flags &= ~FBINFO_HWACCEL_DISABLED;
56156 } else {
56157- info->fbops->fb_sync = NULL;
56158+ pax_open_kernel();
56159+ *(void **)&info->fbops->fb_sync = NULL;
56160+ pax_close_kernel();
56161 info->flags |= FBINFO_HWACCEL_DISABLED;
56162 }
56163
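
This hunk introduces the pattern repeated through the rest of the fbdev diffs: PaX's constify/KERNEXEC treat ops tables such as `fb_ops` as read-only, so a driver that must patch a method at runtime brackets the store with `pax_open_kernel()`/`pax_close_kernel()` and writes through a `void **` cast to get past the const qualifier. A stubbed user-space sketch of the idiom (the stubs stand in for the real write-protection toggles):

/* pax_open_kernel()/pax_close_kernel() are stubbed out here; in the
 * real kernel they briefly lift the write protection that PaX KERNEXEC
 * places on otherwise read-only data such as fb_ops. */
#include <stdio.h>

struct demo_ops {
        int (*op)(void);        /* constify would make this const in-kernel */
};

static void pax_open_kernel(void)  { /* real version: allow kernel writes */ }
static void pax_close_kernel(void) { /* real version: restore protection  */ }

static int real_op(void) { return 42; }

static struct demo_ops ops;

int main(void)
{
        pax_open_kernel();
        /* write through a void ** so a const-qualified member can be set */
        *(void **)&ops.op = (void *)real_op;
        pax_close_kernel();

        printf("%d\n", ops.op());
        return 0;
}
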
56164diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
56165index 2fa0317..4983f2a 100644
56166--- a/drivers/video/fbdev/aty/mach64_cursor.c
56167+++ b/drivers/video/fbdev/aty/mach64_cursor.c
56168@@ -8,6 +8,7 @@
56169 #include "../core/fb_draw.h"
56170
56171 #include <asm/io.h>
56172+#include <asm/pgtable.h>
56173
56174 #ifdef __sparc__
56175 #include <asm/fbio.h>
56176@@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info)
56177 info->sprite.buf_align = 16; /* and 64 lines tall. */
56178 info->sprite.flags = FB_PIXMAP_IO;
56179
56180- info->fbops->fb_cursor = atyfb_cursor;
56181+ pax_open_kernel();
56182+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
56183+ pax_close_kernel();
56184
56185 return 0;
56186 }
56187diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
56188index 900aa4e..6d49418 100644
56189--- a/drivers/video/fbdev/core/fb_defio.c
56190+++ b/drivers/video/fbdev/core/fb_defio.c
56191@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
56192
56193 BUG_ON(!fbdefio);
56194 mutex_init(&fbdefio->lock);
56195- info->fbops->fb_mmap = fb_deferred_io_mmap;
56196+ pax_open_kernel();
56197+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
56198+ pax_close_kernel();
56199 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
56200 INIT_LIST_HEAD(&fbdefio->pagelist);
56201 if (fbdefio->delay == 0) /* set a default of 1 s */
56202@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
56203 page->mapping = NULL;
56204 }
56205
56206- info->fbops->fb_mmap = NULL;
56207+ *(void **)&info->fbops->fb_mmap = NULL;
56208 mutex_destroy(&fbdefio->lock);
56209 }
56210 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
56211diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
56212index b5e85f6..290f8c7 100644
56213--- a/drivers/video/fbdev/core/fbmem.c
56214+++ b/drivers/video/fbdev/core/fbmem.c
56215@@ -1301,7 +1301,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
56216 __u32 data;
56217 int err;
56218
56219- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
56220+ err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
56221
56222 data = (__u32) (unsigned long) fix->smem_start;
56223 err |= put_user(data, &fix32->smem_start);
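
The fbmem change drops the `&` because `fix32->id` is a char array: both spellings yield the same address, but `&fix32->id` is a pointer to the whole array while `fix32->id` decays to `char *`, and tooling that reasons about `copy_to_user()` buffer sizes keys off that type difference. A sketch (the struct shape is illustrative):

#include <stdio.h>

struct fix_screeninfo32 {
        char id[16];
};

int main(void)
{
        struct fix_screeninfo32 f;

        /* &f.id has type char (*)[16]  (pointer to the whole array)   */
        /* f.id  decays to char *       (pointer to the first element) */
        printf("sizeof *(&f.id) = %zu\n", sizeof *(&f.id)); /* 16 */
        printf("sizeof *(f.id)  = %zu\n", sizeof *(f.id));  /* 1  */
        printf("same address: %d\n", (void *)&f.id == (void *)f.id);
        return 0;
}
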
56224diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
56225index e23392e..8a77540 100644
56226--- a/drivers/video/fbdev/hyperv_fb.c
56227+++ b/drivers/video/fbdev/hyperv_fb.c
56228@@ -235,7 +235,7 @@ static uint screen_fb_size;
56229 static inline int synthvid_send(struct hv_device *hdev,
56230 struct synthvid_msg *msg)
56231 {
56232- static atomic64_t request_id = ATOMIC64_INIT(0);
56233+ static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
56234 int ret;
56235
56236 msg->pipe_hdr.type = PIPE_MSG_DATA;
56237@@ -243,7 +243,7 @@ static inline int synthvid_send(struct hv_device *hdev,
56238
56239 ret = vmbus_sendpacket(hdev->channel, msg,
56240 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
56241- atomic64_inc_return(&request_id),
56242+ atomic64_inc_return_unchecked(&request_id),
56243 VM_PKT_DATA_INBAND, 0);
56244
56245 if (ret)
56246diff --git a/drivers/video/fbdev/i810/i810_accel.c b/drivers/video/fbdev/i810/i810_accel.c
56247index 7672d2e..b56437f 100644
56248--- a/drivers/video/fbdev/i810/i810_accel.c
56249+++ b/drivers/video/fbdev/i810/i810_accel.c
56250@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
56251 }
56252 }
56253 printk("ringbuffer lockup!!!\n");
56254+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
56255 i810_report_error(mmio);
56256 par->dev_flags |= LOCKUP;
56257 info->pixmap.scan_align = 1;
56258diff --git a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
56259index a01147f..5d896f8 100644
56260--- a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
56261+++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
56262@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
56263
56264 #ifdef CONFIG_FB_MATROX_MYSTIQUE
56265 struct matrox_switch matrox_mystique = {
56266- MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
56267+ .preinit = MGA1064_preinit,
56268+ .reset = MGA1064_reset,
56269+ .init = MGA1064_init,
56270+ .restore = MGA1064_restore,
56271 };
56272 EXPORT_SYMBOL(matrox_mystique);
56273 #endif
56274
56275 #ifdef CONFIG_FB_MATROX_G
56276 struct matrox_switch matrox_G100 = {
56277- MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
56278+ .preinit = MGAG100_preinit,
56279+ .reset = MGAG100_reset,
56280+ .init = MGAG100_init,
56281+ .restore = MGAG100_restore,
56282 };
56283 EXPORT_SYMBOL(matrox_G100);
56284 #endif
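
The Matrox hunks convert positional initializers to designated ones. That is a prerequisite for grsecurity's structure-layout randomization: once field order can be shuffled at build time, only name-bound initializers stay correct. A sketch with invented fields:

/* Positional initializers silently break if the field order ever
 * changes (which layout randomization does on purpose); designated
 * initializers bind by name and survive any reordering. */
#include <stdio.h>

struct matrox_like_switch {
        int (*preinit)(void);
        int (*reset)(void);
        int (*init)(void);
        int (*restore)(void);
};

static int my_preinit(void) { return 0; }
static int my_reset(void)   { return 1; }
static int my_init(void)    { return 2; }
static int my_restore(void) { return 3; }

static struct matrox_like_switch sw = {
        .preinit = my_preinit,   /* correct no matter how fields are laid out */
        .reset   = my_reset,
        .init    = my_init,
        .restore = my_restore,
};

int main(void)
{
        printf("%d %d\n", sw.preinit(), sw.restore());
        return 0;
}
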
56285diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
56286index 195ad7c..09743fc 100644
56287--- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
56288+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
56289@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
56290 }
56291
56292 struct matrox_switch matrox_millennium = {
56293- Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
56294+ .preinit = Ti3026_preinit,
56295+ .reset = Ti3026_reset,
56296+ .init = Ti3026_init,
56297+ .restore = Ti3026_restore
56298 };
56299 EXPORT_SYMBOL(matrox_millennium);
56300 #endif
56301diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
56302index fe92eed..106e085 100644
56303--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
56304+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
56305@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
56306 struct mb862xxfb_par *par = info->par;
56307
56308 if (info->var.bits_per_pixel == 32) {
56309- info->fbops->fb_fillrect = cfb_fillrect;
56310- info->fbops->fb_copyarea = cfb_copyarea;
56311- info->fbops->fb_imageblit = cfb_imageblit;
56312+ pax_open_kernel();
56313+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
56314+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
56315+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
56316+ pax_close_kernel();
56317 } else {
56318 outreg(disp, GC_L0EM, 3);
56319- info->fbops->fb_fillrect = mb86290fb_fillrect;
56320- info->fbops->fb_copyarea = mb86290fb_copyarea;
56321- info->fbops->fb_imageblit = mb86290fb_imageblit;
56322+ pax_open_kernel();
56323+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
56324+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
56325+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
56326+ pax_close_kernel();
56327 }
56328 outreg(draw, GDC_REG_DRAW_BASE, 0);
56329 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
56330diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
56331index def0412..fed6529 100644
56332--- a/drivers/video/fbdev/nvidia/nvidia.c
56333+++ b/drivers/video/fbdev/nvidia/nvidia.c
56334@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
56335 info->fix.line_length = (info->var.xres_virtual *
56336 info->var.bits_per_pixel) >> 3;
56337 if (info->var.accel_flags) {
56338- info->fbops->fb_imageblit = nvidiafb_imageblit;
56339- info->fbops->fb_fillrect = nvidiafb_fillrect;
56340- info->fbops->fb_copyarea = nvidiafb_copyarea;
56341- info->fbops->fb_sync = nvidiafb_sync;
56342+ pax_open_kernel();
56343+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
56344+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
56345+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
56346+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
56347+ pax_close_kernel();
56348 info->pixmap.scan_align = 4;
56349 info->flags &= ~FBINFO_HWACCEL_DISABLED;
56350 info->flags |= FBINFO_READS_FAST;
56351 NVResetGraphics(info);
56352 } else {
56353- info->fbops->fb_imageblit = cfb_imageblit;
56354- info->fbops->fb_fillrect = cfb_fillrect;
56355- info->fbops->fb_copyarea = cfb_copyarea;
56356- info->fbops->fb_sync = NULL;
56357+ pax_open_kernel();
56358+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
56359+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
56360+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
56361+ *(void **)&info->fbops->fb_sync = NULL;
56362+ pax_close_kernel();
56363 info->pixmap.scan_align = 1;
56364 info->flags |= FBINFO_HWACCEL_DISABLED;
56365 info->flags &= ~FBINFO_READS_FAST;
56366@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
56367 info->pixmap.size = 8 * 1024;
56368 info->pixmap.flags = FB_PIXMAP_SYSTEM;
56369
56370- if (!hwcur)
56371- info->fbops->fb_cursor = NULL;
56372+ if (!hwcur) {
56373+ pax_open_kernel();
56374+ *(void **)&info->fbops->fb_cursor = NULL;
56375+ pax_close_kernel();
56376+ }
56377
56378 info->var.accel_flags = (!noaccel);
56379
56380diff --git a/drivers/video/fbdev/omap2/dss/display.c b/drivers/video/fbdev/omap2/dss/display.c
56381index 2412a0d..294215b 100644
56382--- a/drivers/video/fbdev/omap2/dss/display.c
56383+++ b/drivers/video/fbdev/omap2/dss/display.c
56384@@ -161,12 +161,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
56385 if (dssdev->name == NULL)
56386 dssdev->name = dssdev->alias;
56387
56388+ pax_open_kernel();
56389 if (drv && drv->get_resolution == NULL)
56390- drv->get_resolution = omapdss_default_get_resolution;
56391+ *(void **)&drv->get_resolution = omapdss_default_get_resolution;
56392 if (drv && drv->get_recommended_bpp == NULL)
56393- drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
56394+ *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
56395 if (drv && drv->get_timings == NULL)
56396- drv->get_timings = omapdss_default_get_timings;
56397+ *(void **)&drv->get_timings = omapdss_default_get_timings;
56398+ pax_close_kernel();
56399
56400 mutex_lock(&panel_list_mutex);
56401 list_add_tail(&dssdev->panel_list, &panel_list);
56402diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
56403index 83433cb..71e9b98 100644
56404--- a/drivers/video/fbdev/s1d13xxxfb.c
56405+++ b/drivers/video/fbdev/s1d13xxxfb.c
56406@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
56407
56408 switch(prod_id) {
56409 case S1D13506_PROD_ID: /* activate acceleration */
56410- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
56411- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
56412+ pax_open_kernel();
56413+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
56414+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
56415+ pax_close_kernel();
56416 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
56417 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
56418 break;
56419diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
56420index 2bcc84a..29dd1ea 100644
56421--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
56422+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
56423@@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle)
56424 }
56425
56426 static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
56427- lcdc_sys_write_index,
56428- lcdc_sys_write_data,
56429- lcdc_sys_read_data,
56430+ .write_index = lcdc_sys_write_index,
56431+ .write_data = lcdc_sys_write_data,
56432+ .read_data = lcdc_sys_read_data,
56433 };
56434
56435 static int sh_mobile_lcdc_sginit(struct fb_info *info,
56436diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
56437index d513ed6..90b0de9 100644
56438--- a/drivers/video/fbdev/smscufx.c
56439+++ b/drivers/video/fbdev/smscufx.c
56440@@ -1175,7 +1175,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
56441 fb_deferred_io_cleanup(info);
56442 kfree(info->fbdefio);
56443 info->fbdefio = NULL;
56444- info->fbops->fb_mmap = ufx_ops_mmap;
56445+ pax_open_kernel();
56446+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
56447+ pax_close_kernel();
56448 }
56449
56450 pr_debug("released /dev/fb%d user=%d count=%d",
56451diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
56452index 77b890e..458e666 100644
56453--- a/drivers/video/fbdev/udlfb.c
56454+++ b/drivers/video/fbdev/udlfb.c
56455@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
56456 dlfb_urb_completion(urb);
56457
56458 error:
56459- atomic_add(bytes_sent, &dev->bytes_sent);
56460- atomic_add(bytes_identical, &dev->bytes_identical);
56461- atomic_add(width*height*2, &dev->bytes_rendered);
56462+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
56463+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
56464+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
56465 end_cycles = get_cycles();
56466- atomic_add(((unsigned int) ((end_cycles - start_cycles)
56467+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
56468 >> 10)), /* Kcycles */
56469 &dev->cpu_kcycles_used);
56470
56471@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
56472 dlfb_urb_completion(urb);
56473
56474 error:
56475- atomic_add(bytes_sent, &dev->bytes_sent);
56476- atomic_add(bytes_identical, &dev->bytes_identical);
56477- atomic_add(bytes_rendered, &dev->bytes_rendered);
56478+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
56479+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
56480+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
56481 end_cycles = get_cycles();
56482- atomic_add(((unsigned int) ((end_cycles - start_cycles)
56483+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
56484 >> 10)), /* Kcycles */
56485 &dev->cpu_kcycles_used);
56486 }
56487@@ -993,7 +993,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
56488 fb_deferred_io_cleanup(info);
56489 kfree(info->fbdefio);
56490 info->fbdefio = NULL;
56491- info->fbops->fb_mmap = dlfb_ops_mmap;
56492+ pax_open_kernel();
56493+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
56494+ pax_close_kernel();
56495 }
56496
56497 pr_warn("released /dev/fb%d user=%d count=%d\n",
56498@@ -1376,7 +1378,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
56499 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56500 struct dlfb_data *dev = fb_info->par;
56501 return snprintf(buf, PAGE_SIZE, "%u\n",
56502- atomic_read(&dev->bytes_rendered));
56503+ atomic_read_unchecked(&dev->bytes_rendered));
56504 }
56505
56506 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
56507@@ -1384,7 +1386,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
56508 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56509 struct dlfb_data *dev = fb_info->par;
56510 return snprintf(buf, PAGE_SIZE, "%u\n",
56511- atomic_read(&dev->bytes_identical));
56512+ atomic_read_unchecked(&dev->bytes_identical));
56513 }
56514
56515 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
56516@@ -1392,7 +1394,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
56517 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56518 struct dlfb_data *dev = fb_info->par;
56519 return snprintf(buf, PAGE_SIZE, "%u\n",
56520- atomic_read(&dev->bytes_sent));
56521+ atomic_read_unchecked(&dev->bytes_sent));
56522 }
56523
56524 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
56525@@ -1400,7 +1402,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
56526 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56527 struct dlfb_data *dev = fb_info->par;
56528 return snprintf(buf, PAGE_SIZE, "%u\n",
56529- atomic_read(&dev->cpu_kcycles_used));
56530+ atomic_read_unchecked(&dev->cpu_kcycles_used));
56531 }
56532
56533 static ssize_t edid_show(
56534@@ -1460,10 +1462,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
56535 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56536 struct dlfb_data *dev = fb_info->par;
56537
56538- atomic_set(&dev->bytes_rendered, 0);
56539- atomic_set(&dev->bytes_identical, 0);
56540- atomic_set(&dev->bytes_sent, 0);
56541- atomic_set(&dev->cpu_kcycles_used, 0);
56542+ atomic_set_unchecked(&dev->bytes_rendered, 0);
56543+ atomic_set_unchecked(&dev->bytes_identical, 0);
56544+ atomic_set_unchecked(&dev->bytes_sent, 0);
56545+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
56546
56547 return count;
56548 }
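
udlfb's byte/cycle counters move to the `_unchecked` atomics. Under PaX REFCOUNT the ordinary atomic ops are hardened to refuse overflow, which is right for reference counts but wrong for statistics that may legitimately wrap, hence the opt-out. A conceptual model (plain C, not actually atomic; only the overflow policy is the point):

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

typedef struct { unsigned int v; } atomic_demo_t;

static void atomic_add_checked(unsigned int i, atomic_demo_t *a)
{
        unsigned int r;
        if (__builtin_add_overflow(a->v, i, &r))
                abort();        /* REFCOUNT-style: refuse to wrap */
        a->v = r;
}

static void atomic_add_unchecked(unsigned int i, atomic_demo_t *a)
{
        a->v += i;              /* statistics counter: wrapping is harmless */
}

int main(void)
{
        atomic_demo_t bytes = { UINT_MAX - 1 };
        atomic_add_unchecked(8, &bytes);        /* wraps to 6, by design */
        printf("wrapped counter: %u\n", bytes.v);

        atomic_demo_t refs = { 1 };
        atomic_add_checked(1, &refs);           /* fine: no wrap */
        printf("refcount: %u\n", refs.v);
        return 0;
}
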
56549diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
56550index 509d452..7c9d2de 100644
56551--- a/drivers/video/fbdev/uvesafb.c
56552+++ b/drivers/video/fbdev/uvesafb.c
56553@@ -19,6 +19,7 @@
56554 #include <linux/io.h>
56555 #include <linux/mutex.h>
56556 #include <linux/slab.h>
56557+#include <linux/moduleloader.h>
56558 #include <video/edid.h>
56559 #include <video/uvesafb.h>
56560 #ifdef CONFIG_X86
56561@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
56562 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
56563 par->pmi_setpal = par->ypan = 0;
56564 } else {
56565+
56566+#ifdef CONFIG_PAX_KERNEXEC
56567+#ifdef CONFIG_MODULES
56568+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
56569+#endif
56570+ if (!par->pmi_code) {
56571+ par->pmi_setpal = par->ypan = 0;
56572+ return 0;
56573+ }
56574+#endif
56575+
56576 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
56577 + task->t.regs.edi);
56578+
56579+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56580+ pax_open_kernel();
56581+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
56582+ pax_close_kernel();
56583+
56584+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
56585+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
56586+#else
56587 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
56588 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
56589+#endif
56590+
56591 printk(KERN_INFO "uvesafb: protected mode interface info at "
56592 "%04x:%04x\n",
56593 (u16)task->t.regs.es, (u16)task->t.regs.edi);
56594@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
56595 par->ypan = ypan;
56596
56597 if (par->pmi_setpal || par->ypan) {
56598+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
56599 if (__supported_pte_mask & _PAGE_NX) {
56600 par->pmi_setpal = par->ypan = 0;
56601 printk(KERN_WARNING "uvesafb: NX protection is active, "
56602 "better not use the PMI.\n");
56603- } else {
56604+ } else
56605+#endif
56606 uvesafb_vbe_getpmi(task, par);
56607- }
56608 }
56609 #else
56610 /* The protected mode interface is not available on non-x86. */
56611@@ -1453,8 +1477,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
56612 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
56613
56614 /* Disable blanking if the user requested so. */
56615- if (!blank)
56616- info->fbops->fb_blank = NULL;
56617+ if (!blank) {
56618+ pax_open_kernel();
56619+ *(void **)&info->fbops->fb_blank = NULL;
56620+ pax_close_kernel();
56621+ }
56622
56623 /*
56624 * Find out how much IO memory is required for the mode with
56625@@ -1525,8 +1552,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
56626 info->flags = FBINFO_FLAG_DEFAULT |
56627 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
56628
56629- if (!par->ypan)
56630- info->fbops->fb_pan_display = NULL;
56631+ if (!par->ypan) {
56632+ pax_open_kernel();
56633+ *(void **)&info->fbops->fb_pan_display = NULL;
56634+ pax_close_kernel();
56635+ }
56636 }
56637
56638 static void uvesafb_init_mtrr(struct fb_info *info)
56639@@ -1787,6 +1817,11 @@ out_mode:
56640 out:
56641 kfree(par->vbe_modes);
56642
56643+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56644+ if (par->pmi_code)
56645+ module_free_exec(NULL, par->pmi_code);
56646+#endif
56647+
56648 framebuffer_release(info);
56649 return err;
56650 }
56651@@ -1811,6 +1846,11 @@ static int uvesafb_remove(struct platform_device *dev)
56652 kfree(par->vbe_state_orig);
56653 kfree(par->vbe_state_saved);
56654
56655+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56656+ if (par->pmi_code)
56657+ module_free_exec(NULL, par->pmi_code);
56658+#endif
56659+
56660 framebuffer_release(info);
56661 }
56662 return 0;
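
The uvesafb changes (and the vesafb ones below) copy the firmware's protected-mode interface code into `module_alloc_exec()` memory: with KERNEXEC the region it used to run from is no longer executable, so the code is relocated into an allocation that is writable only while being filled and executable afterwards. A user-space analog of that W^X discipline (x86-64 Linux only; the bytes encode `mov eax,42; ret`):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        unsigned char code[] = { 0xb8, 0x2a, 0x00, 0x00, 0x00, 0xc3 };
        void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED)
                return 1;

        memcpy(buf, code, sizeof(code));                /* fill while writable */
        if (mprotect(buf, 4096, PROT_READ | PROT_EXEC)) /* then lock to R-X   */
                return 1;

        int (*fn)(void) = (int (*)(void))buf;
        printf("%d\n", fn());                           /* 42 */
        return 0;
}
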
56663diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
56664index 6170e7f..dd63031 100644
56665--- a/drivers/video/fbdev/vesafb.c
56666+++ b/drivers/video/fbdev/vesafb.c
56667@@ -9,6 +9,7 @@
56668 */
56669
56670 #include <linux/module.h>
56671+#include <linux/moduleloader.h>
56672 #include <linux/kernel.h>
56673 #include <linux/errno.h>
56674 #include <linux/string.h>
56675@@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
56676 static int vram_total; /* Set total amount of memory */
56677 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
56678 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
56679-static void (*pmi_start)(void) __read_mostly;
56680-static void (*pmi_pal) (void) __read_mostly;
56681+static void (*pmi_start)(void) __read_only;
56682+static void (*pmi_pal) (void) __read_only;
56683 static int depth __read_mostly;
56684 static int vga_compat __read_mostly;
56685 /* --------------------------------------------------------------------- */
56686@@ -233,6 +234,7 @@ static int vesafb_probe(struct platform_device *dev)
56687 unsigned int size_remap;
56688 unsigned int size_total;
56689 char *option = NULL;
56690+ void *pmi_code = NULL;
56691
56692 /* ignore error return of fb_get_options */
56693 fb_get_options("vesafb", &option);
56694@@ -279,10 +281,6 @@ static int vesafb_probe(struct platform_device *dev)
56695 size_remap = size_total;
56696 vesafb_fix.smem_len = size_remap;
56697
56698-#ifndef __i386__
56699- screen_info.vesapm_seg = 0;
56700-#endif
56701-
56702 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
56703 printk(KERN_WARNING
56704 "vesafb: cannot reserve video memory at 0x%lx\n",
56705@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
56706 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
56707 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
56708
56709+#ifdef __i386__
56710+
56711+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56712+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
56713+ if (!pmi_code)
56714+#elif !defined(CONFIG_PAX_KERNEXEC)
56715+ if (0)
56716+#endif
56717+
56718+#endif
56719+ screen_info.vesapm_seg = 0;
56720+
56721 if (screen_info.vesapm_seg) {
56722- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
56723- screen_info.vesapm_seg,screen_info.vesapm_off);
56724+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
56725+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
56726 }
56727
56728 if (screen_info.vesapm_seg < 0xc000)
56729@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
56730
56731 if (ypan || pmi_setpal) {
56732 unsigned short *pmi_base;
56733+
56734 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
56735- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
56736- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
56737+
56738+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56739+ pax_open_kernel();
56740+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
56741+#else
56742+ pmi_code = pmi_base;
56743+#endif
56744+
56745+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
56746+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
56747+
56748+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56749+ pmi_start = ktva_ktla(pmi_start);
56750+ pmi_pal = ktva_ktla(pmi_pal);
56751+ pax_close_kernel();
56752+#endif
56753+
56754 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
56755 if (pmi_base[3]) {
56756 printk(KERN_INFO "vesafb: pmi: ports = ");
56757@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
56758 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
56759 (ypan ? FBINFO_HWACCEL_YPAN : 0);
56760
56761- if (!ypan)
56762- info->fbops->fb_pan_display = NULL;
56763+ if (!ypan) {
56764+ pax_open_kernel();
56765+ *(void **)&info->fbops->fb_pan_display = NULL;
56766+ pax_close_kernel();
56767+ }
56768
56769 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
56770 err = -ENOMEM;
56771@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
56772 fb_info(info, "%s frame buffer device\n", info->fix.id);
56773 return 0;
56774 err:
56775+
56776+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56777+ module_free_exec(NULL, pmi_code);
56778+#endif
56779+
56780 if (info->screen_base)
56781 iounmap(info->screen_base);
56782 framebuffer_release(info);
56783diff --git a/drivers/video/fbdev/via/via_clock.h b/drivers/video/fbdev/via/via_clock.h
56784index 88714ae..16c2e11 100644
56785--- a/drivers/video/fbdev/via/via_clock.h
56786+++ b/drivers/video/fbdev/via/via_clock.h
56787@@ -56,7 +56,7 @@ struct via_clock {
56788
56789 void (*set_engine_pll_state)(u8 state);
56790 void (*set_engine_pll)(struct via_pll_config config);
56791-};
56792+} __no_const;
56793
56794
56795 static inline u32 get_pll_internal_frequency(u32 ref_freq,
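
`__no_const` is the opt-out for grsecurity's constify gcc plugin, which automatically const-qualifies structs consisting only of function pointers; `via_clock`'s members are filled in at probe time, so it must stay writable. The sketch below mirrors the guard and attribute spelling of grsecurity's compiler.h to the best of my knowledge and should be treated as illustrative:

#ifdef CONSTIFY_PLUGIN
# define __no_const __attribute__((no_const))
#else
# define __no_const                /* plain compilers: expands to nothing */
#endif

struct via_clock_like {
        void (*set_state)(unsigned char state);
} __no_const;                      /* opt out: assigned at runtime */

static void demo_state(unsigned char state) { (void)state; }

int main(void)
{
        struct via_clock_like clk;
        clk.set_state = demo_state; /* legal precisely because of the opt-out */
        clk.set_state(1);
        return 0;
}
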
56796diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
56797index 3c14e43..2630570 100644
56798--- a/drivers/video/logo/logo_linux_clut224.ppm
56799+++ b/drivers/video/logo/logo_linux_clut224.ppm
56800@@ -2,1603 +2,1123 @@ P3
56801 # Standard 224-color Linux logo
56802 80 80
56803 255
[... PPM raster rows elided: per the hunk header above, this hunk removes the 1603 pixel-triplet lines of the stock 80x80, 224-color Linux logo and adds 1123 replacement lines; the raw RGB rows carry no other reviewable content ...]
57738- 2 2 6 2 2 6 2 2 6 46 46 46
57739- 2 2 6 2 2 6 2 2 6 2 2 6
57740- 10 10 10 86 86 86 38 38 38 10 10 10
57741- 0 0 0 0 0 0 0 0 0 0 0 0
57742- 0 0 0 0 0 0 0 0 0 0 0 0
57743- 0 0 0 0 0 0 0 0 0 0 0 0
57744- 0 0 0 0 0 0 0 0 0 0 0 0
57745- 0 0 0 0 0 0 0 0 0 0 0 0
57746- 0 0 0 0 0 0 0 0 0 0 0 0
57747- 0 0 0 0 0 0 0 0 0 0 0 0
57748- 10 10 10 26 26 26 66 66 66 82 82 82
57749- 2 2 6 22 22 22 18 18 18 2 2 6
57750-149 149 149 253 253 253 253 253 253 253 253 253
57751-253 253 253 253 253 253 253 253 253 253 253 253
57752-253 253 253 253 253 253 234 234 234 242 242 242
57753-253 253 253 253 253 253 253 253 253 253 253 253
57754-253 253 253 253 253 253 253 253 253 253 253 253
57755-253 253 253 253 253 253 253 253 253 253 253 253
57756-253 253 253 253 253 253 253 253 253 253 253 253
57757-253 253 253 253 253 253 206 206 206 2 2 6
57758- 2 2 6 2 2 6 2 2 6 38 38 38
57759- 2 2 6 2 2 6 2 2 6 2 2 6
57760- 6 6 6 86 86 86 46 46 46 14 14 14
57761- 0 0 0 0 0 0 0 0 0 0 0 0
57762- 0 0 0 0 0 0 0 0 0 0 0 0
57763- 0 0 0 0 0 0 0 0 0 0 0 0
57764- 0 0 0 0 0 0 0 0 0 0 0 0
57765- 0 0 0 0 0 0 0 0 0 0 0 0
57766- 0 0 0 0 0 0 0 0 0 0 0 0
57767- 0 0 0 0 0 0 0 0 0 6 6 6
57768- 18 18 18 46 46 46 86 86 86 18 18 18
57769- 2 2 6 34 34 34 10 10 10 6 6 6
57770-210 210 210 253 253 253 253 253 253 253 253 253
57771-253 253 253 253 253 253 253 253 253 253 253 253
57772-253 253 253 253 253 253 234 234 234 242 242 242
57773-253 253 253 253 253 253 253 253 253 253 253 253
57774-253 253 253 253 253 253 253 253 253 253 253 253
57775-253 253 253 253 253 253 253 253 253 253 253 253
57776-253 253 253 253 253 253 253 253 253 253 253 253
57777-253 253 253 253 253 253 221 221 221 6 6 6
57778- 2 2 6 2 2 6 6 6 6 30 30 30
57779- 2 2 6 2 2 6 2 2 6 2 2 6
57780- 2 2 6 82 82 82 54 54 54 18 18 18
57781- 6 6 6 0 0 0 0 0 0 0 0 0
57782- 0 0 0 0 0 0 0 0 0 0 0 0
57783- 0 0 0 0 0 0 0 0 0 0 0 0
57784- 0 0 0 0 0 0 0 0 0 0 0 0
57785- 0 0 0 0 0 0 0 0 0 0 0 0
57786- 0 0 0 0 0 0 0 0 0 0 0 0
57787- 0 0 0 0 0 0 0 0 0 10 10 10
57788- 26 26 26 66 66 66 62 62 62 2 2 6
57789- 2 2 6 38 38 38 10 10 10 26 26 26
57790-238 238 238 253 253 253 253 253 253 253 253 253
57791-253 253 253 253 253 253 253 253 253 253 253 253
57792-253 253 253 253 253 253 231 231 231 238 238 238
57793-253 253 253 253 253 253 253 253 253 253 253 253
57794-253 253 253 253 253 253 253 253 253 253 253 253
57795-253 253 253 253 253 253 253 253 253 253 253 253
57796-253 253 253 253 253 253 253 253 253 253 253 253
57797-253 253 253 253 253 253 231 231 231 6 6 6
57798- 2 2 6 2 2 6 10 10 10 30 30 30
57799- 2 2 6 2 2 6 2 2 6 2 2 6
57800- 2 2 6 66 66 66 58 58 58 22 22 22
57801- 6 6 6 0 0 0 0 0 0 0 0 0
57802- 0 0 0 0 0 0 0 0 0 0 0 0
57803- 0 0 0 0 0 0 0 0 0 0 0 0
57804- 0 0 0 0 0 0 0 0 0 0 0 0
57805- 0 0 0 0 0 0 0 0 0 0 0 0
57806- 0 0 0 0 0 0 0 0 0 0 0 0
57807- 0 0 0 0 0 0 0 0 0 10 10 10
57808- 38 38 38 78 78 78 6 6 6 2 2 6
57809- 2 2 6 46 46 46 14 14 14 42 42 42
57810-246 246 246 253 253 253 253 253 253 253 253 253
57811-253 253 253 253 253 253 253 253 253 253 253 253
57812-253 253 253 253 253 253 231 231 231 242 242 242
57813-253 253 253 253 253 253 253 253 253 253 253 253
57814-253 253 253 253 253 253 253 253 253 253 253 253
57815-253 253 253 253 253 253 253 253 253 253 253 253
57816-253 253 253 253 253 253 253 253 253 253 253 253
57817-253 253 253 253 253 253 234 234 234 10 10 10
57818- 2 2 6 2 2 6 22 22 22 14 14 14
57819- 2 2 6 2 2 6 2 2 6 2 2 6
57820- 2 2 6 66 66 66 62 62 62 22 22 22
57821- 6 6 6 0 0 0 0 0 0 0 0 0
57822- 0 0 0 0 0 0 0 0 0 0 0 0
57823- 0 0 0 0 0 0 0 0 0 0 0 0
57824- 0 0 0 0 0 0 0 0 0 0 0 0
57825- 0 0 0 0 0 0 0 0 0 0 0 0
57826- 0 0 0 0 0 0 0 0 0 0 0 0
57827- 0 0 0 0 0 0 6 6 6 18 18 18
57828- 50 50 50 74 74 74 2 2 6 2 2 6
57829- 14 14 14 70 70 70 34 34 34 62 62 62
57830-250 250 250 253 253 253 253 253 253 253 253 253
57831-253 253 253 253 253 253 253 253 253 253 253 253
57832-253 253 253 253 253 253 231 231 231 246 246 246
57833-253 253 253 253 253 253 253 253 253 253 253 253
57834-253 253 253 253 253 253 253 253 253 253 253 253
57835-253 253 253 253 253 253 253 253 253 253 253 253
57836-253 253 253 253 253 253 253 253 253 253 253 253
57837-253 253 253 253 253 253 234 234 234 14 14 14
57838- 2 2 6 2 2 6 30 30 30 2 2 6
57839- 2 2 6 2 2 6 2 2 6 2 2 6
57840- 2 2 6 66 66 66 62 62 62 22 22 22
57841- 6 6 6 0 0 0 0 0 0 0 0 0
57842- 0 0 0 0 0 0 0 0 0 0 0 0
57843- 0 0 0 0 0 0 0 0 0 0 0 0
57844- 0 0 0 0 0 0 0 0 0 0 0 0
57845- 0 0 0 0 0 0 0 0 0 0 0 0
57846- 0 0 0 0 0 0 0 0 0 0 0 0
57847- 0 0 0 0 0 0 6 6 6 18 18 18
57848- 54 54 54 62 62 62 2 2 6 2 2 6
57849- 2 2 6 30 30 30 46 46 46 70 70 70
57850-250 250 250 253 253 253 253 253 253 253 253 253
57851-253 253 253 253 253 253 253 253 253 253 253 253
57852-253 253 253 253 253 253 231 231 231 246 246 246
57853-253 253 253 253 253 253 253 253 253 253 253 253
57854-253 253 253 253 253 253 253 253 253 253 253 253
57855-253 253 253 253 253 253 253 253 253 253 253 253
57856-253 253 253 253 253 253 253 253 253 253 253 253
57857-253 253 253 253 253 253 226 226 226 10 10 10
57858- 2 2 6 6 6 6 30 30 30 2 2 6
57859- 2 2 6 2 2 6 2 2 6 2 2 6
57860- 2 2 6 66 66 66 58 58 58 22 22 22
57861- 6 6 6 0 0 0 0 0 0 0 0 0
57862- 0 0 0 0 0 0 0 0 0 0 0 0
57863- 0 0 0 0 0 0 0 0 0 0 0 0
57864- 0 0 0 0 0 0 0 0 0 0 0 0
57865- 0 0 0 0 0 0 0 0 0 0 0 0
57866- 0 0 0 0 0 0 0 0 0 0 0 0
57867- 0 0 0 0 0 0 6 6 6 22 22 22
57868- 58 58 58 62 62 62 2 2 6 2 2 6
57869- 2 2 6 2 2 6 30 30 30 78 78 78
57870-250 250 250 253 253 253 253 253 253 253 253 253
57871-253 253 253 253 253 253 253 253 253 253 253 253
57872-253 253 253 253 253 253 231 231 231 246 246 246
57873-253 253 253 253 253 253 253 253 253 253 253 253
57874-253 253 253 253 253 253 253 253 253 253 253 253
57875-253 253 253 253 253 253 253 253 253 253 253 253
57876-253 253 253 253 253 253 253 253 253 253 253 253
57877-253 253 253 253 253 253 206 206 206 2 2 6
57878- 22 22 22 34 34 34 18 14 6 22 22 22
57879- 26 26 26 18 18 18 6 6 6 2 2 6
57880- 2 2 6 82 82 82 54 54 54 18 18 18
57881- 6 6 6 0 0 0 0 0 0 0 0 0
57882- 0 0 0 0 0 0 0 0 0 0 0 0
57883- 0 0 0 0 0 0 0 0 0 0 0 0
57884- 0 0 0 0 0 0 0 0 0 0 0 0
57885- 0 0 0 0 0 0 0 0 0 0 0 0
57886- 0 0 0 0 0 0 0 0 0 0 0 0
57887- 0 0 0 0 0 0 6 6 6 26 26 26
57888- 62 62 62 106 106 106 74 54 14 185 133 11
57889-210 162 10 121 92 8 6 6 6 62 62 62
57890-238 238 238 253 253 253 253 253 253 253 253 253
57891-253 253 253 253 253 253 253 253 253 253 253 253
57892-253 253 253 253 253 253 231 231 231 246 246 246
57893-253 253 253 253 253 253 253 253 253 253 253 253
57894-253 253 253 253 253 253 253 253 253 253 253 253
57895-253 253 253 253 253 253 253 253 253 253 253 253
57896-253 253 253 253 253 253 253 253 253 253 253 253
57897-253 253 253 253 253 253 158 158 158 18 18 18
57898- 14 14 14 2 2 6 2 2 6 2 2 6
57899- 6 6 6 18 18 18 66 66 66 38 38 38
57900- 6 6 6 94 94 94 50 50 50 18 18 18
57901- 6 6 6 0 0 0 0 0 0 0 0 0
57902- 0 0 0 0 0 0 0 0 0 0 0 0
57903- 0 0 0 0 0 0 0 0 0 0 0 0
57904- 0 0 0 0 0 0 0 0 0 0 0 0
57905- 0 0 0 0 0 0 0 0 0 0 0 0
57906- 0 0 0 0 0 0 0 0 0 6 6 6
57907- 10 10 10 10 10 10 18 18 18 38 38 38
57908- 78 78 78 142 134 106 216 158 10 242 186 14
57909-246 190 14 246 190 14 156 118 10 10 10 10
57910- 90 90 90 238 238 238 253 253 253 253 253 253
57911-253 253 253 253 253 253 253 253 253 253 253 253
57912-253 253 253 253 253 253 231 231 231 250 250 250
57913-253 253 253 253 253 253 253 253 253 253 253 253
57914-253 253 253 253 253 253 253 253 253 253 253 253
57915-253 253 253 253 253 253 253 253 253 253 253 253
57916-253 253 253 253 253 253 253 253 253 246 230 190
57917-238 204 91 238 204 91 181 142 44 37 26 9
57918- 2 2 6 2 2 6 2 2 6 2 2 6
57919- 2 2 6 2 2 6 38 38 38 46 46 46
57920- 26 26 26 106 106 106 54 54 54 18 18 18
57921- 6 6 6 0 0 0 0 0 0 0 0 0
57922- 0 0 0 0 0 0 0 0 0 0 0 0
57923- 0 0 0 0 0 0 0 0 0 0 0 0
57924- 0 0 0 0 0 0 0 0 0 0 0 0
57925- 0 0 0 0 0 0 0 0 0 0 0 0
57926- 0 0 0 6 6 6 14 14 14 22 22 22
57927- 30 30 30 38 38 38 50 50 50 70 70 70
57928-106 106 106 190 142 34 226 170 11 242 186 14
57929-246 190 14 246 190 14 246 190 14 154 114 10
57930- 6 6 6 74 74 74 226 226 226 253 253 253
57931-253 253 253 253 253 253 253 253 253 253 253 253
57932-253 253 253 253 253 253 231 231 231 250 250 250
57933-253 253 253 253 253 253 253 253 253 253 253 253
57934-253 253 253 253 253 253 253 253 253 253 253 253
57935-253 253 253 253 253 253 253 253 253 253 253 253
57936-253 253 253 253 253 253 253 253 253 228 184 62
57937-241 196 14 241 208 19 232 195 16 38 30 10
57938- 2 2 6 2 2 6 2 2 6 2 2 6
57939- 2 2 6 6 6 6 30 30 30 26 26 26
57940-203 166 17 154 142 90 66 66 66 26 26 26
57941- 6 6 6 0 0 0 0 0 0 0 0 0
57942- 0 0 0 0 0 0 0 0 0 0 0 0
57943- 0 0 0 0 0 0 0 0 0 0 0 0
57944- 0 0 0 0 0 0 0 0 0 0 0 0
57945- 0 0 0 0 0 0 0 0 0 0 0 0
57946- 6 6 6 18 18 18 38 38 38 58 58 58
57947- 78 78 78 86 86 86 101 101 101 123 123 123
57948-175 146 61 210 150 10 234 174 13 246 186 14
57949-246 190 14 246 190 14 246 190 14 238 190 10
57950-102 78 10 2 2 6 46 46 46 198 198 198
57951-253 253 253 253 253 253 253 253 253 253 253 253
57952-253 253 253 253 253 253 234 234 234 242 242 242
57953-253 253 253 253 253 253 253 253 253 253 253 253
57954-253 253 253 253 253 253 253 253 253 253 253 253
57955-253 253 253 253 253 253 253 253 253 253 253 253
57956-253 253 253 253 253 253 253 253 253 224 178 62
57957-242 186 14 241 196 14 210 166 10 22 18 6
57958- 2 2 6 2 2 6 2 2 6 2 2 6
57959- 2 2 6 2 2 6 6 6 6 121 92 8
57960-238 202 15 232 195 16 82 82 82 34 34 34
57961- 10 10 10 0 0 0 0 0 0 0 0 0
57962- 0 0 0 0 0 0 0 0 0 0 0 0
57963- 0 0 0 0 0 0 0 0 0 0 0 0
57964- 0 0 0 0 0 0 0 0 0 0 0 0
57965- 0 0 0 0 0 0 0 0 0 0 0 0
57966- 14 14 14 38 38 38 70 70 70 154 122 46
57967-190 142 34 200 144 11 197 138 11 197 138 11
57968-213 154 11 226 170 11 242 186 14 246 190 14
57969-246 190 14 246 190 14 246 190 14 246 190 14
57970-225 175 15 46 32 6 2 2 6 22 22 22
57971-158 158 158 250 250 250 253 253 253 253 253 253
57972-253 253 253 253 253 253 253 253 253 253 253 253
57973-253 253 253 253 253 253 253 253 253 253 253 253
57974-253 253 253 253 253 253 253 253 253 253 253 253
57975-253 253 253 253 253 253 253 253 253 253 253 253
57976-253 253 253 250 250 250 242 242 242 224 178 62
57977-239 182 13 236 186 11 213 154 11 46 32 6
57978- 2 2 6 2 2 6 2 2 6 2 2 6
57979- 2 2 6 2 2 6 61 42 6 225 175 15
57980-238 190 10 236 186 11 112 100 78 42 42 42
57981- 14 14 14 0 0 0 0 0 0 0 0 0
57982- 0 0 0 0 0 0 0 0 0 0 0 0
57983- 0 0 0 0 0 0 0 0 0 0 0 0
57984- 0 0 0 0 0 0 0 0 0 0 0 0
57985- 0 0 0 0 0 0 0 0 0 6 6 6
57986- 22 22 22 54 54 54 154 122 46 213 154 11
57987-226 170 11 230 174 11 226 170 11 226 170 11
57988-236 178 12 242 186 14 246 190 14 246 190 14
57989-246 190 14 246 190 14 246 190 14 246 190 14
57990-241 196 14 184 144 12 10 10 10 2 2 6
57991- 6 6 6 116 116 116 242 242 242 253 253 253
57992-253 253 253 253 253 253 253 253 253 253 253 253
57993-253 253 253 253 253 253 253 253 253 253 253 253
57994-253 253 253 253 253 253 253 253 253 253 253 253
57995-253 253 253 253 253 253 253 253 253 253 253 253
57996-253 253 253 231 231 231 198 198 198 214 170 54
57997-236 178 12 236 178 12 210 150 10 137 92 6
57998- 18 14 6 2 2 6 2 2 6 2 2 6
57999- 6 6 6 70 47 6 200 144 11 236 178 12
58000-239 182 13 239 182 13 124 112 88 58 58 58
58001- 22 22 22 6 6 6 0 0 0 0 0 0
58002- 0 0 0 0 0 0 0 0 0 0 0 0
58003- 0 0 0 0 0 0 0 0 0 0 0 0
58004- 0 0 0 0 0 0 0 0 0 0 0 0
58005- 0 0 0 0 0 0 0 0 0 10 10 10
58006- 30 30 30 70 70 70 180 133 36 226 170 11
58007-239 182 13 242 186 14 242 186 14 246 186 14
58008-246 190 14 246 190 14 246 190 14 246 190 14
58009-246 190 14 246 190 14 246 190 14 246 190 14
58010-246 190 14 232 195 16 98 70 6 2 2 6
58011- 2 2 6 2 2 6 66 66 66 221 221 221
58012-253 253 253 253 253 253 253 253 253 253 253 253
58013-253 253 253 253 253 253 253 253 253 253 253 253
58014-253 253 253 253 253 253 253 253 253 253 253 253
58015-253 253 253 253 253 253 253 253 253 253 253 253
58016-253 253 253 206 206 206 198 198 198 214 166 58
58017-230 174 11 230 174 11 216 158 10 192 133 9
58018-163 110 8 116 81 8 102 78 10 116 81 8
58019-167 114 7 197 138 11 226 170 11 239 182 13
58020-242 186 14 242 186 14 162 146 94 78 78 78
58021- 34 34 34 14 14 14 6 6 6 0 0 0
58022- 0 0 0 0 0 0 0 0 0 0 0 0
58023- 0 0 0 0 0 0 0 0 0 0 0 0
58024- 0 0 0 0 0 0 0 0 0 0 0 0
58025- 0 0 0 0 0 0 0 0 0 6 6 6
58026- 30 30 30 78 78 78 190 142 34 226 170 11
58027-239 182 13 246 190 14 246 190 14 246 190 14
58028-246 190 14 246 190 14 246 190 14 246 190 14
58029-246 190 14 246 190 14 246 190 14 246 190 14
58030-246 190 14 241 196 14 203 166 17 22 18 6
58031- 2 2 6 2 2 6 2 2 6 38 38 38
58032-218 218 218 253 253 253 253 253 253 253 253 253
58033-253 253 253 253 253 253 253 253 253 253 253 253
58034-253 253 253 253 253 253 253 253 253 253 253 253
58035-253 253 253 253 253 253 253 253 253 253 253 253
58036-250 250 250 206 206 206 198 198 198 202 162 69
58037-226 170 11 236 178 12 224 166 10 210 150 10
58038-200 144 11 197 138 11 192 133 9 197 138 11
58039-210 150 10 226 170 11 242 186 14 246 190 14
58040-246 190 14 246 186 14 225 175 15 124 112 88
58041- 62 62 62 30 30 30 14 14 14 6 6 6
58042- 0 0 0 0 0 0 0 0 0 0 0 0
58043- 0 0 0 0 0 0 0 0 0 0 0 0
58044- 0 0 0 0 0 0 0 0 0 0 0 0
58045- 0 0 0 0 0 0 0 0 0 10 10 10
58046- 30 30 30 78 78 78 174 135 50 224 166 10
58047-239 182 13 246 190 14 246 190 14 246 190 14
58048-246 190 14 246 190 14 246 190 14 246 190 14
58049-246 190 14 246 190 14 246 190 14 246 190 14
58050-246 190 14 246 190 14 241 196 14 139 102 15
58051- 2 2 6 2 2 6 2 2 6 2 2 6
58052- 78 78 78 250 250 250 253 253 253 253 253 253
58053-253 253 253 253 253 253 253 253 253 253 253 253
58054-253 253 253 253 253 253 253 253 253 253 253 253
58055-253 253 253 253 253 253 253 253 253 253 253 253
58056-250 250 250 214 214 214 198 198 198 190 150 46
58057-219 162 10 236 178 12 234 174 13 224 166 10
58058-216 158 10 213 154 11 213 154 11 216 158 10
58059-226 170 11 239 182 13 246 190 14 246 190 14
58060-246 190 14 246 190 14 242 186 14 206 162 42
58061-101 101 101 58 58 58 30 30 30 14 14 14
58062- 6 6 6 0 0 0 0 0 0 0 0 0
58063- 0 0 0 0 0 0 0 0 0 0 0 0
58064- 0 0 0 0 0 0 0 0 0 0 0 0
58065- 0 0 0 0 0 0 0 0 0 10 10 10
58066- 30 30 30 74 74 74 174 135 50 216 158 10
58067-236 178 12 246 190 14 246 190 14 246 190 14
58068-246 190 14 246 190 14 246 190 14 246 190 14
58069-246 190 14 246 190 14 246 190 14 246 190 14
58070-246 190 14 246 190 14 241 196 14 226 184 13
58071- 61 42 6 2 2 6 2 2 6 2 2 6
58072- 22 22 22 238 238 238 253 253 253 253 253 253
58073-253 253 253 253 253 253 253 253 253 253 253 253
58074-253 253 253 253 253 253 253 253 253 253 253 253
58075-253 253 253 253 253 253 253 253 253 253 253 253
58076-253 253 253 226 226 226 187 187 187 180 133 36
58077-216 158 10 236 178 12 239 182 13 236 178 12
58078-230 174 11 226 170 11 226 170 11 230 174 11
58079-236 178 12 242 186 14 246 190 14 246 190 14
58080-246 190 14 246 190 14 246 186 14 239 182 13
58081-206 162 42 106 106 106 66 66 66 34 34 34
58082- 14 14 14 6 6 6 0 0 0 0 0 0
58083- 0 0 0 0 0 0 0 0 0 0 0 0
58084- 0 0 0 0 0 0 0 0 0 0 0 0
58085- 0 0 0 0 0 0 0 0 0 6 6 6
58086- 26 26 26 70 70 70 163 133 67 213 154 11
58087-236 178 12 246 190 14 246 190 14 246 190 14
58088-246 190 14 246 190 14 246 190 14 246 190 14
58089-246 190 14 246 190 14 246 190 14 246 190 14
58090-246 190 14 246 190 14 246 190 14 241 196 14
58091-190 146 13 18 14 6 2 2 6 2 2 6
58092- 46 46 46 246 246 246 253 253 253 253 253 253
58093-253 253 253 253 253 253 253 253 253 253 253 253
58094-253 253 253 253 253 253 253 253 253 253 253 253
58095-253 253 253 253 253 253 253 253 253 253 253 253
58096-253 253 253 221 221 221 86 86 86 156 107 11
58097-216 158 10 236 178 12 242 186 14 246 186 14
58098-242 186 14 239 182 13 239 182 13 242 186 14
58099-242 186 14 246 186 14 246 190 14 246 190 14
58100-246 190 14 246 190 14 246 190 14 246 190 14
58101-242 186 14 225 175 15 142 122 72 66 66 66
58102- 30 30 30 10 10 10 0 0 0 0 0 0
58103- 0 0 0 0 0 0 0 0 0 0 0 0
58104- 0 0 0 0 0 0 0 0 0 0 0 0
58105- 0 0 0 0 0 0 0 0 0 6 6 6
58106- 26 26 26 70 70 70 163 133 67 210 150 10
58107-236 178 12 246 190 14 246 190 14 246 190 14
58108-246 190 14 246 190 14 246 190 14 246 190 14
58109-246 190 14 246 190 14 246 190 14 246 190 14
58110-246 190 14 246 190 14 246 190 14 246 190 14
58111-232 195 16 121 92 8 34 34 34 106 106 106
58112-221 221 221 253 253 253 253 253 253 253 253 253
58113-253 253 253 253 253 253 253 253 253 253 253 253
58114-253 253 253 253 253 253 253 253 253 253 253 253
58115-253 253 253 253 253 253 253 253 253 253 253 253
58116-242 242 242 82 82 82 18 14 6 163 110 8
58117-216 158 10 236 178 12 242 186 14 246 190 14
58118-246 190 14 246 190 14 246 190 14 246 190 14
58119-246 190 14 246 190 14 246 190 14 246 190 14
58120-246 190 14 246 190 14 246 190 14 246 190 14
58121-246 190 14 246 190 14 242 186 14 163 133 67
58122- 46 46 46 18 18 18 6 6 6 0 0 0
58123- 0 0 0 0 0 0 0 0 0 0 0 0
58124- 0 0 0 0 0 0 0 0 0 0 0 0
58125- 0 0 0 0 0 0 0 0 0 10 10 10
58126- 30 30 30 78 78 78 163 133 67 210 150 10
58127-236 178 12 246 186 14 246 190 14 246 190 14
58128-246 190 14 246 190 14 246 190 14 246 190 14
58129-246 190 14 246 190 14 246 190 14 246 190 14
58130-246 190 14 246 190 14 246 190 14 246 190 14
58131-241 196 14 215 174 15 190 178 144 253 253 253
58132-253 253 253 253 253 253 253 253 253 253 253 253
58133-253 253 253 253 253 253 253 253 253 253 253 253
58134-253 253 253 253 253 253 253 253 253 253 253 253
58135-253 253 253 253 253 253 253 253 253 218 218 218
58136- 58 58 58 2 2 6 22 18 6 167 114 7
58137-216 158 10 236 178 12 246 186 14 246 190 14
58138-246 190 14 246 190 14 246 190 14 246 190 14
58139-246 190 14 246 190 14 246 190 14 246 190 14
58140-246 190 14 246 190 14 246 190 14 246 190 14
58141-246 190 14 246 186 14 242 186 14 190 150 46
58142- 54 54 54 22 22 22 6 6 6 0 0 0
58143- 0 0 0 0 0 0 0 0 0 0 0 0
58144- 0 0 0 0 0 0 0 0 0 0 0 0
58145- 0 0 0 0 0 0 0 0 0 14 14 14
58146- 38 38 38 86 86 86 180 133 36 213 154 11
58147-236 178 12 246 186 14 246 190 14 246 190 14
58148-246 190 14 246 190 14 246 190 14 246 190 14
58149-246 190 14 246 190 14 246 190 14 246 190 14
58150-246 190 14 246 190 14 246 190 14 246 190 14
58151-246 190 14 232 195 16 190 146 13 214 214 214
58152-253 253 253 253 253 253 253 253 253 253 253 253
58153-253 253 253 253 253 253 253 253 253 253 253 253
58154-253 253 253 253 253 253 253 253 253 253 253 253
58155-253 253 253 250 250 250 170 170 170 26 26 26
58156- 2 2 6 2 2 6 37 26 9 163 110 8
58157-219 162 10 239 182 13 246 186 14 246 190 14
58158-246 190 14 246 190 14 246 190 14 246 190 14
58159-246 190 14 246 190 14 246 190 14 246 190 14
58160-246 190 14 246 190 14 246 190 14 246 190 14
58161-246 186 14 236 178 12 224 166 10 142 122 72
58162- 46 46 46 18 18 18 6 6 6 0 0 0
58163- 0 0 0 0 0 0 0 0 0 0 0 0
58164- 0 0 0 0 0 0 0 0 0 0 0 0
58165- 0 0 0 0 0 0 6 6 6 18 18 18
58166- 50 50 50 109 106 95 192 133 9 224 166 10
58167-242 186 14 246 190 14 246 190 14 246 190 14
58168-246 190 14 246 190 14 246 190 14 246 190 14
58169-246 190 14 246 190 14 246 190 14 246 190 14
58170-246 190 14 246 190 14 246 190 14 246 190 14
58171-242 186 14 226 184 13 210 162 10 142 110 46
58172-226 226 226 253 253 253 253 253 253 253 253 253
58173-253 253 253 253 253 253 253 253 253 253 253 253
58174-253 253 253 253 253 253 253 253 253 253 253 253
58175-198 198 198 66 66 66 2 2 6 2 2 6
58176- 2 2 6 2 2 6 50 34 6 156 107 11
58177-219 162 10 239 182 13 246 186 14 246 190 14
58178-246 190 14 246 190 14 246 190 14 246 190 14
58179-246 190 14 246 190 14 246 190 14 246 190 14
58180-246 190 14 246 190 14 246 190 14 242 186 14
58181-234 174 13 213 154 11 154 122 46 66 66 66
58182- 30 30 30 10 10 10 0 0 0 0 0 0
58183- 0 0 0 0 0 0 0 0 0 0 0 0
58184- 0 0 0 0 0 0 0 0 0 0 0 0
58185- 0 0 0 0 0 0 6 6 6 22 22 22
58186- 58 58 58 154 121 60 206 145 10 234 174 13
58187-242 186 14 246 186 14 246 190 14 246 190 14
58188-246 190 14 246 190 14 246 190 14 246 190 14
58189-246 190 14 246 190 14 246 190 14 246 190 14
58190-246 190 14 246 190 14 246 190 14 246 190 14
58191-246 186 14 236 178 12 210 162 10 163 110 8
58192- 61 42 6 138 138 138 218 218 218 250 250 250
58193-253 253 253 253 253 253 253 253 253 250 250 250
58194-242 242 242 210 210 210 144 144 144 66 66 66
58195- 6 6 6 2 2 6 2 2 6 2 2 6
58196- 2 2 6 2 2 6 61 42 6 163 110 8
58197-216 158 10 236 178 12 246 190 14 246 190 14
58198-246 190 14 246 190 14 246 190 14 246 190 14
58199-246 190 14 246 190 14 246 190 14 246 190 14
58200-246 190 14 239 182 13 230 174 11 216 158 10
58201-190 142 34 124 112 88 70 70 70 38 38 38
58202- 18 18 18 6 6 6 0 0 0 0 0 0
58203- 0 0 0 0 0 0 0 0 0 0 0 0
58204- 0 0 0 0 0 0 0 0 0 0 0 0
58205- 0 0 0 0 0 0 6 6 6 22 22 22
58206- 62 62 62 168 124 44 206 145 10 224 166 10
58207-236 178 12 239 182 13 242 186 14 242 186 14
58208-246 186 14 246 190 14 246 190 14 246 190 14
58209-246 190 14 246 190 14 246 190 14 246 190 14
58210-246 190 14 246 190 14 246 190 14 246 190 14
58211-246 190 14 236 178 12 216 158 10 175 118 6
58212- 80 54 7 2 2 6 6 6 6 30 30 30
58213- 54 54 54 62 62 62 50 50 50 38 38 38
58214- 14 14 14 2 2 6 2 2 6 2 2 6
58215- 2 2 6 2 2 6 2 2 6 2 2 6
58216- 2 2 6 6 6 6 80 54 7 167 114 7
58217-213 154 11 236 178 12 246 190 14 246 190 14
58218-246 190 14 246 190 14 246 190 14 246 190 14
58219-246 190 14 242 186 14 239 182 13 239 182 13
58220-230 174 11 210 150 10 174 135 50 124 112 88
58221- 82 82 82 54 54 54 34 34 34 18 18 18
58222- 6 6 6 0 0 0 0 0 0 0 0 0
58223- 0 0 0 0 0 0 0 0 0 0 0 0
58224- 0 0 0 0 0 0 0 0 0 0 0 0
58225- 0 0 0 0 0 0 6 6 6 18 18 18
58226- 50 50 50 158 118 36 192 133 9 200 144 11
58227-216 158 10 219 162 10 224 166 10 226 170 11
58228-230 174 11 236 178 12 239 182 13 239 182 13
58229-242 186 14 246 186 14 246 190 14 246 190 14
58230-246 190 14 246 190 14 246 190 14 246 190 14
58231-246 186 14 230 174 11 210 150 10 163 110 8
58232-104 69 6 10 10 10 2 2 6 2 2 6
58233- 2 2 6 2 2 6 2 2 6 2 2 6
58234- 2 2 6 2 2 6 2 2 6 2 2 6
58235- 2 2 6 2 2 6 2 2 6 2 2 6
58236- 2 2 6 6 6 6 91 60 6 167 114 7
58237-206 145 10 230 174 11 242 186 14 246 190 14
58238-246 190 14 246 190 14 246 186 14 242 186 14
58239-239 182 13 230 174 11 224 166 10 213 154 11
58240-180 133 36 124 112 88 86 86 86 58 58 58
58241- 38 38 38 22 22 22 10 10 10 6 6 6
58242- 0 0 0 0 0 0 0 0 0 0 0 0
58243- 0 0 0 0 0 0 0 0 0 0 0 0
58244- 0 0 0 0 0 0 0 0 0 0 0 0
58245- 0 0 0 0 0 0 0 0 0 14 14 14
58246- 34 34 34 70 70 70 138 110 50 158 118 36
58247-167 114 7 180 123 7 192 133 9 197 138 11
58248-200 144 11 206 145 10 213 154 11 219 162 10
58249-224 166 10 230 174 11 239 182 13 242 186 14
58250-246 186 14 246 186 14 246 186 14 246 186 14
58251-239 182 13 216 158 10 185 133 11 152 99 6
58252-104 69 6 18 14 6 2 2 6 2 2 6
58253- 2 2 6 2 2 6 2 2 6 2 2 6
58254- 2 2 6 2 2 6 2 2 6 2 2 6
58255- 2 2 6 2 2 6 2 2 6 2 2 6
58256- 2 2 6 6 6 6 80 54 7 152 99 6
58257-192 133 9 219 162 10 236 178 12 239 182 13
58258-246 186 14 242 186 14 239 182 13 236 178 12
58259-224 166 10 206 145 10 192 133 9 154 121 60
58260- 94 94 94 62 62 62 42 42 42 22 22 22
58261- 14 14 14 6 6 6 0 0 0 0 0 0
58262- 0 0 0 0 0 0 0 0 0 0 0 0
58263- 0 0 0 0 0 0 0 0 0 0 0 0
58264- 0 0 0 0 0 0 0 0 0 0 0 0
58265- 0 0 0 0 0 0 0 0 0 6 6 6
58266- 18 18 18 34 34 34 58 58 58 78 78 78
58267-101 98 89 124 112 88 142 110 46 156 107 11
58268-163 110 8 167 114 7 175 118 6 180 123 7
58269-185 133 11 197 138 11 210 150 10 219 162 10
58270-226 170 11 236 178 12 236 178 12 234 174 13
58271-219 162 10 197 138 11 163 110 8 130 83 6
58272- 91 60 6 10 10 10 2 2 6 2 2 6
58273- 18 18 18 38 38 38 38 38 38 38 38 38
58274- 38 38 38 38 38 38 38 38 38 38 38 38
58275- 38 38 38 38 38 38 26 26 26 2 2 6
58276- 2 2 6 6 6 6 70 47 6 137 92 6
58277-175 118 6 200 144 11 219 162 10 230 174 11
58278-234 174 13 230 174 11 219 162 10 210 150 10
58279-192 133 9 163 110 8 124 112 88 82 82 82
58280- 50 50 50 30 30 30 14 14 14 6 6 6
58281- 0 0 0 0 0 0 0 0 0 0 0 0
58282- 0 0 0 0 0 0 0 0 0 0 0 0
58283- 0 0 0 0 0 0 0 0 0 0 0 0
58284- 0 0 0 0 0 0 0 0 0 0 0 0
58285- 0 0 0 0 0 0 0 0 0 0 0 0
58286- 6 6 6 14 14 14 22 22 22 34 34 34
58287- 42 42 42 58 58 58 74 74 74 86 86 86
58288-101 98 89 122 102 70 130 98 46 121 87 25
58289-137 92 6 152 99 6 163 110 8 180 123 7
58290-185 133 11 197 138 11 206 145 10 200 144 11
58291-180 123 7 156 107 11 130 83 6 104 69 6
58292- 50 34 6 54 54 54 110 110 110 101 98 89
58293- 86 86 86 82 82 82 78 78 78 78 78 78
58294- 78 78 78 78 78 78 78 78 78 78 78 78
58295- 78 78 78 82 82 82 86 86 86 94 94 94
58296-106 106 106 101 101 101 86 66 34 124 80 6
58297-156 107 11 180 123 7 192 133 9 200 144 11
58298-206 145 10 200 144 11 192 133 9 175 118 6
58299-139 102 15 109 106 95 70 70 70 42 42 42
58300- 22 22 22 10 10 10 0 0 0 0 0 0
58301- 0 0 0 0 0 0 0 0 0 0 0 0
58302- 0 0 0 0 0 0 0 0 0 0 0 0
58303- 0 0 0 0 0 0 0 0 0 0 0 0
58304- 0 0 0 0 0 0 0 0 0 0 0 0
58305- 0 0 0 0 0 0 0 0 0 0 0 0
58306- 0 0 0 0 0 0 6 6 6 10 10 10
58307- 14 14 14 22 22 22 30 30 30 38 38 38
58308- 50 50 50 62 62 62 74 74 74 90 90 90
58309-101 98 89 112 100 78 121 87 25 124 80 6
58310-137 92 6 152 99 6 152 99 6 152 99 6
58311-138 86 6 124 80 6 98 70 6 86 66 30
58312-101 98 89 82 82 82 58 58 58 46 46 46
58313- 38 38 38 34 34 34 34 34 34 34 34 34
58314- 34 34 34 34 34 34 34 34 34 34 34 34
58315- 34 34 34 34 34 34 38 38 38 42 42 42
58316- 54 54 54 82 82 82 94 86 76 91 60 6
58317-134 86 6 156 107 11 167 114 7 175 118 6
58318-175 118 6 167 114 7 152 99 6 121 87 25
58319-101 98 89 62 62 62 34 34 34 18 18 18
58320- 6 6 6 0 0 0 0 0 0 0 0 0
58321- 0 0 0 0 0 0 0 0 0 0 0 0
58322- 0 0 0 0 0 0 0 0 0 0 0 0
58323- 0 0 0 0 0 0 0 0 0 0 0 0
58324- 0 0 0 0 0 0 0 0 0 0 0 0
58325- 0 0 0 0 0 0 0 0 0 0 0 0
58326- 0 0 0 0 0 0 0 0 0 0 0 0
58327- 0 0 0 6 6 6 6 6 6 10 10 10
58328- 18 18 18 22 22 22 30 30 30 42 42 42
58329- 50 50 50 66 66 66 86 86 86 101 98 89
58330-106 86 58 98 70 6 104 69 6 104 69 6
58331-104 69 6 91 60 6 82 62 34 90 90 90
58332- 62 62 62 38 38 38 22 22 22 14 14 14
58333- 10 10 10 10 10 10 10 10 10 10 10 10
58334- 10 10 10 10 10 10 6 6 6 10 10 10
58335- 10 10 10 10 10 10 10 10 10 14 14 14
58336- 22 22 22 42 42 42 70 70 70 89 81 66
58337- 80 54 7 104 69 6 124 80 6 137 92 6
58338-134 86 6 116 81 8 100 82 52 86 86 86
58339- 58 58 58 30 30 30 14 14 14 6 6 6
58340- 0 0 0 0 0 0 0 0 0 0 0 0
58341- 0 0 0 0 0 0 0 0 0 0 0 0
58342- 0 0 0 0 0 0 0 0 0 0 0 0
58343- 0 0 0 0 0 0 0 0 0 0 0 0
58344- 0 0 0 0 0 0 0 0 0 0 0 0
58345- 0 0 0 0 0 0 0 0 0 0 0 0
58346- 0 0 0 0 0 0 0 0 0 0 0 0
58347- 0 0 0 0 0 0 0 0 0 0 0 0
58348- 0 0 0 6 6 6 10 10 10 14 14 14
58349- 18 18 18 26 26 26 38 38 38 54 54 54
58350- 70 70 70 86 86 86 94 86 76 89 81 66
58351- 89 81 66 86 86 86 74 74 74 50 50 50
58352- 30 30 30 14 14 14 6 6 6 0 0 0
58353- 0 0 0 0 0 0 0 0 0 0 0 0
58354- 0 0 0 0 0 0 0 0 0 0 0 0
58355- 0 0 0 0 0 0 0 0 0 0 0 0
58356- 6 6 6 18 18 18 34 34 34 58 58 58
58357- 82 82 82 89 81 66 89 81 66 89 81 66
58358- 94 86 66 94 86 76 74 74 74 50 50 50
58359- 26 26 26 14 14 14 6 6 6 0 0 0
58360- 0 0 0 0 0 0 0 0 0 0 0 0
58361- 0 0 0 0 0 0 0 0 0 0 0 0
58362- 0 0 0 0 0 0 0 0 0 0 0 0
58363- 0 0 0 0 0 0 0 0 0 0 0 0
58364- 0 0 0 0 0 0 0 0 0 0 0 0
58365- 0 0 0 0 0 0 0 0 0 0 0 0
58366- 0 0 0 0 0 0 0 0 0 0 0 0
58367- 0 0 0 0 0 0 0 0 0 0 0 0
58368- 0 0 0 0 0 0 0 0 0 0 0 0
58369- 6 6 6 6 6 6 14 14 14 18 18 18
58370- 30 30 30 38 38 38 46 46 46 54 54 54
58371- 50 50 50 42 42 42 30 30 30 18 18 18
58372- 10 10 10 0 0 0 0 0 0 0 0 0
58373- 0 0 0 0 0 0 0 0 0 0 0 0
58374- 0 0 0 0 0 0 0 0 0 0 0 0
58375- 0 0 0 0 0 0 0 0 0 0 0 0
58376- 0 0 0 6 6 6 14 14 14 26 26 26
58377- 38 38 38 50 50 50 58 58 58 58 58 58
58378- 54 54 54 42 42 42 30 30 30 18 18 18
58379- 10 10 10 0 0 0 0 0 0 0 0 0
58380- 0 0 0 0 0 0 0 0 0 0 0 0
58381- 0 0 0 0 0 0 0 0 0 0 0 0
58382- 0 0 0 0 0 0 0 0 0 0 0 0
58383- 0 0 0 0 0 0 0 0 0 0 0 0
58384- 0 0 0 0 0 0 0 0 0 0 0 0
58385- 0 0 0 0 0 0 0 0 0 0 0 0
58386- 0 0 0 0 0 0 0 0 0 0 0 0
58387- 0 0 0 0 0 0 0 0 0 0 0 0
58388- 0 0 0 0 0 0 0 0 0 0 0 0
58389- 0 0 0 0 0 0 0 0 0 6 6 6
58390- 6 6 6 10 10 10 14 14 14 18 18 18
58391- 18 18 18 14 14 14 10 10 10 6 6 6
58392- 0 0 0 0 0 0 0 0 0 0 0 0
58393- 0 0 0 0 0 0 0 0 0 0 0 0
58394- 0 0 0 0 0 0 0 0 0 0 0 0
58395- 0 0 0 0 0 0 0 0 0 0 0 0
58396- 0 0 0 0 0 0 0 0 0 6 6 6
58397- 14 14 14 18 18 18 22 22 22 22 22 22
58398- 18 18 18 14 14 14 10 10 10 6 6 6
58399- 0 0 0 0 0 0 0 0 0 0 0 0
58400- 0 0 0 0 0 0 0 0 0 0 0 0
58401- 0 0 0 0 0 0 0 0 0 0 0 0
58402- 0 0 0 0 0 0 0 0 0 0 0 0
58403- 0 0 0 0 0 0 0 0 0 0 0 0
58404+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58405+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58406+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58407+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58408+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58409+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58410+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58411+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58412+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58413+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58414+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58415+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58416+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58417+4 4 4 4 4 4
58418+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58419+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58420+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58421+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58422+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58423+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58424+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58425+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58426+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58427+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58428+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58429+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58430+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58431+4 4 4 4 4 4
58432+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58433+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58434+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58435+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58436+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58437+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58438+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58439+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58440+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58441+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58442+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58443+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58444+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58445+4 4 4 4 4 4
58446+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58447+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58448+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58449+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58450+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58451+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58452+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58453+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58454+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58455+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58456+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58457+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58458+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58459+4 4 4 4 4 4
58460+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58461+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58462+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58463+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58464+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58465+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58466+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58467+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58468+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58469+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58470+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58471+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58472+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58473+4 4 4 4 4 4
58474+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58475+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58476+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58477+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58478+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58479+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58480+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58481+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58482+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58483+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58484+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58485+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58486+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58487+4 4 4 4 4 4
58488+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58489+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58490+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58491+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58492+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
58493+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
58494+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58495+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58496+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58497+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
58498+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58499+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
58500+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58501+4 4 4 4 4 4
58502+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58503+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58504+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58505+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58506+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
58507+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
58508+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58509+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58510+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58511+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
58512+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
58513+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
58514+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58515+4 4 4 4 4 4
58516+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58517+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58518+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58519+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58520+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
58521+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
58522+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58523+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58524+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58525+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
58526+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
58527+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
58528+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
58529+4 4 4 4 4 4
58530+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58531+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58532+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58533+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
58534+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
58535+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
58536+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
58537+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58538+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
58539+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
58540+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
58541+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
58542+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
58543+4 4 4 4 4 4
58544+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58545+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58546+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58547+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
58548+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
58549+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
58550+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
58551+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58552+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
58553+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
58554+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
58555+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
58556+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
58557+4 4 4 4 4 4
58558+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58559+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58560+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
58561+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
58562+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
58563+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
58564+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
58565+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
58566+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
58567+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
58568+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
58569+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
58570+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
58571+4 4 4 4 4 4
58572+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58573+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58574+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
58575+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
58576+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
58577+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
58578+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
58579+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
58580+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
58581+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
58582+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
58583+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
58584+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
58585+4 4 4 4 4 4
58586+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58587+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58588+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
58589+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
58590+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
58591+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
58592+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
58593+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
58594+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
58595+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
58596+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
58597+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
58598+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
58599+4 4 4 4 4 4
58600+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58601+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58602+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
58603+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
58604+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
58605+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
58606+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
58607+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
58608+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
58609+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
58610+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
58611+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
58612+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58613+4 4 4 4 4 4
58614+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58615+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58616+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
58617+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
58618+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
58619+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
58620+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
58621+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
58622+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
58623+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
58624+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
58625+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
58626+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
58627+4 4 4 4 4 4
58628+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58629+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
58630+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
58631+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
58632+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
58633+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
58634+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
58635+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
58636+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
58637+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
58638+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
58639+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
58640+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
58641+4 4 4 4 4 4
58642+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58643+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
58644+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
58645+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
58646+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
58647+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
58648+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
58649+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
58650+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
58651+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
58652+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
58653+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
58654+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
58655+0 0 0 4 4 4
58656+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
58657+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
58658+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
58659+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
58660+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
58661+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
58662+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
58663+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
58664+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
58665+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
58666+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
58667+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
58668+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
58669+2 0 0 0 0 0
58670+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
58671+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
58672+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
58673+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
58674+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
58675+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
58676+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
58677+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
58678+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
58679+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
58680+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
58681+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
58682+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
58683+37 38 37 0 0 0
58684+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
58685+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
58686+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
58687+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
58688+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
58689+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
58690+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
58691+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
58692+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
58693+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
58694+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
58695+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
58696+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
58697+85 115 134 4 0 0
58698+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
58699+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
58700+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
58701+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
58702+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
58703+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
58704+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
58705+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
58706+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
58707+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
58708+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
58709+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
58710+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
58711+60 73 81 4 0 0
58712+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
58713+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
58714+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
58715+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
58716+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
58717+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
58718+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
58719+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
58720+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
58721+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
58722+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
58723+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
58724+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
58725+16 19 21 4 0 0
58726+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
58727+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
58728+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
58729+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
58730+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
58731+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
58732+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
58733+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
58734+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
58735+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
58736+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
58737+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
58738+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
58739+4 0 0 4 3 3
58740+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
58741+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
58742+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
58743+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
58744+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
58745+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
58746+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
58747+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
58748+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
58749+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
58750+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
58751+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
58752+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
58753+3 2 2 4 4 4
58754+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
58755+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
58756+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
58757+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
58758+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
58759+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
58760+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
58761+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
58762+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
58763+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
58764+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
58765+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
58766+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
58767+4 4 4 4 4 4
58768+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
58769+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
58770+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
58771+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
58772+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
58773+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
58774+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
58775+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
58776+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
58777+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
58778+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
58779+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
58780+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
58781+4 4 4 4 4 4
58782+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
58783+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
58784+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
58785+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
58786+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
58787+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
58788+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
58789+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
58790+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
58791+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
58792+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
58793+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
58794+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
58795+5 5 5 5 5 5
58796+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
58797+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
58798+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
58799+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
58800+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
58801+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58802+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
58803+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
58804+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
58805+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
58806+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
58807+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
58808+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
58809+5 5 5 4 4 4
58810+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
58811+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
58812+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
58813+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
58814+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
58815+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
58816+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
[several hundred further "+" lines of "R G B" pixel triplets elided — the remainder of an ASCII-encoded logo image (evidently a CLUT224-format PPM) carried verbatim in this hunk of the patch]
59524diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
59525index fef20db..d28b1ab 100644
59526--- a/drivers/xen/xenfs/xenstored.c
59527+++ b/drivers/xen/xenfs/xenstored.c
59528@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
59529 static int xsd_kva_open(struct inode *inode, struct file *file)
59530 {
59531 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
59532+#ifdef CONFIG_GRKERNSEC_HIDESYM
59533+ NULL);
59534+#else
59535 xen_store_interface);
59536+#endif
59537+
59538 if (!file->private_data)
59539 return -ENOMEM;
59540 return 0;
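The xenstored.c hunk above keeps xsd_kva_open() from formatting the real xen_store_interface address into a file userland can read: with CONFIG_GRKERNSEC_HIDESYM enabled it formats a NULL pointer instead. A minimal userspace sketch of why raw "%p" output matters (hypothetical demo code, not part of the patch) — printing a live address discloses layout information that weakens address-space randomization, while printing a fixed value discloses nothing:

#include <stdio.h>

/* Hypothetical demo: HIDE stands in for CONFIG_GRKERNSEC_HIDESYM. */
#define HIDE 1

static int object;

int main(void)
{
        void *p =
#if HIDE
                NULL;           /* reveal nothing */
#else
                &object;        /* reveals where the object was placed */
#endif
        printf("0x%p\n", p);    /* same format string as xsd_kva_open() */
        return 0;
}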
59541diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
59542index cc1cfae..41158ad 100644
59543--- a/fs/9p/vfs_addr.c
59544+++ b/fs/9p/vfs_addr.c
59545@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
59546
59547 retval = v9fs_file_write_internal(inode,
59548 v9inode->writeback_fid,
59549- (__force const char __user *)buffer,
59550+ (const char __force_user *)buffer,
59551 len, &offset, 0);
59552 if (retval > 0)
59553 retval = 0;
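The cast rewritten in the vfs_addr.c hunk passes a kernel buffer through a parameter that sparse expects to be a user-space pointer; grsecurity spells the combined annotation __force_user (apparently shorthand for __force __user), telling sparse the address-space mismatch is deliberate. A standalone sketch of how those annotations behave — the macro definitions below mirror how the kernel defines them for sparse, but this is illustration, not the kernel's headers:

#include <stdio.h>
#include <string.h>

#ifdef __CHECKER__      /* defined only while sparse is checking */
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

/* Stand-in for a routine whose contract says "user-space buffer". */
static void write_out(const char __user *ubuf, size_t n)
{
        /* Cast back for the demo; a kernel would copy_from_user() here. */
        fwrite((const char __force *)ubuf, 1, n, stdout);
}

int main(void)
{
        char kbuf[] = "kernel data\n";

        /* Passing a plain pointer where __user is expected makes sparse
         * complain about mixed address spaces; __force marks the
         * mismatch as deliberate.  grsecurity bundles the pair as
         * __force_user. */
        write_out((__force const char __user *)kbuf, strlen(kbuf));
        return 0;
}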
59554diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
59555index 7fa4f7a..a7ebf8c 100644
59556--- a/fs/9p/vfs_inode.c
59557+++ b/fs/9p/vfs_inode.c
59558@@ -1312,7 +1312,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
59559 void
59560 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
59561 {
59562- char *s = nd_get_link(nd);
59563+ const char *s = nd_get_link(nd);
59564
59565 p9_debug(P9_DEBUG_VFS, " %s %s\n",
59566 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
59567diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
59568index 370b24c..ff0be7b 100644
59569--- a/fs/Kconfig.binfmt
59570+++ b/fs/Kconfig.binfmt
59571@@ -103,7 +103,7 @@ config HAVE_AOUT
59572
59573 config BINFMT_AOUT
59574 tristate "Kernel support for a.out and ECOFF binaries"
59575- depends on HAVE_AOUT
59576+ depends on HAVE_AOUT && BROKEN
59577 ---help---
59578 A.out (Assembler.OUTput) is a set of formats for libraries and
59579 executables used in the earliest versions of UNIX. Linux used
59580diff --git a/fs/afs/inode.c b/fs/afs/inode.c
59581index 2946712..f737435 100644
59582--- a/fs/afs/inode.c
59583+++ b/fs/afs/inode.c
59584@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
59585 struct afs_vnode *vnode;
59586 struct super_block *sb;
59587 struct inode *inode;
59588- static atomic_t afs_autocell_ino;
59589+ static atomic_unchecked_t afs_autocell_ino;
59590
59591 _enter("{%x:%u},%*.*s,",
59592 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
59593@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
59594 data.fid.unique = 0;
59595 data.fid.vnode = 0;
59596
59597- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
59598+ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
59599 afs_iget5_autocell_test, afs_iget5_set,
59600 &data);
59601 if (!inode) {
59602diff --git a/fs/aio.c b/fs/aio.c
59603index 1c9c5f0..c935d6e 100644
59604--- a/fs/aio.c
59605+++ b/fs/aio.c
59606@@ -141,6 +141,7 @@ struct kioctx {
59607
59608 struct {
59609 unsigned tail;
59610+ unsigned completed_events;
59611 spinlock_t completion_lock;
59612 } ____cacheline_aligned_in_smp;
59613
59614@@ -380,7 +381,7 @@ static int aio_setup_ring(struct kioctx *ctx)
59615 size += sizeof(struct io_event) * nr_events;
59616
59617 nr_pages = PFN_UP(size);
59618- if (nr_pages < 0)
59619+ if (nr_pages <= 0)
59620 return -EINVAL;
59621
59622 file = aio_private_file(ctx, nr_pages);
59623@@ -880,6 +881,68 @@ out:
59624 return ret;
59625 }
59626
59627+/* refill_reqs_available
59628+ * Updates the reqs_available reference counts used for tracking the
59629+ * number of free slots in the completion ring. This can be called
59630+ * from aio_complete() (to optimistically update reqs_available) or
59631+ * from aio_get_req() (the we're out of events case). It must be
59632+ * called holding ctx->completion_lock.
59633+ */
59634+static void refill_reqs_available(struct kioctx *ctx, unsigned head,
59635+ unsigned tail)
59636+{
59637+ unsigned events_in_ring, completed;
59638+
59639+ /* Clamp head since userland can write to it. */
59640+ head %= ctx->nr_events;
59641+ if (head <= tail)
59642+ events_in_ring = tail - head;
59643+ else
59644+ events_in_ring = ctx->nr_events - (head - tail);
59645+
59646+ completed = ctx->completed_events;
59647+ if (events_in_ring < completed)
59648+ completed -= events_in_ring;
59649+ else
59650+ completed = 0;
59651+
59652+ if (!completed)
59653+ return;
59654+
59655+ ctx->completed_events -= completed;
59656+ put_reqs_available(ctx, completed);
59657+}
59658+
59659+/* user_refill_reqs_available
59660+ * Called to refill reqs_available when aio_get_req() encounters an
59661+ * out of space in the completion ring.
59662+ */
59663+static void user_refill_reqs_available(struct kioctx *ctx)
59664+{
59665+ spin_lock_irq(&ctx->completion_lock);
59666+ if (ctx->completed_events) {
59667+ struct aio_ring *ring;
59668+ unsigned head;
59669+
59670+ /* Access of ring->head may race with aio_read_events_ring()
59671+ * here, but that's okay since whether we read the old version
59672+ * or the new version, and either will be valid. The important
59673+ * part is that head cannot pass tail since we prevent
59674+ * aio_complete() from updating tail by holding
59675+ * ctx->completion_lock. Even if head is invalid, the check
59676+ * against ctx->completed_events below will make sure we do the
59677+ * safe/right thing.
59678+ */
59679+ ring = kmap_atomic(ctx->ring_pages[0]);
59680+ head = ring->head;
59681+ kunmap_atomic(ring);
59682+
59683+ refill_reqs_available(ctx, head, ctx->tail);
59684+ }
59685+
59686+ spin_unlock_irq(&ctx->completion_lock);
59687+}
59688+
59689 /* aio_get_req
59690 * Allocate a slot for an aio request.
59691 * Returns NULL if no requests are free.
59692@@ -888,8 +951,11 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx)
59693 {
59694 struct kiocb *req;
59695
59696- if (!get_reqs_available(ctx))
59697- return NULL;
59698+ if (!get_reqs_available(ctx)) {
59699+ user_refill_reqs_available(ctx);
59700+ if (!get_reqs_available(ctx))
59701+ return NULL;
59702+ }
59703
59704 req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
59705 if (unlikely(!req))
59706@@ -948,8 +1014,8 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
59707 struct kioctx *ctx = iocb->ki_ctx;
59708 struct aio_ring *ring;
59709 struct io_event *ev_page, *event;
59710+ unsigned tail, pos, head;
59711 unsigned long flags;
59712- unsigned tail, pos;
59713
59714 /*
59715 * Special case handling for sync iocbs:
59716@@ -1010,10 +1076,14 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
59717 ctx->tail = tail;
59718
59719 ring = kmap_atomic(ctx->ring_pages[0]);
59720+ head = ring->head;
59721 ring->tail = tail;
59722 kunmap_atomic(ring);
59723 flush_dcache_page(ctx->ring_pages[0]);
59724
59725+ ctx->completed_events++;
59726+ if (ctx->completed_events > 1)
59727+ refill_reqs_available(ctx, head, tail);
59728 spin_unlock_irqrestore(&ctx->completion_lock, flags);
59729
59730 pr_debug("added to ring %p at [%u]\n", iocb, tail);
59731@@ -1028,7 +1098,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
59732
59733 /* everything turned out well, dispose of the aiocb. */
59734 kiocb_free(iocb);
59735- put_reqs_available(ctx, 1);
59736
59737 /*
59738 * We have to order our ring_info tail store above and test
59739@@ -1065,6 +1134,12 @@ static long aio_read_events_ring(struct kioctx *ctx,
59740 tail = ring->tail;
59741 kunmap_atomic(ring);
59742
59743+ /*
59744+ * Ensure that once we've read the current tail pointer, that
59745+ * we also see the events that were stored up to the tail.
59746+ */
59747+ smp_rmb();
59748+
59749 pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
59750
59751 if (head == tail)
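The aio.c changes above stop aio_complete() from returning one ring slot per event directly and instead batch the accounting: completions are counted in ctx->completed_events, and refill_reqs_available() compares that count against how many events still sit in the ring between head and tail, handing back only the difference. The occupancy computation must cope both with tail wrapping past the end of the ring and with a bogus head value scribbled by userland. A self-contained sketch of just that arithmetic (hypothetical helper mirroring the hunk's logic):

#include <assert.h>
#include <stdio.h>

/* How many events still sit in a ring of nr slots between the consumer
 * index (head) and the producer index (tail)?  head is clamped first
 * because, in the aio case, userland can write to it. */
static unsigned events_in_ring(unsigned head, unsigned tail, unsigned nr)
{
        head %= nr;
        if (head <= tail)
                return tail - head;
        return nr - (head - tail);
}

int main(void)
{
        assert(events_in_ring(0, 0, 8) == 0);   /* empty ring */
        assert(events_in_ring(2, 5, 8) == 3);   /* no wraparound */
        assert(events_in_ring(6, 1, 8) == 3);   /* wrapped: slots 6, 7, 0 */
        assert(events_in_ring(13, 1, 8) == 4);  /* bogus head, clamped to 5 */
        printf("ring accounting ok\n");
        return 0;
}

Anything in ctx->completed_events beyond that occupancy is what refill_reqs_available() returns to the reqs_available pool.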
59752diff --git a/fs/attr.c b/fs/attr.c
59753index 6530ced..4a827e2 100644
59754--- a/fs/attr.c
59755+++ b/fs/attr.c
59756@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
59757 unsigned long limit;
59758
59759 limit = rlimit(RLIMIT_FSIZE);
59760+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
59761 if (limit != RLIM_INFINITY && offset > limit)
59762 goto out_sig;
59763 if (offset > inode->i_sb->s_maxbytes)
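The attr.c hunk feeds the requested file size into gr_learn_resource() before the normal RLIMIT_FSIZE test, so grsecurity's learning mode can record the limit a workload actually needs. The check itself is ordinary rlimit semantics, which can be inspected from userspace (plain demo, unrelated to the grsecurity helper):

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
        struct rlimit rl;

        /* The shape of the inode_newsize_ok() test: an offset beyond
         * the soft RLIMIT_FSIZE limit is refused (the kernel raises
         * SIGXFSZ); RLIM_INFINITY means no limit applies. */
        if (getrlimit(RLIMIT_FSIZE, &rl) != 0) {
                perror("getrlimit");
                return 1;
        }
        if (rl.rlim_cur == RLIM_INFINITY)
                printf("file size unlimited\n");
        else
                printf("file size capped at %llu bytes\n",
                       (unsigned long long)rl.rlim_cur);
        return 0;
}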
59764diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
59765index 116fd38..c04182da 100644
59766--- a/fs/autofs4/waitq.c
59767+++ b/fs/autofs4/waitq.c
59768@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
59769 {
59770 unsigned long sigpipe, flags;
59771 mm_segment_t fs;
59772- const char *data = (const char *)addr;
59773+ const char __user *data = (const char __force_user *)addr;
59774 ssize_t wr = 0;
59775
59776 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
59777@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
59778 return 1;
59779 }
59780
59781+#ifdef CONFIG_GRKERNSEC_HIDESYM
59782+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
59783+#endif
59784+
59785 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
59786 enum autofs_notify notify)
59787 {
59788@@ -385,7 +389,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
59789
59790 /* If this is a direct mount request create a dummy name */
59791 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
59792+#ifdef CONFIG_GRKERNSEC_HIDESYM
59793+ /* this name does get written to userland via autofs4_write() */
59794+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
59795+#else
59796 qstr.len = sprintf(name, "%p", dentry);
59797+#endif
59798 else {
59799 qstr.len = autofs4_getpath(sbi, dentry, &name);
59800 if (!qstr.len) {
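In the autofs4 hunk, the dummy wait-queue name for direct mounts had been the dentry's address ("%p"), and autofs4_write() copies that string to the daemon in userland; under CONFIG_GRKERNSEC_HIDESYM the patch substitutes an ever-incrementing counter so no kernel pointer leaks. The counter is atomic_unchecked_t because wraparound of a name generator is harmless and need not trip PaX's refcount-overflow protection. The same pattern in portable C11 (hypothetical demo):

#include <stdatomic.h>
#include <stdio.h>

/* Unique token without exposing an address: the counterpart of the
 * hunk's atomic_inc_return_unchecked(&autofs_dummy_name_id). */
static atomic_uint name_id;

static void make_dummy_name(char *buf, size_t len)
{
        snprintf(buf, len, "%08x", atomic_fetch_add(&name_id, 1) + 1);
}

int main(void)
{
        char name[16];

        make_dummy_name(name, sizeof(name));
        printf("token 1: %s\n", name);  /* 00000001 */
        make_dummy_name(name, sizeof(name));
        printf("token 2: %s\n", name);  /* 00000002 */
        return 0;
}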
59801diff --git a/fs/befs/endian.h b/fs/befs/endian.h
59802index 2722387..56059b5 100644
59803--- a/fs/befs/endian.h
59804+++ b/fs/befs/endian.h
59805@@ -11,7 +11,7 @@
59806
59807 #include <asm/byteorder.h>
59808
59809-static inline u64
59810+static inline u64 __intentional_overflow(-1)
59811 fs64_to_cpu(const struct super_block *sb, fs64 n)
59812 {
59813 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
59814@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
59815 return (__force fs64)cpu_to_be64(n);
59816 }
59817
59818-static inline u32
59819+static inline u32 __intentional_overflow(-1)
59820 fs32_to_cpu(const struct super_block *sb, fs32 n)
59821 {
59822 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
59823@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
59824 return (__force fs32)cpu_to_be32(n);
59825 }
59826
59827-static inline u16
59828+static inline u16 __intentional_overflow(-1)
59829 fs16_to_cpu(const struct super_block *sb, fs16 n)
59830 {
59831 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
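__intentional_overflow(-1), added to the three befs byte-swap helpers above, is an annotation consumed by the PaX size_overflow GCC plugin: it exempts these functions from overflow instrumentation, since reinterpreting on-disk fixed-width values routinely produces results the plugin would otherwise flag. What fs64_to_cpu() itself does is plain conditional byte-swapping; a portable sketch (hypothetical helper — the kernel uses le64_to_cpu()/be64_to_cpu()):

#include <stdint.h>
#include <stdio.h>

/* Byte-swap a 64-bit value, like the kernel's swab64(). */
static uint64_t swap64(uint64_t x)
{
        return  (x >> 56) |
                ((x >> 40) & 0x000000000000ff00ULL) |
                ((x >> 24) & 0x0000000000ff0000ULL) |
                ((x >>  8) & 0x00000000ff000000ULL) |
                ((x <<  8) & 0x000000ff00000000ULL) |
                ((x << 24) & 0x0000ff0000000000ULL) |
                ((x << 40) & 0x00ff000000000000ULL) |
                 (x << 56);
}

/* What fs64_to_cpu() boils down to: BeFS volumes may be written in
 * either byte order, so swap only when the on-disk order differs
 * from the CPU's. */
static uint64_t fs64_to_cpu_demo(int disk_is_le, uint64_t n)
{
        union { uint16_t u16; uint8_t u8; } probe = { .u16 = 1 };
        int cpu_is_le = probe.u8;

        return disk_is_le == cpu_is_le ? n : swap64(n);
}

int main(void)
{
        printf("%016llx\n", (unsigned long long)
               fs64_to_cpu_demo(0, 0x0123456789abcdefULL));
        return 0;
}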
59832diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
59833index ca0ba15..0fa3257 100644
59834--- a/fs/binfmt_aout.c
59835+++ b/fs/binfmt_aout.c
59836@@ -16,6 +16,7 @@
59837 #include <linux/string.h>
59838 #include <linux/fs.h>
59839 #include <linux/file.h>
59840+#include <linux/security.h>
59841 #include <linux/stat.h>
59842 #include <linux/fcntl.h>
59843 #include <linux/ptrace.h>
59844@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
59845 #endif
59846 # define START_STACK(u) ((void __user *)u.start_stack)
59847
59848+ memset(&dump, 0, sizeof(dump));
59849+
59850 fs = get_fs();
59851 set_fs(KERNEL_DS);
59852 has_dumped = 1;
59853@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)
59854
59855 /* If the size of the dump file exceeds the rlimit, then see what would happen
59856 if we wrote the stack, but not the data area. */
59857+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
59858 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
59859 dump.u_dsize = 0;
59860
59861 /* Make sure we have enough room to write the stack and data areas. */
59862+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
59863 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
59864 dump.u_ssize = 0;
59865
59866@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
59867 rlim = rlimit(RLIMIT_DATA);
59868 if (rlim >= RLIM_INFINITY)
59869 rlim = ~0;
59870+
59871+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
59872 if (ex.a_data + ex.a_bss > rlim)
59873 return -ENOMEM;
59874
59875@@ -264,6 +271,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
59876
59877 install_exec_creds(bprm);
59878
59879+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59880+ current->mm->pax_flags = 0UL;
59881+#endif
59882+
59883+#ifdef CONFIG_PAX_PAGEEXEC
59884+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
59885+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
59886+
59887+#ifdef CONFIG_PAX_EMUTRAMP
59888+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
59889+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
59890+#endif
59891+
59892+#ifdef CONFIG_PAX_MPROTECT
59893+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
59894+ current->mm->pax_flags |= MF_PAX_MPROTECT;
59895+#endif
59896+
59897+ }
59898+#endif
59899+
59900 if (N_MAGIC(ex) == OMAGIC) {
59901 unsigned long text_addr, map_size;
59902 loff_t pos;
59903@@ -321,7 +349,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
59904 }
59905
59906 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
59907- PROT_READ | PROT_WRITE | PROT_EXEC,
59908+ PROT_READ | PROT_WRITE,
59909 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
59910 fd_offset + ex.a_text);
59911 if (error != N_DATADDR(ex)) {
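The load_aout_binary() additions derive per-process PaX flags from the a.out header: PAGEEXEC and MPROTECT default to on and are cleared only when the corresponding F_PAX_* marker bit is set in N_FLAGS(ex) (an opt-out), while EMUTRAMP is opt-in; the data segment is also mapped without PROT_EXEC. A compact sketch of the opt-out decoding (bit values hypothetical):

#include <stdio.h>

/* Hypothetical header-marker bits (opt-out) and resulting mm flags. */
#define F_NO_PAGEEXEC  0x01
#define F_NO_MPROTECT  0x02
#define MF_PAGEEXEC    0x01
#define MF_MPROTECT    0x02

static unsigned long decode_pax(unsigned header_flags)
{
        unsigned long pax = 0;

        if (!(header_flags & F_NO_PAGEEXEC))    /* on unless opted out */
                pax |= MF_PAGEEXEC;
        if (!(header_flags & F_NO_MPROTECT))
                pax |= MF_MPROTECT;
        return pax;
}

int main(void)
{
        printf("plain binary:     %#lx\n", decode_pax(0));              /* 0x3 */
        printf("mprotect opt-out: %#lx\n", decode_pax(F_NO_MPROTECT)); /* 0x1 */
        return 0;
}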
59912diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
59913index 3892c1a..4e27c04 100644
59914--- a/fs/binfmt_elf.c
59915+++ b/fs/binfmt_elf.c
59916@@ -34,6 +34,7 @@
59917 #include <linux/utsname.h>
59918 #include <linux/coredump.h>
59919 #include <linux/sched.h>
59920+#include <linux/xattr.h>
59921 #include <asm/uaccess.h>
59922 #include <asm/param.h>
59923 #include <asm/page.h>
59924@@ -47,7 +48,7 @@
59925
59926 static int load_elf_binary(struct linux_binprm *bprm);
59927 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
59928- int, int, unsigned long);
59929+ int, int, unsigned long) __intentional_overflow(-1);
59930
59931 #ifdef CONFIG_USELIB
59932 static int load_elf_library(struct file *);
59933@@ -65,6 +66,14 @@ static int elf_core_dump(struct coredump_params *cprm);
59934 #define elf_core_dump NULL
59935 #endif
59936
59937+#ifdef CONFIG_PAX_MPROTECT
59938+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
59939+#endif
59940+
59941+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59942+static void elf_handle_mmap(struct file *file);
59943+#endif
59944+
59945 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
59946 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
59947 #else
59948@@ -84,6 +93,15 @@ static struct linux_binfmt elf_format = {
59949 .load_binary = load_elf_binary,
59950 .load_shlib = load_elf_library,
59951 .core_dump = elf_core_dump,
59952+
59953+#ifdef CONFIG_PAX_MPROTECT
59954+ .handle_mprotect= elf_handle_mprotect,
59955+#endif
59956+
59957+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59958+ .handle_mmap = elf_handle_mmap,
59959+#endif
59960+
59961 .min_coredump = ELF_EXEC_PAGESIZE,
59962 };
59963
59964@@ -91,6 +109,8 @@ static struct linux_binfmt elf_format = {
59965
59966 static int set_brk(unsigned long start, unsigned long end)
59967 {
59968+ unsigned long e = end;
59969+
59970 start = ELF_PAGEALIGN(start);
59971 end = ELF_PAGEALIGN(end);
59972 if (end > start) {
59973@@ -99,7 +119,7 @@ static int set_brk(unsigned long start, unsigned long end)
59974 if (BAD_ADDR(addr))
59975 return addr;
59976 }
59977- current->mm->start_brk = current->mm->brk = end;
59978+ current->mm->start_brk = current->mm->brk = e;
59979 return 0;
59980 }
59981
59982@@ -160,12 +180,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
59983 elf_addr_t __user *u_rand_bytes;
59984 const char *k_platform = ELF_PLATFORM;
59985 const char *k_base_platform = ELF_BASE_PLATFORM;
59986- unsigned char k_rand_bytes[16];
59987+ u32 k_rand_bytes[4];
59988 int items;
59989 elf_addr_t *elf_info;
59990 int ei_index = 0;
59991 const struct cred *cred = current_cred();
59992 struct vm_area_struct *vma;
59993+ unsigned long saved_auxv[AT_VECTOR_SIZE];
59994
59995 /*
59996 * In some cases (e.g. Hyper-Threading), we want to avoid L1
59997@@ -207,8 +228,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
59998 * Generate 16 random bytes for userspace PRNG seeding.
59999 */
60000 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
60001- u_rand_bytes = (elf_addr_t __user *)
60002- STACK_ALLOC(p, sizeof(k_rand_bytes));
60003+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
60004+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
60005+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
60006+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
60007+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
60008+ u_rand_bytes = (elf_addr_t __user *) p;
60009 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
60010 return -EFAULT;
60011
60012@@ -323,9 +348,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
60013 return -EFAULT;
60014 current->mm->env_end = p;
60015
60016+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
60017+
60018 /* Put the elf_info on the stack in the right place. */
60019 sp = (elf_addr_t __user *)envp + 1;
60020- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
60021+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
60022 return -EFAULT;
60023 return 0;
60024 }
60025@@ -393,15 +420,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
60026 an ELF header */
60027
60028 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
60029- struct file *interpreter, unsigned long *interp_map_addr,
60030- unsigned long no_base)
60031+ struct file *interpreter, unsigned long no_base)
60032 {
60033 struct elf_phdr *elf_phdata;
60034 struct elf_phdr *eppnt;
60035- unsigned long load_addr = 0;
60036+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
60037 int load_addr_set = 0;
60038 unsigned long last_bss = 0, elf_bss = 0;
60039- unsigned long error = ~0UL;
60040+ unsigned long error = -EINVAL;
60041 unsigned long total_size;
60042 int retval, i, size;
60043
60044@@ -447,6 +473,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
60045 goto out_close;
60046 }
60047
60048+#ifdef CONFIG_PAX_SEGMEXEC
60049+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
60050+ pax_task_size = SEGMEXEC_TASK_SIZE;
60051+#endif
60052+
60053 eppnt = elf_phdata;
60054 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
60055 if (eppnt->p_type == PT_LOAD) {
60056@@ -470,8 +501,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
60057 map_addr = elf_map(interpreter, load_addr + vaddr,
60058 eppnt, elf_prot, elf_type, total_size);
60059 total_size = 0;
60060- if (!*interp_map_addr)
60061- *interp_map_addr = map_addr;
60062 error = map_addr;
60063 if (BAD_ADDR(map_addr))
60064 goto out_close;
60065@@ -490,8 +519,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
60066 k = load_addr + eppnt->p_vaddr;
60067 if (BAD_ADDR(k) ||
60068 eppnt->p_filesz > eppnt->p_memsz ||
60069- eppnt->p_memsz > TASK_SIZE ||
60070- TASK_SIZE - eppnt->p_memsz < k) {
60071+ eppnt->p_memsz > pax_task_size ||
60072+ pax_task_size - eppnt->p_memsz < k) {
60073 error = -ENOMEM;
60074 goto out_close;
60075 }
60076@@ -530,9 +559,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
60077 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
60078
60079 /* Map the last of the bss segment */
60080- error = vm_brk(elf_bss, last_bss - elf_bss);
60081- if (BAD_ADDR(error))
60082- goto out_close;
60083+ if (last_bss > elf_bss) {
60084+ error = vm_brk(elf_bss, last_bss - elf_bss);
60085+ if (BAD_ADDR(error))
60086+ goto out_close;
60087+ }
60088 }
60089
60090 error = load_addr;
60091@@ -543,6 +574,336 @@ out:
60092 return error;
60093 }
60094
60095+#ifdef CONFIG_PAX_PT_PAX_FLAGS
60096+#ifdef CONFIG_PAX_SOFTMODE
60097+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
60098+{
60099+ unsigned long pax_flags = 0UL;
60100+
60101+#ifdef CONFIG_PAX_PAGEEXEC
60102+ if (elf_phdata->p_flags & PF_PAGEEXEC)
60103+ pax_flags |= MF_PAX_PAGEEXEC;
60104+#endif
60105+
60106+#ifdef CONFIG_PAX_SEGMEXEC
60107+ if (elf_phdata->p_flags & PF_SEGMEXEC)
60108+ pax_flags |= MF_PAX_SEGMEXEC;
60109+#endif
60110+
60111+#ifdef CONFIG_PAX_EMUTRAMP
60112+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
60113+ pax_flags |= MF_PAX_EMUTRAMP;
60114+#endif
60115+
60116+#ifdef CONFIG_PAX_MPROTECT
60117+ if (elf_phdata->p_flags & PF_MPROTECT)
60118+ pax_flags |= MF_PAX_MPROTECT;
60119+#endif
60120+
60121+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
60122+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
60123+ pax_flags |= MF_PAX_RANDMMAP;
60124+#endif
60125+
60126+ return pax_flags;
60127+}
60128+#endif
60129+
60130+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
60131+{
60132+ unsigned long pax_flags = 0UL;
60133+
60134+#ifdef CONFIG_PAX_PAGEEXEC
60135+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
60136+ pax_flags |= MF_PAX_PAGEEXEC;
60137+#endif
60138+
60139+#ifdef CONFIG_PAX_SEGMEXEC
60140+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
60141+ pax_flags |= MF_PAX_SEGMEXEC;
60142+#endif
60143+
60144+#ifdef CONFIG_PAX_EMUTRAMP
60145+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
60146+ pax_flags |= MF_PAX_EMUTRAMP;
60147+#endif
60148+
60149+#ifdef CONFIG_PAX_MPROTECT
60150+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
60151+ pax_flags |= MF_PAX_MPROTECT;
60152+#endif
60153+
60154+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
60155+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
60156+ pax_flags |= MF_PAX_RANDMMAP;
60157+#endif
60158+
60159+ return pax_flags;
60160+}
60161+#endif
60162+
60163+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
60164+#ifdef CONFIG_PAX_SOFTMODE
60165+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
60166+{
60167+ unsigned long pax_flags = 0UL;
60168+
60169+#ifdef CONFIG_PAX_PAGEEXEC
60170+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
60171+ pax_flags |= MF_PAX_PAGEEXEC;
60172+#endif
60173+
60174+#ifdef CONFIG_PAX_SEGMEXEC
60175+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
60176+ pax_flags |= MF_PAX_SEGMEXEC;
60177+#endif
60178+
60179+#ifdef CONFIG_PAX_EMUTRAMP
60180+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
60181+ pax_flags |= MF_PAX_EMUTRAMP;
60182+#endif
60183+
60184+#ifdef CONFIG_PAX_MPROTECT
60185+ if (pax_flags_softmode & MF_PAX_MPROTECT)
60186+ pax_flags |= MF_PAX_MPROTECT;
60187+#endif
60188+
60189+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
60190+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
60191+ pax_flags |= MF_PAX_RANDMMAP;
60192+#endif
60193+
60194+ return pax_flags;
60195+}
60196+#endif
60197+
60198+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
60199+{
60200+ unsigned long pax_flags = 0UL;
60201+
60202+#ifdef CONFIG_PAX_PAGEEXEC
60203+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
60204+ pax_flags |= MF_PAX_PAGEEXEC;
60205+#endif
60206+
60207+#ifdef CONFIG_PAX_SEGMEXEC
60208+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
60209+ pax_flags |= MF_PAX_SEGMEXEC;
60210+#endif
60211+
60212+#ifdef CONFIG_PAX_EMUTRAMP
60213+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
60214+ pax_flags |= MF_PAX_EMUTRAMP;
60215+#endif
60216+
60217+#ifdef CONFIG_PAX_MPROTECT
60218+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
60219+ pax_flags |= MF_PAX_MPROTECT;
60220+#endif
60221+
60222+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
60223+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
60224+ pax_flags |= MF_PAX_RANDMMAP;
60225+#endif
60226+
60227+ return pax_flags;
60228+}
60229+#endif
60230+
60231+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60232+static unsigned long pax_parse_defaults(void)
60233+{
60234+ unsigned long pax_flags = 0UL;
60235+
60236+#ifdef CONFIG_PAX_SOFTMODE
60237+ if (pax_softmode)
60238+ return pax_flags;
60239+#endif
60240+
60241+#ifdef CONFIG_PAX_PAGEEXEC
60242+ pax_flags |= MF_PAX_PAGEEXEC;
60243+#endif
60244+
60245+#ifdef CONFIG_PAX_SEGMEXEC
60246+ pax_flags |= MF_PAX_SEGMEXEC;
60247+#endif
60248+
60249+#ifdef CONFIG_PAX_MPROTECT
60250+ pax_flags |= MF_PAX_MPROTECT;
60251+#endif
60252+
60253+#ifdef CONFIG_PAX_RANDMMAP
60254+ if (randomize_va_space)
60255+ pax_flags |= MF_PAX_RANDMMAP;
60256+#endif
60257+
60258+ return pax_flags;
60259+}
60260+
60261+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
60262+{
60263+ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
60264+
60265+#ifdef CONFIG_PAX_EI_PAX
60266+
60267+#ifdef CONFIG_PAX_SOFTMODE
60268+ if (pax_softmode)
60269+ return pax_flags;
60270+#endif
60271+
60272+ pax_flags = 0UL;
60273+
60274+#ifdef CONFIG_PAX_PAGEEXEC
60275+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
60276+ pax_flags |= MF_PAX_PAGEEXEC;
60277+#endif
60278+
60279+#ifdef CONFIG_PAX_SEGMEXEC
60280+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
60281+ pax_flags |= MF_PAX_SEGMEXEC;
60282+#endif
60283+
60284+#ifdef CONFIG_PAX_EMUTRAMP
60285+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
60286+ pax_flags |= MF_PAX_EMUTRAMP;
60287+#endif
60288+
60289+#ifdef CONFIG_PAX_MPROTECT
60290+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
60291+ pax_flags |= MF_PAX_MPROTECT;
60292+#endif
60293+
60294+#ifdef CONFIG_PAX_ASLR
60295+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
60296+ pax_flags |= MF_PAX_RANDMMAP;
60297+#endif
60298+
60299+#endif
60300+
60301+ return pax_flags;
60302+
60303+}
60304+
60305+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
60306+{
60307+
60308+#ifdef CONFIG_PAX_PT_PAX_FLAGS
60309+ unsigned long i;
60310+
60311+ for (i = 0UL; i < elf_ex->e_phnum; i++)
60312+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
60313+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
60314+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
60315+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
60316+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
60317+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
60318+ return PAX_PARSE_FLAGS_FALLBACK;
60319+
60320+#ifdef CONFIG_PAX_SOFTMODE
60321+ if (pax_softmode)
60322+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
60323+ else
60324+#endif
60325+
60326+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
60327+ break;
60328+ }
60329+#endif
60330+
60331+ return PAX_PARSE_FLAGS_FALLBACK;
60332+}
60333+
60334+static unsigned long pax_parse_xattr_pax(struct file * const file)
60335+{
60336+
60337+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
60338+ ssize_t xattr_size, i;
60339+ unsigned char xattr_value[sizeof("pemrs") - 1];
60340+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
60341+
60342+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
60343+ if (xattr_size < 0 || xattr_size > sizeof xattr_value)
60344+ return PAX_PARSE_FLAGS_FALLBACK;
60345+
60346+ for (i = 0; i < xattr_size; i++)
60347+ switch (xattr_value[i]) {
60348+ default:
60349+ return PAX_PARSE_FLAGS_FALLBACK;
60350+
60351+#define parse_flag(option1, option2, flag) \
60352+ case option1: \
60353+ if (pax_flags_hardmode & MF_PAX_##flag) \
60354+ return PAX_PARSE_FLAGS_FALLBACK;\
60355+ pax_flags_hardmode |= MF_PAX_##flag; \
60356+ break; \
60357+ case option2: \
60358+ if (pax_flags_softmode & MF_PAX_##flag) \
60359+ return PAX_PARSE_FLAGS_FALLBACK;\
60360+ pax_flags_softmode |= MF_PAX_##flag; \
60361+ break;
60362+
60363+ parse_flag('p', 'P', PAGEEXEC);
60364+ parse_flag('e', 'E', EMUTRAMP);
60365+ parse_flag('m', 'M', MPROTECT);
60366+ parse_flag('r', 'R', RANDMMAP);
60367+ parse_flag('s', 'S', SEGMEXEC);
60368+
60369+#undef parse_flag
60370+ }
60371+
60372+ if (pax_flags_hardmode & pax_flags_softmode)
60373+ return PAX_PARSE_FLAGS_FALLBACK;
60374+
60375+#ifdef CONFIG_PAX_SOFTMODE
60376+ if (pax_softmode)
60377+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
60378+ else
60379+#endif
60380+
60381+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
60382+#else
60383+ return PAX_PARSE_FLAGS_FALLBACK;
60384+#endif
60385+
60386+}
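For reference, the loop above accepts at most five bytes drawn from "pemrs": a lowercase letter requests hard-mode disabling of the corresponding feature, its uppercase counterpart requests soft-mode enabling, and a repeated letter, an unknown byte, or a letter present in both cases drops the binary back to PAX_PARSE_FLAGS_FALLBACK. A minimal user-space sketch of the same validation rules (illustration only; the helper name is invented, and the attribute is stored under the "user.pax.flags" name per grsecurity documentation):

#include <stdbool.h>
#include <string.h>

/* Validate a candidate PaX xattr value the way the kernel loop above
 * does: at most five letters from "pemrs", lowercase = hard mode,
 * uppercase = soft mode, no duplicates, and no letter may appear in
 * both cases at once.
 */
static bool pax_xattr_value_valid(const unsigned char *value, size_t len)
{
	static const char set[] = "pemrs";
	unsigned long hard = 0UL, soft = 0UL;
	size_t i;

	if (len > sizeof(set) - 1)		/* kernel reads at most 5 bytes */
		return false;

	for (i = 0; i < len; i++) {
		const char *p = strchr(set, value[i] | 0x20);
		unsigned long bit;

		if (!p)
			return false;		/* not one of p/e/m/r/s */
		bit = 1UL << (p - set);
		if (value[i] & 0x20) {		/* lowercase: hard mode */
			if (hard & bit)
				return false;	/* duplicate letter */
			hard |= bit;
		} else {			/* uppercase: soft mode */
			if (soft & bit)
				return false;
			soft |= bit;
		}
	}
	return (hard & soft) == 0UL;		/* no contradictory pair */
}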
60387+
60388+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
60389+{
60390+ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
60391+
60392+ pax_flags = pax_parse_defaults();
60393+ ei_pax_flags = pax_parse_ei_pax(elf_ex);
60394+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
60395+ xattr_pax_flags = pax_parse_xattr_pax(file);
60396+
60397+ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
60398+ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
60399+ pt_pax_flags != xattr_pax_flags)
60400+ return -EINVAL;
60401+ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
60402+ pax_flags = xattr_pax_flags;
60403+ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
60404+ pax_flags = pt_pax_flags;
60405+ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
60406+ pax_flags = ei_pax_flags;
60407+
60408+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
60409+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
60410+ if ((__supported_pte_mask & _PAGE_NX))
60411+ pax_flags &= ~MF_PAX_SEGMEXEC;
60412+ else
60413+ pax_flags &= ~MF_PAX_PAGEEXEC;
60414+ }
60415+#endif
60416+
60417+ if (0 > pax_check_flags(&pax_flags))
60418+ return -EINVAL;
60419+
60420+ current->mm->pax_flags = pax_flags;
60421+ return 0;
60422+}
60423+#endif
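Distilled, pax_parse_pax_flags() resolves its four sources in a fixed order: an xattr marking beats PT_PAX_FLAGS, which beats the legacy EI_PAX byte, which beats the Kconfig defaults, and a disagreement between the two explicit modern sources (xattr and PT_PAX) rejects the binary outright. A standalone restatement of just that policy (sketch only, names invented; flag words are small, so a negative return unambiguously signals the error case here):

#include <errno.h>

#define FALLBACK (~0UL)	/* stand-in for PAX_PARSE_FLAGS_FALLBACK */

static long resolve_pax_flags(unsigned long defaults, unsigned long ei,
			      unsigned long pt, unsigned long xattr)
{
	if (pt != FALLBACK && xattr != FALLBACK && pt != xattr)
		return -EINVAL;		/* contradictory explicit markings */
	if (xattr != FALLBACK)
		return xattr;
	if (pt != FALLBACK)
		return pt;
	if (ei != FALLBACK)
		return ei;
	return defaults;
}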
60424+
60425 /*
60426 * These are the functions used to load ELF style executables and shared
60427 * libraries. There is no binary dependent code anywhere else.
60428@@ -556,6 +917,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
60429 {
60430 unsigned int random_variable = 0;
60431
60432+#ifdef CONFIG_PAX_RANDUSTACK
60433+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
60434+ return stack_top - current->mm->delta_stack;
60435+#endif
60436+
60437 if ((current->flags & PF_RANDOMIZE) &&
60438 !(current->personality & ADDR_NO_RANDOMIZE)) {
60439 random_variable = get_random_int() & STACK_RND_MASK;
60440@@ -574,7 +940,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
60441 unsigned long load_addr = 0, load_bias = 0;
60442 int load_addr_set = 0;
60443 char * elf_interpreter = NULL;
60444- unsigned long error;
60445+ unsigned long error = 0;
60446 struct elf_phdr *elf_ppnt, *elf_phdata;
60447 unsigned long elf_bss, elf_brk;
60448 int retval, i;
60449@@ -589,6 +955,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
60450 struct elfhdr elf_ex;
60451 struct elfhdr interp_elf_ex;
60452 } *loc;
60453+ unsigned long pax_task_size;
60454
60455 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
60456 if (!loc) {
60457@@ -726,6 +1093,77 @@ static int load_elf_binary(struct linux_binprm *bprm)
60458 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
60459 may depend on the personality. */
60460 SET_PERSONALITY(loc->elf_ex);
60461+
60462+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60463+ current->mm->pax_flags = 0UL;
60464+#endif
60465+
60466+#ifdef CONFIG_PAX_DLRESOLVE
60467+ current->mm->call_dl_resolve = 0UL;
60468+#endif
60469+
60470+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
60471+ current->mm->call_syscall = 0UL;
60472+#endif
60473+
60474+#ifdef CONFIG_PAX_ASLR
60475+ current->mm->delta_mmap = 0UL;
60476+ current->mm->delta_stack = 0UL;
60477+#endif
60478+
60479+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60480+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
60481+ send_sig(SIGKILL, current, 0);
60482+ goto out_free_dentry;
60483+ }
60484+#endif
60485+
60486+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60487+ pax_set_initial_flags(bprm);
60488+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60489+ if (pax_set_initial_flags_func)
60490+ (pax_set_initial_flags_func)(bprm);
60491+#endif
60492+
60493+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
60494+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
60495+ current->mm->context.user_cs_limit = PAGE_SIZE;
60496+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
60497+ }
60498+#endif
60499+
60500+#ifdef CONFIG_PAX_SEGMEXEC
60501+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
60502+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
60503+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
60504+ pax_task_size = SEGMEXEC_TASK_SIZE;
60505+ current->mm->def_flags |= VM_NOHUGEPAGE;
60506+ } else
60507+#endif
60508+
60509+ pax_task_size = TASK_SIZE;
60510+
60511+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
60512+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
60513+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
60514+ put_cpu();
60515+ }
60516+#endif
60517+
60518+#ifdef CONFIG_PAX_ASLR
60519+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
60520+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
60521+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
60522+ }
60523+#endif
60524+
60525+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
60526+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
60527+ executable_stack = EXSTACK_DISABLE_X;
60528+ current->personality &= ~READ_IMPLIES_EXEC;
60529+ } else
60530+#endif
60531+
60532 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
60533 current->personality |= READ_IMPLIES_EXEC;
60534
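For context on the pax_task_size halving above (background note; the concrete numbers assume the usual i386 3G/1G split, the architecture SEGMEXEC targets):

/* Under SEGMEXEC the user address space is split into two mirrored
 * halves: data mappings live in [0, SEGMEXEC_TASK_SIZE) while the
 * code segment window covers [SEGMEXEC_TASK_SIZE, TASK_SIZE), which
 * is how non-executable pages are emulated without NX hardware.
 * With TASK_SIZE = 0xC0000000 and SEGMEXEC_TASK_SIZE = TASK_SIZE/2,
 * each SEGMEXEC task gets an effective 1.5 GB of address space, and
 * user_cs_base/user_cs_limit above describe exactly that window.
 */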
60535@@ -815,6 +1253,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
60536 #else
60537 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
60538 #endif
60539+
60540+#ifdef CONFIG_PAX_RANDMMAP
60541+ /* PaX: randomize base address at the default exe base if requested */
60542+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
60543+#ifdef CONFIG_SPARC64
60544+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
60545+#else
60546+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
60547+#endif
60548+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
60549+ elf_flags |= MAP_FIXED;
60550+ }
60551+#endif
60552+
60553 }
60554
60555 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
60556@@ -847,9 +1299,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
60557 * allowed task size. Note that p_filesz must always be
60558 * <= p_memsz so it is only necessary to check p_memsz.
60559 */
60560- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
60561- elf_ppnt->p_memsz > TASK_SIZE ||
60562- TASK_SIZE - elf_ppnt->p_memsz < k) {
60563+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
60564+ elf_ppnt->p_memsz > pax_task_size ||
60565+ pax_task_size - elf_ppnt->p_memsz < k) {
60566 /* set_brk can never work. Avoid overflows. */
60567 send_sig(SIGKILL, current, 0);
60568 retval = -EINVAL;
60569@@ -888,17 +1340,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
60570 goto out_free_dentry;
60571 }
60572 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
60573- send_sig(SIGSEGV, current, 0);
60574- retval = -EFAULT; /* Nobody gets to see this, but.. */
60575- goto out_free_dentry;
60576+ /*
60577+ * This bss-zeroing can fail if the ELF
60578+ * file specifies odd protections. So
60579+			 * we don't check the return value.
60580+ */
60581 }
60582
60583+#ifdef CONFIG_PAX_RANDMMAP
60584+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
60585+ unsigned long start, size, flags;
60586+ vm_flags_t vm_flags;
60587+
60588+ start = ELF_PAGEALIGN(elf_brk);
60589+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
60590+ flags = MAP_FIXED | MAP_PRIVATE;
60591+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
60592+
60593+ down_write(&current->mm->mmap_sem);
60594+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
60595+ retval = -ENOMEM;
60596+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
60597+// if (current->personality & ADDR_NO_RANDOMIZE)
60598+// vm_flags |= VM_READ | VM_MAYREAD;
60599+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
60600+ retval = IS_ERR_VALUE(start) ? start : 0;
60601+ }
60602+ up_write(&current->mm->mmap_sem);
60603+ if (retval == 0)
60604+ retval = set_brk(start + size, start + size + PAGE_SIZE);
60605+ if (retval < 0) {
60606+ send_sig(SIGKILL, current, 0);
60607+ goto out_free_dentry;
60608+ }
60609+ }
60610+#endif
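The size of the randomized guard region reserved after the brk above is worth spelling out (arithmetic note only):

/* size = PAGE_SIZE + ((random & ((1UL << 22) - 1)) << 4)
 *
 * i.e. one page plus a 22-bit random value scaled by 16: at most
 * 4096 + ((1UL << 22) - 1) * 16 = 67112944 bytes, roughly 64 MiB,
 * reserved between the end of the brk area and any later mappings,
 * making brk-based heap layouts less predictable.
 */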
60611+
60612 if (elf_interpreter) {
60613- unsigned long interp_map_addr = 0;
60614-
60615 elf_entry = load_elf_interp(&loc->interp_elf_ex,
60616 interpreter,
60617- &interp_map_addr,
60618 load_bias);
60619 if (!IS_ERR((void *)elf_entry)) {
60620 /*
60621@@ -1130,7 +1610,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
60622 * Decide what to dump of a segment, part, all or none.
60623 */
60624 static unsigned long vma_dump_size(struct vm_area_struct *vma,
60625- unsigned long mm_flags)
60626+ unsigned long mm_flags, long signr)
60627 {
60628 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
60629
60630@@ -1168,7 +1648,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
60631 if (vma->vm_file == NULL)
60632 return 0;
60633
60634- if (FILTER(MAPPED_PRIVATE))
60635+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
60636 goto whole;
60637
60638 /*
60639@@ -1375,9 +1855,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
60640 {
60641 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
60642 int i = 0;
60643- do
60644+ do {
60645 i += 2;
60646- while (auxv[i - 2] != AT_NULL);
60647+ } while (auxv[i - 2] != AT_NULL);
60648 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
60649 }
60650
60651@@ -1386,7 +1866,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
60652 {
60653 mm_segment_t old_fs = get_fs();
60654 set_fs(KERNEL_DS);
60655- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
60656+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
60657 set_fs(old_fs);
60658 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
60659 }
60660@@ -2010,14 +2490,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
60661 }
60662
60663 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
60664- unsigned long mm_flags)
60665+ struct coredump_params *cprm)
60666 {
60667 struct vm_area_struct *vma;
60668 size_t size = 0;
60669
60670 for (vma = first_vma(current, gate_vma); vma != NULL;
60671 vma = next_vma(vma, gate_vma))
60672- size += vma_dump_size(vma, mm_flags);
60673+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
60674 return size;
60675 }
60676
60677@@ -2108,7 +2588,7 @@ static int elf_core_dump(struct coredump_params *cprm)
60678
60679 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
60680
60681- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
60682+ offset += elf_core_vma_data_size(gate_vma, cprm);
60683 offset += elf_core_extra_data_size();
60684 e_shoff = offset;
60685
60686@@ -2136,7 +2616,7 @@ static int elf_core_dump(struct coredump_params *cprm)
60687 phdr.p_offset = offset;
60688 phdr.p_vaddr = vma->vm_start;
60689 phdr.p_paddr = 0;
60690- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
60691+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
60692 phdr.p_memsz = vma->vm_end - vma->vm_start;
60693 offset += phdr.p_filesz;
60694 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
60695@@ -2169,7 +2649,7 @@ static int elf_core_dump(struct coredump_params *cprm)
60696 unsigned long addr;
60697 unsigned long end;
60698
60699- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
60700+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
60701
60702 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
60703 struct page *page;
60704@@ -2210,6 +2690,167 @@ out:
60705
60706 #endif /* CONFIG_ELF_CORE */
60707
60708+#ifdef CONFIG_PAX_MPROTECT
60709+/* PaX: non-PIC ELF libraries need relocations on their executable segments,
60710+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly,
60711+ * we'll remove VM_MAYWRITE for good on RELRO segments.
60712+ *
60713+ * The checks favour ld-linux.so behaviour, which operates on a per-ELF-segment
60714+ * basis, because we want to allow the common case and not the special ones.
60715+ */
60716+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
60717+{
60718+ struct elfhdr elf_h;
60719+ struct elf_phdr elf_p;
60720+ unsigned long i;
60721+ unsigned long oldflags;
60722+ bool is_textrel_rw, is_textrel_rx, is_relro;
60723+
60724+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
60725+ return;
60726+
60727+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
60728+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
60729+
60730+#ifdef CONFIG_PAX_ELFRELOCS
60731+ /* possible TEXTREL */
60732+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
60733+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
60734+#else
60735+ is_textrel_rw = false;
60736+ is_textrel_rx = false;
60737+#endif
60738+
60739+ /* possible RELRO */
60740+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
60741+
60742+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
60743+ return;
60744+
60745+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
60746+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
60747+
60748+#ifdef CONFIG_PAX_ETEXECRELOCS
60749+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
60750+#else
60751+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
60752+#endif
60753+
60754+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
60755+ !elf_check_arch(&elf_h) ||
60756+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
60757+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
60758+ return;
60759+
60760+ for (i = 0UL; i < elf_h.e_phnum; i++) {
60761+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
60762+ return;
60763+ switch (elf_p.p_type) {
60764+ case PT_DYNAMIC:
60765+ if (!is_textrel_rw && !is_textrel_rx)
60766+ continue;
60767+ i = 0UL;
60768+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
60769+ elf_dyn dyn;
60770+
60771+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
60772+ break;
60773+ if (dyn.d_tag == DT_NULL)
60774+ break;
60775+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
60776+ gr_log_textrel(vma);
60777+ if (is_textrel_rw)
60778+ vma->vm_flags |= VM_MAYWRITE;
60779+ else
60780+				/* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
60781+ vma->vm_flags &= ~VM_MAYWRITE;
60782+ break;
60783+ }
60784+ i++;
60785+ }
60786+ is_textrel_rw = false;
60787+ is_textrel_rx = false;
60788+ continue;
60789+
60790+ case PT_GNU_RELRO:
60791+ if (!is_relro)
60792+ continue;
60793+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
60794+ vma->vm_flags &= ~VM_MAYWRITE;
60795+ is_relro = false;
60796+ continue;
60797+
60798+#ifdef CONFIG_PAX_PT_PAX_FLAGS
60799+ case PT_PAX_FLAGS: {
60800+ const char *msg_mprotect = "", *msg_emutramp = "";
60801+ char *buffer_lib, *buffer_exe;
60802+
60803+ if (elf_p.p_flags & PF_NOMPROTECT)
60804+ msg_mprotect = "MPROTECT disabled";
60805+
60806+#ifdef CONFIG_PAX_EMUTRAMP
60807+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
60808+ msg_emutramp = "EMUTRAMP enabled";
60809+#endif
60810+
60811+ if (!msg_mprotect[0] && !msg_emutramp[0])
60812+ continue;
60813+
60814+ if (!printk_ratelimit())
60815+ continue;
60816+
60817+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
60818+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
60819+ if (buffer_lib && buffer_exe) {
60820+ char *path_lib, *path_exe;
60821+
60822+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
60823+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
60824+
60825+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
60826+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
60827+
60828+ }
60829+ free_page((unsigned long)buffer_exe);
60830+ free_page((unsigned long)buffer_lib);
60831+ continue;
60832+ }
60833+#endif
60834+
60835+ }
60836+ }
60837+}
60838+#endif
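The DT_TEXTREL / DF_TEXTREL walk above has a direct user-space analogue, handy for checking ahead of time whether a library would need the one-shot VM_MAYWRITE grant. A hedged sketch (function name invented; 64-bit ELF assumed, substitute Elf32_Dyn for 32-bit objects):

#include <elf.h>
#include <stdbool.h>

/* Walk an ELF object's dynamic section, as mapped in memory, and
 * report whether it requires text relocations, mirroring the kernel
 * loop above.
 */
static bool elf_needs_textrel(const Elf64_Dyn *dyn)
{
	for (; dyn->d_tag != DT_NULL; dyn++) {
		if (dyn->d_tag == DT_TEXTREL)
			return true;	/* explicit legacy marker */
		if (dyn->d_tag == DT_FLAGS && (dyn->d_un.d_val & DF_TEXTREL))
			return true;	/* modern DF_TEXTREL flag */
	}
	return false;
}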
60839+
60840+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
60841+
60842+extern int grsec_enable_log_rwxmaps;
60843+
60844+static void elf_handle_mmap(struct file *file)
60845+{
60846+ struct elfhdr elf_h;
60847+ struct elf_phdr elf_p;
60848+ unsigned long i;
60849+
60850+ if (!grsec_enable_log_rwxmaps)
60851+ return;
60852+
60853+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
60854+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
60855+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
60856+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
60857+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
60858+ return;
60859+
60860+ for (i = 0UL; i < elf_h.e_phnum; i++) {
60861+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
60862+ return;
60863+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
60864+ gr_log_ptgnustack(file);
60865+ }
60866+}
60867+#endif
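Similarly, the PT_GNU_STACK check that drives the RWX-map logging above reduces to a few lines of user-space C (sketch; function name invented, 64-bit ELF assumed):

#include <elf.h>
#include <stdbool.h>

/* True if the program headers request an executable stack. Note the
 * historical default: an object with no PT_GNU_STACK entry at all is
 * assumed to want an executable stack.
 */
static bool elf_wants_exec_stack(const Elf64_Phdr *phdr, unsigned int phnum)
{
	unsigned int i;

	for (i = 0; i < phnum; i++)
		if (phdr[i].p_type == PT_GNU_STACK)
			return (phdr[i].p_flags & PF_X) != 0;
	return true;	/* absent PT_GNU_STACK implies executable stack */
}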
60868+
60869 static int __init init_elf_binfmt(void)
60870 {
60871 register_binfmt(&elf_format);
60872diff --git a/fs/block_dev.c b/fs/block_dev.c
60873index 6d72746..536d1db 100644
60874--- a/fs/block_dev.c
60875+++ b/fs/block_dev.c
60876@@ -701,7 +701,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
60877 else if (bdev->bd_contains == bdev)
60878 return true; /* is a whole device which isn't held */
60879
60880- else if (whole->bd_holder == bd_may_claim)
60881+ else if (whole->bd_holder == (void *)bd_may_claim)
60882 return true; /* is a partition of a device that is being partitioned */
60883 else if (whole->bd_holder != NULL)
60884 return false; /* is a partition of a held device */
60885diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
60886index aeab453..48dbafc 100644
60887--- a/fs/btrfs/ctree.c
60888+++ b/fs/btrfs/ctree.c
60889@@ -1184,9 +1184,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
60890 free_extent_buffer(buf);
60891 add_root_to_dirty_list(root);
60892 } else {
60893- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
60894- parent_start = parent->start;
60895- else
60896+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
60897+ if (parent)
60898+ parent_start = parent->start;
60899+ else
60900+ parent_start = 0;
60901+ } else
60902 parent_start = 0;
60903
60904 WARN_ON(trans->transid != btrfs_header_generation(parent));
60905diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
60906index a2e90f8..5135e5f 100644
60907--- a/fs/btrfs/delayed-inode.c
60908+++ b/fs/btrfs/delayed-inode.c
60909@@ -462,7 +462,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
60910
60911 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
60912 {
60913- int seq = atomic_inc_return(&delayed_root->items_seq);
60914+ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
60915 if ((atomic_dec_return(&delayed_root->items) <
60916 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
60917 waitqueue_active(&delayed_root->wait))
60918@@ -1412,7 +1412,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
60919
60920 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
60921 {
60922- int val = atomic_read(&delayed_root->items_seq);
60923+ int val = atomic_read_unchecked(&delayed_root->items_seq);
60924
60925 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
60926 return 1;
60927@@ -1436,7 +1436,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
60928 int seq;
60929 int ret;
60930
60931- seq = atomic_read(&delayed_root->items_seq);
60932+ seq = atomic_read_unchecked(&delayed_root->items_seq);
60933
60934 ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
60935 if (ret)
60936diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
60937index f70119f..ab5894d 100644
60938--- a/fs/btrfs/delayed-inode.h
60939+++ b/fs/btrfs/delayed-inode.h
60940@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
60941 */
60942 struct list_head prepare_list;
60943 atomic_t items; /* for delayed items */
60944- atomic_t items_seq; /* for delayed items */
60945+ atomic_unchecked_t items_seq; /* for delayed items */
60946 int nodes; /* for delayed nodes */
60947 wait_queue_head_t wait;
60948 };
60949@@ -90,7 +90,7 @@ static inline void btrfs_init_delayed_root(
60950 struct btrfs_delayed_root *delayed_root)
60951 {
60952 atomic_set(&delayed_root->items, 0);
60953- atomic_set(&delayed_root->items_seq, 0);
60954+ atomic_set_unchecked(&delayed_root->items_seq, 0);
60955 delayed_root->nodes = 0;
60956 spin_lock_init(&delayed_root->lock);
60957 init_waitqueue_head(&delayed_root->wait);
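The delayed-inode hunks above, like many that follow in cachefiles, ceph and cifs, apply one mechanical rule: counters whose wraparound is harmless (sequence numbers, statistics) move from atomic_t to the PaX atomic_unchecked_t type, so that REFCOUNT overflow detection fires only on genuine reference counts. The pattern in miniature (valid only inside a grsecurity-patched tree, where these primitives are defined):

/* A statistics counter opted out of PaX REFCOUNT checking. */
static atomic_unchecked_t stat_events = ATOMIC_INIT(0);

static void note_event(void)
{
	/* wraparound of a pure statistic is benign, so the _unchecked
	 * variant keeps the overflow detector quiet here
	 */
	atomic_inc_unchecked(&stat_events);
}

static int read_events(void)
{
	return atomic_read_unchecked(&stat_events);
}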
60958diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
60959index 47aceb4..7d28b1c 100644
60960--- a/fs/btrfs/ioctl.c
60961+++ b/fs/btrfs/ioctl.c
60962@@ -3965,9 +3965,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
60963 for (i = 0; i < num_types; i++) {
60964 struct btrfs_space_info *tmp;
60965
60966+ /* Don't copy in more than we allocated */
60967 if (!slot_count)
60968 break;
60969
60970+ slot_count--;
60971+
60972 info = NULL;
60973 rcu_read_lock();
60974 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
60975@@ -3989,10 +3992,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
60976 memcpy(dest, &space, sizeof(space));
60977 dest++;
60978 space_args.total_spaces++;
60979- slot_count--;
60980 }
60981- if (!slot_count)
60982- break;
60983 }
60984 up_read(&info->groups_sem);
60985 }
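The btrfs_ioctl_space_info change above moves the slot budget decrement ahead of the copy, so the nested rcu loop can never emit more entries than the user buffer was sized for. The shape of that fix in isolation (sketch, invented names):

#include <errno.h>

/* Consume one slot of the caller-sized output budget before writing
 * an entry; fail cleanly once the budget is gone rather than after
 * overrunning the buffer.
 */
static int claim_output_slot(unsigned int *slots_left)
{
	if (*slots_left == 0)
		return -ENOSPC;
	(*slots_left)--;
	return 0;
}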
60986diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
60987index 8e16bca..6eabd9e 100644
60988--- a/fs/btrfs/super.c
60989+++ b/fs/btrfs/super.c
60990@@ -270,7 +270,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
60991 function, line, errstr);
60992 return;
60993 }
60994- ACCESS_ONCE(trans->transaction->aborted) = errno;
60995+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
60996 /* Wake up anybody who may be waiting on this transaction */
60997 wake_up(&root->fs_info->transaction_wait);
60998 wake_up(&root->fs_info->transaction_blocked_wait);
60999diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
61000index 7869936..7e153dc 100644
61001--- a/fs/btrfs/sysfs.c
61002+++ b/fs/btrfs/sysfs.c
61003@@ -475,7 +475,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
61004 for (set = 0; set < FEAT_MAX; set++) {
61005 int i;
61006 struct attribute *attrs[2];
61007- struct attribute_group agroup = {
61008+ attribute_group_no_const agroup = {
61009 .name = "features",
61010 .attrs = attrs,
61011 };
61012diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
61013index 7f5b41b..e589c13 100644
61014--- a/fs/btrfs/tree-log.h
61015+++ b/fs/btrfs/tree-log.h
61016@@ -41,7 +41,7 @@ static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx)
61017 static inline void btrfs_set_log_full_commit(struct btrfs_fs_info *fs_info,
61018 struct btrfs_trans_handle *trans)
61019 {
61020- ACCESS_ONCE(fs_info->last_trans_log_full_commit) = trans->transid;
61021+ ACCESS_ONCE_RW(fs_info->last_trans_log_full_commit) = trans->transid;
61022 }
61023
61024 static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
61025diff --git a/fs/buffer.c b/fs/buffer.c
61026index eba6e4f..af1182c 100644
61027--- a/fs/buffer.c
61028+++ b/fs/buffer.c
61029@@ -3429,7 +3429,7 @@ void __init buffer_init(void)
61030 bh_cachep = kmem_cache_create("buffer_head",
61031 sizeof(struct buffer_head), 0,
61032 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
61033- SLAB_MEM_SPREAD),
61034+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
61035 NULL);
61036
61037 /*
61038diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
61039index d749731..dd333a6 100644
61040--- a/fs/cachefiles/bind.c
61041+++ b/fs/cachefiles/bind.c
61042@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
61043 args);
61044
61045 /* start by checking things over */
61046- ASSERT(cache->fstop_percent >= 0 &&
61047- cache->fstop_percent < cache->fcull_percent &&
61048+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
61049 cache->fcull_percent < cache->frun_percent &&
61050 cache->frun_percent < 100);
61051
61052- ASSERT(cache->bstop_percent >= 0 &&
61053- cache->bstop_percent < cache->bcull_percent &&
61054+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
61055 cache->bcull_percent < cache->brun_percent &&
61056 cache->brun_percent < 100);
61057
61058diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
61059index b078d30..db23012 100644
61060--- a/fs/cachefiles/daemon.c
61061+++ b/fs/cachefiles/daemon.c
61062@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
61063 if (n > buflen)
61064 return -EMSGSIZE;
61065
61066- if (copy_to_user(_buffer, buffer, n) != 0)
61067+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
61068 return -EFAULT;
61069
61070 return n;
61071@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
61072 if (test_bit(CACHEFILES_DEAD, &cache->flags))
61073 return -EIO;
61074
61075- if (datalen < 0 || datalen > PAGE_SIZE - 1)
61076+ if (datalen > PAGE_SIZE - 1)
61077 return -EOPNOTSUPP;
61078
61079 /* drag the command string into the kernel so we can parse it */
61080@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
61081 if (args[0] != '%' || args[1] != '\0')
61082 return -EINVAL;
61083
61084- if (fstop < 0 || fstop >= cache->fcull_percent)
61085+ if (fstop >= cache->fcull_percent)
61086 return cachefiles_daemon_range_error(cache, args);
61087
61088 cache->fstop_percent = fstop;
61089@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
61090 if (args[0] != '%' || args[1] != '\0')
61091 return -EINVAL;
61092
61093- if (bstop < 0 || bstop >= cache->bcull_percent)
61094+ if (bstop >= cache->bcull_percent)
61095 return cachefiles_daemon_range_error(cache, args);
61096
61097 cache->bstop_percent = bstop;
61098diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
61099index 3d50998..0550d67 100644
61100--- a/fs/cachefiles/internal.h
61101+++ b/fs/cachefiles/internal.h
61102@@ -66,7 +66,7 @@ struct cachefiles_cache {
61103 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
61104 struct rb_root active_nodes; /* active nodes (can't be culled) */
61105 rwlock_t active_lock; /* lock for active_nodes */
61106- atomic_t gravecounter; /* graveyard uniquifier */
61107+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
61108 unsigned frun_percent; /* when to stop culling (% files) */
61109 unsigned fcull_percent; /* when to start culling (% files) */
61110 unsigned fstop_percent; /* when to stop allocating (% files) */
61111@@ -178,19 +178,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
61112 * proc.c
61113 */
61114 #ifdef CONFIG_CACHEFILES_HISTOGRAM
61115-extern atomic_t cachefiles_lookup_histogram[HZ];
61116-extern atomic_t cachefiles_mkdir_histogram[HZ];
61117-extern atomic_t cachefiles_create_histogram[HZ];
61118+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
61119+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
61120+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
61121
61122 extern int __init cachefiles_proc_init(void);
61123 extern void cachefiles_proc_cleanup(void);
61124 static inline
61125-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
61126+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
61127 {
61128 unsigned long jif = jiffies - start_jif;
61129 if (jif >= HZ)
61130 jif = HZ - 1;
61131- atomic_inc(&histogram[jif]);
61132+ atomic_inc_unchecked(&histogram[jif]);
61133 }
61134
61135 #else
61136diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
61137index 5bf2b41..85b93f9 100644
61138--- a/fs/cachefiles/namei.c
61139+++ b/fs/cachefiles/namei.c
61140@@ -312,7 +312,7 @@ try_again:
61141 /* first step is to make up a grave dentry in the graveyard */
61142 sprintf(nbuffer, "%08x%08x",
61143 (uint32_t) get_seconds(),
61144- (uint32_t) atomic_inc_return(&cache->gravecounter));
61145+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
61146
61147 /* do the multiway lock magic */
61148 trap = lock_rename(cache->graveyard, dir);
61149diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
61150index eccd339..4c1d995 100644
61151--- a/fs/cachefiles/proc.c
61152+++ b/fs/cachefiles/proc.c
61153@@ -14,9 +14,9 @@
61154 #include <linux/seq_file.h>
61155 #include "internal.h"
61156
61157-atomic_t cachefiles_lookup_histogram[HZ];
61158-atomic_t cachefiles_mkdir_histogram[HZ];
61159-atomic_t cachefiles_create_histogram[HZ];
61160+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
61161+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
61162+atomic_unchecked_t cachefiles_create_histogram[HZ];
61163
61164 /*
61165 * display the latency histogram
61166@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
61167 return 0;
61168 default:
61169 index = (unsigned long) v - 3;
61170- x = atomic_read(&cachefiles_lookup_histogram[index]);
61171- y = atomic_read(&cachefiles_mkdir_histogram[index]);
61172- z = atomic_read(&cachefiles_create_histogram[index]);
61173+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
61174+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
61175+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
61176 if (x == 0 && y == 0 && z == 0)
61177 return 0;
61178
61179diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
61180index 4b1fb5c..0d2a699 100644
61181--- a/fs/cachefiles/rdwr.c
61182+++ b/fs/cachefiles/rdwr.c
61183@@ -943,7 +943,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
61184 old_fs = get_fs();
61185 set_fs(KERNEL_DS);
61186 ret = file->f_op->write(
61187- file, (const void __user *) data, len, &pos);
61188+ file, (const void __force_user *) data, len, &pos);
61189 set_fs(old_fs);
61190 kunmap(page);
61191 file_end_write(file);
61192diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
61193index c29d6ae..719b9bb 100644
61194--- a/fs/ceph/dir.c
61195+++ b/fs/ceph/dir.c
61196@@ -129,6 +129,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
61197 struct dentry *dentry, *last;
61198 struct ceph_dentry_info *di;
61199 int err = 0;
61200+ char d_name[DNAME_INLINE_LEN];
61201+ const unsigned char *name;
61202
61203 /* claim ref on last dentry we returned */
61204 last = fi->dentry;
61205@@ -192,7 +194,12 @@ more:
61206
61207 dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos,
61208 dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
61209- if (!dir_emit(ctx, dentry->d_name.name,
61210+ name = dentry->d_name.name;
61211+ if (name == dentry->d_iname) {
61212+ memcpy(d_name, name, dentry->d_name.len);
61213+ name = d_name;
61214+ }
61215+ if (!dir_emit(ctx, name,
61216 dentry->d_name.len,
61217 ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
61218 dentry->d_inode->i_mode >> 12)) {
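The local d_name buffer introduced above guards against a name-stability race (background note, rationale inferred from the surrounding code):

/* Short names are stored inline in dentry->d_iname, and a concurrent
 * rename can rewrite that inline storage while dir_emit() is still
 * copying the name out; snapshotting such names into a stack buffer
 * of DNAME_INLINE_LEN bytes first makes the emit safe. Longer,
 * out-of-line names are not rewritten in place by rename, so they
 * can be passed through directly.
 */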
61219@@ -250,7 +257,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
61220 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
61221 struct ceph_mds_client *mdsc = fsc->mdsc;
61222 unsigned frag = fpos_frag(ctx->pos);
61223- int off = fpos_off(ctx->pos);
61224+ unsigned int off = fpos_off(ctx->pos);
61225 int err;
61226 u32 ftype;
61227 struct ceph_mds_reply_info_parsed *rinfo;
61228diff --git a/fs/ceph/super.c b/fs/ceph/super.c
61229index 06150fd..192061b 100644
61230--- a/fs/ceph/super.c
61231+++ b/fs/ceph/super.c
61232@@ -895,7 +895,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
61233 /*
61234 * construct our own bdi so we can control readahead, etc.
61235 */
61236-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
61237+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
61238
61239 static int ceph_register_bdi(struct super_block *sb,
61240 struct ceph_fs_client *fsc)
61241@@ -912,7 +912,7 @@ static int ceph_register_bdi(struct super_block *sb,
61242 default_backing_dev_info.ra_pages;
61243
61244 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
61245- atomic_long_inc_return(&bdi_seq));
61246+ atomic_long_inc_return_unchecked(&bdi_seq));
61247 if (!err)
61248 sb->s_bdi = &fsc->backing_dev_info;
61249 return err;
61250diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
61251index f3ac415..3d2420c 100644
61252--- a/fs/cifs/cifs_debug.c
61253+++ b/fs/cifs/cifs_debug.c
61254@@ -286,8 +286,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
61255
61256 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
61257 #ifdef CONFIG_CIFS_STATS2
61258- atomic_set(&totBufAllocCount, 0);
61259- atomic_set(&totSmBufAllocCount, 0);
61260+ atomic_set_unchecked(&totBufAllocCount, 0);
61261+ atomic_set_unchecked(&totSmBufAllocCount, 0);
61262 #endif /* CONFIG_CIFS_STATS2 */
61263 spin_lock(&cifs_tcp_ses_lock);
61264 list_for_each(tmp1, &cifs_tcp_ses_list) {
61265@@ -300,7 +300,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
61266 tcon = list_entry(tmp3,
61267 struct cifs_tcon,
61268 tcon_list);
61269- atomic_set(&tcon->num_smbs_sent, 0);
61270+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
61271 if (server->ops->clear_stats)
61272 server->ops->clear_stats(tcon);
61273 }
61274@@ -332,8 +332,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
61275 smBufAllocCount.counter, cifs_min_small);
61276 #ifdef CONFIG_CIFS_STATS2
61277 seq_printf(m, "Total Large %d Small %d Allocations\n",
61278- atomic_read(&totBufAllocCount),
61279- atomic_read(&totSmBufAllocCount));
61280+ atomic_read_unchecked(&totBufAllocCount),
61281+ atomic_read_unchecked(&totSmBufAllocCount));
61282 #endif /* CONFIG_CIFS_STATS2 */
61283
61284 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
61285@@ -362,7 +362,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
61286 if (tcon->need_reconnect)
61287 seq_puts(m, "\tDISCONNECTED ");
61288 seq_printf(m, "\nSMBs: %d",
61289- atomic_read(&tcon->num_smbs_sent));
61290+ atomic_read_unchecked(&tcon->num_smbs_sent));
61291 if (server->ops->print_stats)
61292 server->ops->print_stats(m, tcon);
61293 }
61294diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
61295index 8883980..c8ade72 100644
61296--- a/fs/cifs/cifsfs.c
61297+++ b/fs/cifs/cifsfs.c
61298@@ -1072,7 +1072,7 @@ cifs_init_request_bufs(void)
61299 */
61300 cifs_req_cachep = kmem_cache_create("cifs_request",
61301 CIFSMaxBufSize + max_hdr_size, 0,
61302- SLAB_HWCACHE_ALIGN, NULL);
61303+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
61304 if (cifs_req_cachep == NULL)
61305 return -ENOMEM;
61306
61307@@ -1099,7 +1099,7 @@ cifs_init_request_bufs(void)
61308 efficient to alloc 1 per page off the slab compared to 17K (5page)
61309 alloc of large cifs buffers even when page debugging is on */
61310 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
61311- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
61312+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
61313 NULL);
61314 if (cifs_sm_req_cachep == NULL) {
61315 mempool_destroy(cifs_req_poolp);
61316@@ -1184,8 +1184,8 @@ init_cifs(void)
61317 atomic_set(&bufAllocCount, 0);
61318 atomic_set(&smBufAllocCount, 0);
61319 #ifdef CONFIG_CIFS_STATS2
61320- atomic_set(&totBufAllocCount, 0);
61321- atomic_set(&totSmBufAllocCount, 0);
61322+ atomic_set_unchecked(&totBufAllocCount, 0);
61323+ atomic_set_unchecked(&totSmBufAllocCount, 0);
61324 #endif /* CONFIG_CIFS_STATS2 */
61325
61326 atomic_set(&midCount, 0);
61327diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
61328index de6aed8..a0a76fd 100644
61329--- a/fs/cifs/cifsglob.h
61330+++ b/fs/cifs/cifsglob.h
61331@@ -807,35 +807,35 @@ struct cifs_tcon {
61332 __u16 Flags; /* optional support bits */
61333 enum statusEnum tidStatus;
61334 #ifdef CONFIG_CIFS_STATS
61335- atomic_t num_smbs_sent;
61336+ atomic_unchecked_t num_smbs_sent;
61337 union {
61338 struct {
61339- atomic_t num_writes;
61340- atomic_t num_reads;
61341- atomic_t num_flushes;
61342- atomic_t num_oplock_brks;
61343- atomic_t num_opens;
61344- atomic_t num_closes;
61345- atomic_t num_deletes;
61346- atomic_t num_mkdirs;
61347- atomic_t num_posixopens;
61348- atomic_t num_posixmkdirs;
61349- atomic_t num_rmdirs;
61350- atomic_t num_renames;
61351- atomic_t num_t2renames;
61352- atomic_t num_ffirst;
61353- atomic_t num_fnext;
61354- atomic_t num_fclose;
61355- atomic_t num_hardlinks;
61356- atomic_t num_symlinks;
61357- atomic_t num_locks;
61358- atomic_t num_acl_get;
61359- atomic_t num_acl_set;
61360+ atomic_unchecked_t num_writes;
61361+ atomic_unchecked_t num_reads;
61362+ atomic_unchecked_t num_flushes;
61363+ atomic_unchecked_t num_oplock_brks;
61364+ atomic_unchecked_t num_opens;
61365+ atomic_unchecked_t num_closes;
61366+ atomic_unchecked_t num_deletes;
61367+ atomic_unchecked_t num_mkdirs;
61368+ atomic_unchecked_t num_posixopens;
61369+ atomic_unchecked_t num_posixmkdirs;
61370+ atomic_unchecked_t num_rmdirs;
61371+ atomic_unchecked_t num_renames;
61372+ atomic_unchecked_t num_t2renames;
61373+ atomic_unchecked_t num_ffirst;
61374+ atomic_unchecked_t num_fnext;
61375+ atomic_unchecked_t num_fclose;
61376+ atomic_unchecked_t num_hardlinks;
61377+ atomic_unchecked_t num_symlinks;
61378+ atomic_unchecked_t num_locks;
61379+ atomic_unchecked_t num_acl_get;
61380+ atomic_unchecked_t num_acl_set;
61381 } cifs_stats;
61382 #ifdef CONFIG_CIFS_SMB2
61383 struct {
61384- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
61385- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
61386+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
61387+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
61388 } smb2_stats;
61389 #endif /* CONFIG_CIFS_SMB2 */
61390 } stats;
61391@@ -1172,7 +1172,7 @@ convert_delimiter(char *path, char delim)
61392 }
61393
61394 #ifdef CONFIG_CIFS_STATS
61395-#define cifs_stats_inc atomic_inc
61396+#define cifs_stats_inc atomic_inc_unchecked
61397
61398 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
61399 unsigned int bytes)
61400@@ -1538,8 +1538,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
61401 /* Various Debug counters */
61402 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
61403 #ifdef CONFIG_CIFS_STATS2
61404-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
61405-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
61406+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
61407+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
61408 #endif
61409 GLOBAL_EXTERN atomic_t smBufAllocCount;
61410 GLOBAL_EXTERN atomic_t midCount;
61411diff --git a/fs/cifs/file.c b/fs/cifs/file.c
61412index e90a1e9..908699d 100644
61413--- a/fs/cifs/file.c
61414+++ b/fs/cifs/file.c
61415@@ -1900,10 +1900,14 @@ static int cifs_writepages(struct address_space *mapping,
61416 index = mapping->writeback_index; /* Start from prev offset */
61417 end = -1;
61418 } else {
61419- index = wbc->range_start >> PAGE_CACHE_SHIFT;
61420- end = wbc->range_end >> PAGE_CACHE_SHIFT;
61421- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
61422+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
61423 range_whole = true;
61424+ index = 0;
61425+ end = ULONG_MAX;
61426+ } else {
61427+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
61428+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
61429+ }
61430 scanned = true;
61431 }
61432 retry:
61433diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
61434index 3b0c62e..f7d090c 100644
61435--- a/fs/cifs/misc.c
61436+++ b/fs/cifs/misc.c
61437@@ -170,7 +170,7 @@ cifs_buf_get(void)
61438 memset(ret_buf, 0, buf_size + 3);
61439 atomic_inc(&bufAllocCount);
61440 #ifdef CONFIG_CIFS_STATS2
61441- atomic_inc(&totBufAllocCount);
61442+ atomic_inc_unchecked(&totBufAllocCount);
61443 #endif /* CONFIG_CIFS_STATS2 */
61444 }
61445
61446@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
61447 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
61448 atomic_inc(&smBufAllocCount);
61449 #ifdef CONFIG_CIFS_STATS2
61450- atomic_inc(&totSmBufAllocCount);
61451+ atomic_inc_unchecked(&totSmBufAllocCount);
61452 #endif /* CONFIG_CIFS_STATS2 */
61453
61454 }
61455diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
61456index d1fdfa8..94558f8 100644
61457--- a/fs/cifs/smb1ops.c
61458+++ b/fs/cifs/smb1ops.c
61459@@ -626,27 +626,27 @@ static void
61460 cifs_clear_stats(struct cifs_tcon *tcon)
61461 {
61462 #ifdef CONFIG_CIFS_STATS
61463- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
61464- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
61465- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
61466- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
61467- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
61468- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
61469- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
61470- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
61471- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
61472- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
61473- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
61474- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
61475- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
61476- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
61477- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
61478- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
61479- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
61480- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
61481- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
61482- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
61483- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
61484+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
61485+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
61486+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
61487+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
61488+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
61489+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
61490+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
61491+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
61492+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
61493+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
61494+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
61495+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
61496+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
61497+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
61498+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
61499+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
61500+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
61501+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
61502+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
61503+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
61504+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
61505 #endif
61506 }
61507
61508@@ -655,36 +655,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
61509 {
61510 #ifdef CONFIG_CIFS_STATS
61511 seq_printf(m, " Oplocks breaks: %d",
61512- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
61513+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
61514 seq_printf(m, "\nReads: %d Bytes: %llu",
61515- atomic_read(&tcon->stats.cifs_stats.num_reads),
61516+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
61517 (long long)(tcon->bytes_read));
61518 seq_printf(m, "\nWrites: %d Bytes: %llu",
61519- atomic_read(&tcon->stats.cifs_stats.num_writes),
61520+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
61521 (long long)(tcon->bytes_written));
61522 seq_printf(m, "\nFlushes: %d",
61523- atomic_read(&tcon->stats.cifs_stats.num_flushes));
61524+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
61525 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
61526- atomic_read(&tcon->stats.cifs_stats.num_locks),
61527- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
61528- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
61529+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
61530+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
61531+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
61532 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
61533- atomic_read(&tcon->stats.cifs_stats.num_opens),
61534- atomic_read(&tcon->stats.cifs_stats.num_closes),
61535- atomic_read(&tcon->stats.cifs_stats.num_deletes));
61536+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
61537+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
61538+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
61539 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
61540- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
61541- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
61542+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
61543+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
61544 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
61545- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
61546- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
61547+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
61548+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
61549 seq_printf(m, "\nRenames: %d T2 Renames %d",
61550- atomic_read(&tcon->stats.cifs_stats.num_renames),
61551- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
61552+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
61553+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
61554 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
61555- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
61556- atomic_read(&tcon->stats.cifs_stats.num_fnext),
61557- atomic_read(&tcon->stats.cifs_stats.num_fclose));
61558+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
61559+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
61560+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
61561 #endif
61562 }
61563
61564diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
61565index 787844b..8e7bc7d 100644
61566--- a/fs/cifs/smb2ops.c
61567+++ b/fs/cifs/smb2ops.c
61568@@ -364,8 +364,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
61569 #ifdef CONFIG_CIFS_STATS
61570 int i;
61571 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
61572- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
61573- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
61574+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
61575+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
61576 }
61577 #endif
61578 }
61579@@ -405,65 +405,65 @@ static void
61580 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
61581 {
61582 #ifdef CONFIG_CIFS_STATS
61583- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
61584- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
61585+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
61586+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
61587 seq_printf(m, "\nNegotiates: %d sent %d failed",
61588- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
61589- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
61590+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
61591+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
61592 seq_printf(m, "\nSessionSetups: %d sent %d failed",
61593- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
61594- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
61595+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
61596+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
61597 seq_printf(m, "\nLogoffs: %d sent %d failed",
61598- atomic_read(&sent[SMB2_LOGOFF_HE]),
61599- atomic_read(&failed[SMB2_LOGOFF_HE]));
61600+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
61601+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
61602 seq_printf(m, "\nTreeConnects: %d sent %d failed",
61603- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
61604- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
61605+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
61606+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
61607 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
61608- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
61609- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
61610+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
61611+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
61612 seq_printf(m, "\nCreates: %d sent %d failed",
61613- atomic_read(&sent[SMB2_CREATE_HE]),
61614- atomic_read(&failed[SMB2_CREATE_HE]));
61615+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
61616+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
61617 seq_printf(m, "\nCloses: %d sent %d failed",
61618- atomic_read(&sent[SMB2_CLOSE_HE]),
61619- atomic_read(&failed[SMB2_CLOSE_HE]));
61620+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
61621+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
61622 seq_printf(m, "\nFlushes: %d sent %d failed",
61623- atomic_read(&sent[SMB2_FLUSH_HE]),
61624- atomic_read(&failed[SMB2_FLUSH_HE]));
61625+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
61626+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
61627 seq_printf(m, "\nReads: %d sent %d failed",
61628- atomic_read(&sent[SMB2_READ_HE]),
61629- atomic_read(&failed[SMB2_READ_HE]));
61630+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
61631+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
61632 seq_printf(m, "\nWrites: %d sent %d failed",
61633- atomic_read(&sent[SMB2_WRITE_HE]),
61634- atomic_read(&failed[SMB2_WRITE_HE]));
61635+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
61636+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
61637 seq_printf(m, "\nLocks: %d sent %d failed",
61638- atomic_read(&sent[SMB2_LOCK_HE]),
61639- atomic_read(&failed[SMB2_LOCK_HE]));
61640+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
61641+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
61642 seq_printf(m, "\nIOCTLs: %d sent %d failed",
61643- atomic_read(&sent[SMB2_IOCTL_HE]),
61644- atomic_read(&failed[SMB2_IOCTL_HE]));
61645+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
61646+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
61647 seq_printf(m, "\nCancels: %d sent %d failed",
61648- atomic_read(&sent[SMB2_CANCEL_HE]),
61649- atomic_read(&failed[SMB2_CANCEL_HE]));
61650+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
61651+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
61652 seq_printf(m, "\nEchos: %d sent %d failed",
61653- atomic_read(&sent[SMB2_ECHO_HE]),
61654- atomic_read(&failed[SMB2_ECHO_HE]));
61655+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
61656+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
61657 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
61658- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
61659- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
61660+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
61661+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
61662 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
61663- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
61664- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
61665+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
61666+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
61667 seq_printf(m, "\nQueryInfos: %d sent %d failed",
61668- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
61669- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
61670+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
61671+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
61672 seq_printf(m, "\nSetInfos: %d sent %d failed",
61673- atomic_read(&sent[SMB2_SET_INFO_HE]),
61674- atomic_read(&failed[SMB2_SET_INFO_HE]));
61675+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
61676+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
61677 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
61678- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
61679- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
61680+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
61681+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
61682 #endif
61683 }
61684
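
The smb2ops hunk above converts pure statistics counters to the *_unchecked atomic variants: under PAX_REFCOUNT, plain atomic_t traps on signed overflow, so counters that may legitimately wrap have to opt out of the check. A minimal userspace sketch of the idea, with standard C11 atomics standing in for the kernel's atomic_unchecked_t (sent_stat is a hypothetical name):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int sent_stat;   /* stands in for atomic_unchecked_t */

int main(void)
{
        /* statistics only: wraparound is harmless here, so no overflow
         * trap is wanted -- hence the _unchecked variant in the patch */
        atomic_fetch_add_explicit(&sent_stat, 1, memory_order_relaxed);
        printf("sent: %d\n", atomic_load_explicit(&sent_stat,
                                                  memory_order_relaxed));
        return 0;
}
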
61685diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
61686index b0b260d..c8927e1 100644
61687--- a/fs/cifs/smb2pdu.c
61688+++ b/fs/cifs/smb2pdu.c
61689@@ -2105,8 +2105,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
61690 default:
61691 cifs_dbg(VFS, "info level %u isn't supported\n",
61692 srch_inf->info_level);
61693- rc = -EINVAL;
61694- goto qdir_exit;
61695+ return -EINVAL;
61696 }
61697
61698 req->FileIndex = cpu_to_le32(index);
61699diff --git a/fs/coda/cache.c b/fs/coda/cache.c
61700index 1da168c..8bc7ff6 100644
61701--- a/fs/coda/cache.c
61702+++ b/fs/coda/cache.c
61703@@ -24,7 +24,7 @@
61704 #include "coda_linux.h"
61705 #include "coda_cache.h"
61706
61707-static atomic_t permission_epoch = ATOMIC_INIT(0);
61708+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
61709
61710 /* replace or extend an acl cache hit */
61711 void coda_cache_enter(struct inode *inode, int mask)
61712@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
61713 struct coda_inode_info *cii = ITOC(inode);
61714
61715 spin_lock(&cii->c_lock);
61716- cii->c_cached_epoch = atomic_read(&permission_epoch);
61717+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
61718 if (!uid_eq(cii->c_uid, current_fsuid())) {
61719 cii->c_uid = current_fsuid();
61720 cii->c_cached_perm = mask;
61721@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
61722 {
61723 struct coda_inode_info *cii = ITOC(inode);
61724 spin_lock(&cii->c_lock);
61725- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
61726+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
61727 spin_unlock(&cii->c_lock);
61728 }
61729
61730 /* remove all acl caches */
61731 void coda_cache_clear_all(struct super_block *sb)
61732 {
61733- atomic_inc(&permission_epoch);
61734+ atomic_inc_unchecked(&permission_epoch);
61735 }
61736
61737
61738@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
61739 spin_lock(&cii->c_lock);
61740 hit = (mask & cii->c_cached_perm) == mask &&
61741 uid_eq(cii->c_uid, current_fsuid()) &&
61742- cii->c_cached_epoch == atomic_read(&permission_epoch);
61743+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
61744 spin_unlock(&cii->c_lock);
61745
61746 return hit;
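
The coda hunk applies the same unchecked-counter conversion to permission_epoch, and in doing so illustrates an epoch-based invalidation pattern: each cache entry records the epoch it was filled at, and bumping the global epoch invalidates every entry in O(1) without touching them. A hedged userspace sketch (all names here are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint permission_epoch;

struct cache_entry {
        unsigned cached_epoch;
        int cached_perm;
};

static void cache_fill(struct cache_entry *e, int perm)
{
        e->cached_perm = perm;
        e->cached_epoch = atomic_load(&permission_epoch);
}

static bool cache_valid(const struct cache_entry *e)
{
        return e->cached_epoch == atomic_load(&permission_epoch);
}

static void cache_clear_all(void)
{
        atomic_fetch_add(&permission_epoch, 1);  /* O(1) global invalidation */
}

int main(void)
{
        struct cache_entry e;

        cache_fill(&e, 4);
        cache_clear_all();
        return cache_valid(&e);   /* 0: the entry is now stale */
}
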
61747diff --git a/fs/compat.c b/fs/compat.c
61748index 66d3d3c..9c10175 100644
61749--- a/fs/compat.c
61750+++ b/fs/compat.c
61751@@ -54,7 +54,7 @@
61752 #include <asm/ioctls.h>
61753 #include "internal.h"
61754
61755-int compat_log = 1;
61756+int compat_log = 0;
61757
61758 int compat_printk(const char *fmt, ...)
61759 {
61760@@ -512,7 +512,7 @@ COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_reqs, u32 __user *, ctx32p)
61761
61762 set_fs(KERNEL_DS);
61763 /* The __user pointer cast is valid because of the set_fs() */
61764- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
61765+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
61766 set_fs(oldfs);
61767 /* truncating is ok because it's a user address */
61768 if (!ret)
61769@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
61770 goto out;
61771
61772 ret = -EINVAL;
61773- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
61774+ if (nr_segs > UIO_MAXIOV)
61775 goto out;
61776 if (nr_segs > fast_segs) {
61777 ret = -ENOMEM;
61778@@ -850,6 +850,7 @@ struct compat_old_linux_dirent {
61779 struct compat_readdir_callback {
61780 struct dir_context ctx;
61781 struct compat_old_linux_dirent __user *dirent;
61782+ struct file * file;
61783 int result;
61784 };
61785
61786@@ -867,6 +868,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
61787 buf->result = -EOVERFLOW;
61788 return -EOVERFLOW;
61789 }
61790+
61791+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61792+ return 0;
61793+
61794 buf->result++;
61795 dirent = buf->dirent;
61796 if (!access_ok(VERIFY_WRITE, dirent,
61797@@ -898,6 +903,7 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
61798 if (!f.file)
61799 return -EBADF;
61800
61801+ buf.file = f.file;
61802 error = iterate_dir(f.file, &buf.ctx);
61803 if (buf.result)
61804 error = buf.result;
61805@@ -917,6 +923,7 @@ struct compat_getdents_callback {
61806 struct dir_context ctx;
61807 struct compat_linux_dirent __user *current_dir;
61808 struct compat_linux_dirent __user *previous;
61809+ struct file * file;
61810 int count;
61811 int error;
61812 };
61813@@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
61814 buf->error = -EOVERFLOW;
61815 return -EOVERFLOW;
61816 }
61817+
61818+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61819+ return 0;
61820+
61821 dirent = buf->previous;
61822 if (dirent) {
61823 if (__put_user(offset, &dirent->d_off))
61824@@ -983,6 +994,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd,
61825 if (!f.file)
61826 return -EBADF;
61827
61828+ buf.file = f.file;
61829 error = iterate_dir(f.file, &buf.ctx);
61830 if (error >= 0)
61831 error = buf.error;
61832@@ -1003,6 +1015,7 @@ struct compat_getdents_callback64 {
61833 struct dir_context ctx;
61834 struct linux_dirent64 __user *current_dir;
61835 struct linux_dirent64 __user *previous;
61836+ struct file * file;
61837 int count;
61838 int error;
61839 };
61840@@ -1019,6 +1032,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
61841 buf->error = -EINVAL; /* only used if we fail.. */
61842 if (reclen > buf->count)
61843 return -EINVAL;
61844+
61845+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61846+ return 0;
61847+
61848 dirent = buf->previous;
61849
61850 if (dirent) {
61851@@ -1068,6 +1085,7 @@ COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd,
61852 if (!f.file)
61853 return -EBADF;
61854
61855+ buf.file = f.file;
61856 error = iterate_dir(f.file, &buf.ctx);
61857 if (error >= 0)
61858 error = buf.error;
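
The fs/compat.c changes thread a struct file pointer into each getdents callback context so gr_acl_handle_filldir() can veto individual directory entries; returning 0 from the callback skips the entry while iteration continues. A rough userspace model of that filtering shape (filter_entry() is a made-up stand-in for the grsecurity hook):

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

struct dir_ctx {
        const char *hidden;   /* entry this context must suppress */
};

/* stand-in for gr_acl_handle_filldir(); false means "hide" */
static bool filter_entry(const struct dir_ctx *ctx, const char *name)
{
        return strcmp(name, ctx->hidden) != 0;
}

static int filldir(struct dir_ctx *ctx, const char *name)
{
        if (!filter_entry(ctx, name))
                return 0;        /* skip silently, keep iterating */
        printf("%s\n", name);
        return 0;
}

int main(void)
{
        struct dir_ctx ctx = { .hidden = "secret" };
        const char *entries[] = { "a", "secret", "b" };

        for (int i = 0; i < 3; i++)
                filldir(&ctx, entries[i]);
        return 0;                /* prints "a" and "b" only */
}
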
61859diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
61860index 4d24d17..4f8c09e 100644
61861--- a/fs/compat_binfmt_elf.c
61862+++ b/fs/compat_binfmt_elf.c
61863@@ -30,11 +30,13 @@
61864 #undef elf_phdr
61865 #undef elf_shdr
61866 #undef elf_note
61867+#undef elf_dyn
61868 #undef elf_addr_t
61869 #define elfhdr elf32_hdr
61870 #define elf_phdr elf32_phdr
61871 #define elf_shdr elf32_shdr
61872 #define elf_note elf32_note
61873+#define elf_dyn Elf32_Dyn
61874 #define elf_addr_t Elf32_Addr
61875
61876 /*
61877diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
61878index e822890..fed89d9 100644
61879--- a/fs/compat_ioctl.c
61880+++ b/fs/compat_ioctl.c
61881@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
61882 return -EFAULT;
61883 if (__get_user(udata, &ss32->iomem_base))
61884 return -EFAULT;
61885- ss.iomem_base = compat_ptr(udata);
61886+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
61887 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
61888 __get_user(ss.port_high, &ss32->port_high))
61889 return -EFAULT;
61890@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
61891 for (i = 0; i < nmsgs; i++) {
61892 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
61893 return -EFAULT;
61894- if (get_user(datap, &umsgs[i].buf) ||
61895- put_user(compat_ptr(datap), &tmsgs[i].buf))
61896+ if (get_user(datap, (compat_caddr_t __user *)&umsgs[i].buf) ||
61897+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
61898 return -EFAULT;
61899 }
61900 return sys_ioctl(fd, cmd, (unsigned long)tdata);
61901@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
61902 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
61903 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
61904 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
61905- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
61906+ copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32)))
61907 return -EFAULT;
61908
61909 return ioctl_preallocate(file, p);
61910@@ -1618,8 +1618,8 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
61911 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
61912 {
61913 unsigned int a, b;
61914- a = *(unsigned int *)p;
61915- b = *(unsigned int *)q;
61916+ a = *(const unsigned int *)p;
61917+ b = *(const unsigned int *)q;
61918 if (a > b)
61919 return 1;
61920 if (a < b)
61921diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
61922index 668dcab..daebcd6 100644
61923--- a/fs/configfs/dir.c
61924+++ b/fs/configfs/dir.c
61925@@ -1548,7 +1548,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
61926 }
61927 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
61928 struct configfs_dirent *next;
61929- const char *name;
61930+ const unsigned char * name;
61931+ char d_name[sizeof(next->s_dentry->d_iname)];
61932 int len;
61933 struct inode *inode = NULL;
61934
61935@@ -1557,7 +1558,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
61936 continue;
61937
61938 name = configfs_get_name(next);
61939- len = strlen(name);
61940+ if (next->s_dentry && name == next->s_dentry->d_iname) {
61941+ len = next->s_dentry->d_name.len;
61942+ memcpy(d_name, name, len);
61943+ name = d_name;
61944+ } else
61945+ len = strlen(name);
61946
61947 /*
61948 * We'll have a dentry and an inode for
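
The configfs hunk guards against a concurrent rename rewriting d_iname mid-strlen() by snapshotting the recorded length and bytes into a stack buffer, then working only on the stable copy. A simplified sketch of the snapshot idea (the struct layout and names are illustrative, not the kernel's):

#include <string.h>

#define NAME_MAX_INLINE 32    /* stands in for sizeof(d_iname) */

struct entry {
        char inline_name[NAME_MAX_INLINE];
        unsigned len;         /* kept in sync with inline_name */
};

static void snapshot_name(const struct entry *e, char out[NAME_MAX_INLINE])
{
        unsigned len = e->len;            /* read the length once */

        if (len >= NAME_MAX_INLINE)
                len = NAME_MAX_INLINE - 1;
        memcpy(out, e->inline_name, len); /* bounded copy, no strlen() */
        out[len] = '\0';                  /* local copy is stable now */
}

int main(void)
{
        struct entry e = { "alpha", 5 };
        char buf[NAME_MAX_INLINE];

        snapshot_name(&e, buf);
        return buf[0] != 'a';
}
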
61949diff --git a/fs/coredump.c b/fs/coredump.c
61950index a93f7e6..d58bcbe 100644
61951--- a/fs/coredump.c
61952+++ b/fs/coredump.c
61953@@ -442,8 +442,8 @@ static void wait_for_dump_helpers(struct file *file)
61954 struct pipe_inode_info *pipe = file->private_data;
61955
61956 pipe_lock(pipe);
61957- pipe->readers++;
61958- pipe->writers--;
61959+ atomic_inc(&pipe->readers);
61960+ atomic_dec(&pipe->writers);
61961 wake_up_interruptible_sync(&pipe->wait);
61962 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
61963 pipe_unlock(pipe);
61964@@ -452,11 +452,11 @@ static void wait_for_dump_helpers(struct file *file)
61965 * We actually want wait_event_freezable() but then we need
61966 * to clear TIF_SIGPENDING and improve dump_interrupted().
61967 */
61968- wait_event_interruptible(pipe->wait, pipe->readers == 1);
61969+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
61970
61971 pipe_lock(pipe);
61972- pipe->readers--;
61973- pipe->writers++;
61974+ atomic_dec(&pipe->readers);
61975+ atomic_inc(&pipe->writers);
61976 pipe_unlock(pipe);
61977 }
61978
61979@@ -503,7 +503,9 @@ void do_coredump(const siginfo_t *siginfo)
61980 struct files_struct *displaced;
61981 bool need_nonrelative = false;
61982 bool core_dumped = false;
61983- static atomic_t core_dump_count = ATOMIC_INIT(0);
61984+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
61985+ long signr = siginfo->si_signo;
61986+ int dumpable;
61987 struct coredump_params cprm = {
61988 .siginfo = siginfo,
61989 .regs = signal_pt_regs(),
61990@@ -516,12 +518,17 @@ void do_coredump(const siginfo_t *siginfo)
61991 .mm_flags = mm->flags,
61992 };
61993
61994- audit_core_dumps(siginfo->si_signo);
61995+ audit_core_dumps(signr);
61996+
61997+ dumpable = __get_dumpable(cprm.mm_flags);
61998+
61999+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
62000+ gr_handle_brute_attach(dumpable);
62001
62002 binfmt = mm->binfmt;
62003 if (!binfmt || !binfmt->core_dump)
62004 goto fail;
62005- if (!__get_dumpable(cprm.mm_flags))
62006+ if (!dumpable)
62007 goto fail;
62008
62009 cred = prepare_creds();
62010@@ -540,7 +547,7 @@ void do_coredump(const siginfo_t *siginfo)
62011 need_nonrelative = true;
62012 }
62013
62014- retval = coredump_wait(siginfo->si_signo, &core_state);
62015+ retval = coredump_wait(signr, &core_state);
62016 if (retval < 0)
62017 goto fail_creds;
62018
62019@@ -583,7 +590,7 @@ void do_coredump(const siginfo_t *siginfo)
62020 }
62021 cprm.limit = RLIM_INFINITY;
62022
62023- dump_count = atomic_inc_return(&core_dump_count);
62024+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
62025 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
62026 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
62027 task_tgid_vnr(current), current->comm);
62028@@ -615,6 +622,8 @@ void do_coredump(const siginfo_t *siginfo)
62029 } else {
62030 struct inode *inode;
62031
62032+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
62033+
62034 if (cprm.limit < binfmt->min_coredump)
62035 goto fail_unlock;
62036
62037@@ -673,7 +682,7 @@ close_fail:
62038 filp_close(cprm.file, NULL);
62039 fail_dropcount:
62040 if (ispipe)
62041- atomic_dec(&core_dump_count);
62042+ atomic_dec_unchecked(&core_dump_count);
62043 fail_unlock:
62044 kfree(cn.corename);
62045 coredump_finish(mm, core_dumped);
62046@@ -694,6 +703,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
62047 struct file *file = cprm->file;
62048 loff_t pos = file->f_pos;
62049 ssize_t n;
62050+
62051+ gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
62052 if (cprm->written + nr > cprm->limit)
62053 return 0;
62054 while (nr) {
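
Besides converting the pipe reader/writer counts, the coredump hunk keeps core_dump_count as a concurrency ticket: atomic_inc_return() claims a slot, and if the count exceeds core_pipe_limit the dump backs out and decrements on the fail_dropcount path. A small sketch of that cap-with-rollback pattern (userspace atomics, hypothetical names):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int dump_count;
static int pipe_limit = 4;     /* stands in for core_pipe_limit */

static bool try_begin_dump(void)
{
        int n = atomic_fetch_add(&dump_count, 1) + 1;  /* inc_return */

        if (pipe_limit && n > pipe_limit) {
                atomic_fetch_sub(&dump_count, 1);      /* back out */
                return false;
        }
        return true;
}

static void end_dump(void)
{
        atomic_fetch_sub(&dump_count, 1);
}

int main(void)
{
        if (try_begin_dump())
                end_dump();
        return 0;
}
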
62055diff --git a/fs/dcache.c b/fs/dcache.c
62056index 06f6585..65499d1 100644
62057--- a/fs/dcache.c
62058+++ b/fs/dcache.c
62059@@ -1445,7 +1445,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
62060 */
62061 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
62062 if (name->len > DNAME_INLINE_LEN-1) {
62063- dname = kmalloc(name->len + 1, GFP_KERNEL);
62064+ dname = kmalloc(round_up(name->len + 1, sizeof(unsigned long)), GFP_KERNEL);
62065 if (!dname) {
62066 kmem_cache_free(dentry_cache, dentry);
62067 return NULL;
62068@@ -2402,7 +2402,7 @@ void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
62069 }
62070 EXPORT_SYMBOL(dentry_update_name_case);
62071
62072-static void switch_names(struct dentry *dentry, struct dentry *target)
62073+static void switch_names(struct dentry *dentry, struct dentry *target, bool exchange)
62074 {
62075 if (dname_external(target)) {
62076 if (dname_external(dentry)) {
62077@@ -2430,7 +2430,7 @@ static void switch_names(struct dentry *dentry, struct dentry *target)
62078 target->d_name.len + 1);
62079 target->d_name.name = dentry->d_name.name;
62080 dentry->d_name.name = dentry->d_iname;
62081- } else {
62082+ } else if (exchange) {
62083 /*
62084 * Both are internal.
62085 */
62086@@ -2440,6 +2440,14 @@ static void switch_names(struct dentry *dentry, struct dentry *target)
62087 swap(((long *) &dentry->d_iname)[i],
62088 ((long *) &target->d_iname)[i]);
62089 }
62090+ } else {
62091+ /*
62092+ * Both are internal. Just copy target to dentry
62093+ */
62094+ memcpy(dentry->d_iname, target->d_name.name,
62095+ target->d_name.len + 1);
62096+ dentry->d_name.len = target->d_name.len;
62097+ return;
62098 }
62099 }
62100 swap(dentry->d_name.len, target->d_name.len);
62101@@ -2540,7 +2548,7 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
62102 list_del(&target->d_u.d_child);
62103
62104 /* Switch the names.. */
62105- switch_names(dentry, target);
62106+ switch_names(dentry, target, exchange);
62107 swap(dentry->d_name.hash, target->d_name.hash);
62108
62109 /* ... and switch the parents */
62110@@ -2679,7 +2687,7 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
62111
62112 dparent = dentry->d_parent;
62113
62114- switch_names(dentry, anon);
62115+ switch_names(dentry, anon, false);
62116 swap(dentry->d_name.hash, anon->d_name.hash);
62117
62118 dentry->d_parent = dentry;
62119@@ -3413,7 +3421,8 @@ void __init vfs_caches_init(unsigned long mempages)
62120 mempages -= reserve;
62121
62122 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
62123- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
62124+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
62125+ SLAB_NO_SANITIZE, NULL);
62126
62127 dcache_init();
62128 inode_init();
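
The dcache hunk pads the external-name allocation with round_up(len + 1, sizeof(unsigned long)): name-comparison paths may read the buffer a word at a time, and the padding keeps those wide reads inside the allocation. A minimal illustration (round_up defined locally; alloc_dname is a made-up helper):

#include <stdlib.h>
#include <string.h>

#define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

static char *alloc_dname(const char *name, size_t len)
{
        /* pad to a multiple of sizeof(unsigned long); the padding is
         * never interpreted, it only keeps word-wide reads in bounds */
        char *dname = malloc(round_up(len + 1, sizeof(unsigned long)));

        if (!dname)
                return NULL;
        memcpy(dname, name, len);
        dname[len] = '\0';
        return dname;
}

int main(void)
{
        char *d = alloc_dname("a rather long name", 18);

        free(d);
        return 0;
}
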
62129diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
62130index 16a46b6..41696fd 100644
62131--- a/fs/debugfs/inode.c
62132+++ b/fs/debugfs/inode.c
62133@@ -416,7 +416,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
62134 */
62135 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
62136 {
62137+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
62138+ return __create_file(name, S_IFDIR | S_IRWXU,
62139+#else
62140 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
62141+#endif
62142 parent, NULL, NULL);
62143 }
62144 EXPORT_SYMBOL_GPL(debugfs_create_dir);
62145diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
62146index d4a9431..77f9b2e 100644
62147--- a/fs/ecryptfs/inode.c
62148+++ b/fs/ecryptfs/inode.c
62149@@ -673,7 +673,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
62150 old_fs = get_fs();
62151 set_fs(get_ds());
62152 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
62153- (char __user *)lower_buf,
62154+ (char __force_user *)lower_buf,
62155 PATH_MAX);
62156 set_fs(old_fs);
62157 if (rc < 0)
62158diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
62159index e4141f2..d8263e8 100644
62160--- a/fs/ecryptfs/miscdev.c
62161+++ b/fs/ecryptfs/miscdev.c
62162@@ -304,7 +304,7 @@ check_list:
62163 goto out_unlock_msg_ctx;
62164 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
62165 if (msg_ctx->msg) {
62166- if (copy_to_user(&buf[i], packet_length, packet_length_size))
62167+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
62168 goto out_unlock_msg_ctx;
62169 i += packet_length_size;
62170 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
62171diff --git a/fs/exec.c b/fs/exec.c
62172index a3d33fe..49e9bc9 100644
62173--- a/fs/exec.c
62174+++ b/fs/exec.c
62175@@ -56,8 +56,20 @@
62176 #include <linux/pipe_fs_i.h>
62177 #include <linux/oom.h>
62178 #include <linux/compat.h>
62179+#include <linux/random.h>
62180+#include <linux/seq_file.h>
62181+#include <linux/coredump.h>
62182+#include <linux/mman.h>
62183+
62184+#ifdef CONFIG_PAX_REFCOUNT
62185+#include <linux/kallsyms.h>
62186+#include <linux/kdebug.h>
62187+#endif
62188+
62189+#include <trace/events/fs.h>
62190
62191 #include <asm/uaccess.h>
62192+#include <asm/sections.h>
62193 #include <asm/mmu_context.h>
62194 #include <asm/tlb.h>
62195
62196@@ -66,19 +78,34 @@
62197
62198 #include <trace/events/sched.h>
62199
62200+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62201+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
62202+{
62203+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
62204+}
62205+#endif
62206+
62207+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
62208+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
62209+EXPORT_SYMBOL(pax_set_initial_flags_func);
62210+#endif
62211+
62212 int suid_dumpable = 0;
62213
62214 static LIST_HEAD(formats);
62215 static DEFINE_RWLOCK(binfmt_lock);
62216
62217+extern int gr_process_kernel_exec_ban(void);
62218+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
62219+
62220 void __register_binfmt(struct linux_binfmt * fmt, int insert)
62221 {
62222 BUG_ON(!fmt);
62223 if (WARN_ON(!fmt->load_binary))
62224 return;
62225 write_lock(&binfmt_lock);
62226- insert ? list_add(&fmt->lh, &formats) :
62227- list_add_tail(&fmt->lh, &formats);
62228+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
62229+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
62230 write_unlock(&binfmt_lock);
62231 }
62232
62233@@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
62234 void unregister_binfmt(struct linux_binfmt * fmt)
62235 {
62236 write_lock(&binfmt_lock);
62237- list_del(&fmt->lh);
62238+ pax_list_del((struct list_head *)&fmt->lh);
62239 write_unlock(&binfmt_lock);
62240 }
62241
62242@@ -183,18 +210,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
62243 int write)
62244 {
62245 struct page *page;
62246- int ret;
62247
62248-#ifdef CONFIG_STACK_GROWSUP
62249- if (write) {
62250- ret = expand_downwards(bprm->vma, pos);
62251- if (ret < 0)
62252- return NULL;
62253- }
62254-#endif
62255- ret = get_user_pages(current, bprm->mm, pos,
62256- 1, write, 1, &page, NULL);
62257- if (ret <= 0)
62258+ if (0 > expand_downwards(bprm->vma, pos))
62259+ return NULL;
62260+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
62261 return NULL;
62262
62263 if (write) {
62264@@ -210,6 +229,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
62265 if (size <= ARG_MAX)
62266 return page;
62267
62268+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62269+ // only allow 512KB for argv+env on suid/sgid binaries
62270+ // to prevent easy ASLR exhaustion
62271+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
62272+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
62273+ (size > (512 * 1024))) {
62274+ put_page(page);
62275+ return NULL;
62276+ }
62277+#endif
62278+
62279 /*
62280 * Limit to 1/4-th the stack size for the argv+env strings.
62281 * This ensures that:
62282@@ -269,6 +299,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
62283 vma->vm_end = STACK_TOP_MAX;
62284 vma->vm_start = vma->vm_end - PAGE_SIZE;
62285 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
62286+
62287+#ifdef CONFIG_PAX_SEGMEXEC
62288+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
62289+#endif
62290+
62291 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
62292 INIT_LIST_HEAD(&vma->anon_vma_chain);
62293
62294@@ -279,6 +314,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
62295 mm->stack_vm = mm->total_vm = 1;
62296 up_write(&mm->mmap_sem);
62297 bprm->p = vma->vm_end - sizeof(void *);
62298+
62299+#ifdef CONFIG_PAX_RANDUSTACK
62300+ if (randomize_va_space)
62301+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
62302+#endif
62303+
62304 return 0;
62305 err:
62306 up_write(&mm->mmap_sem);
62307@@ -399,7 +440,7 @@ struct user_arg_ptr {
62308 } ptr;
62309 };
62310
62311-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
62312+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
62313 {
62314 const char __user *native;
62315
62316@@ -408,14 +449,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
62317 compat_uptr_t compat;
62318
62319 if (get_user(compat, argv.ptr.compat + nr))
62320- return ERR_PTR(-EFAULT);
62321+ return (const char __force_user *)ERR_PTR(-EFAULT);
62322
62323 return compat_ptr(compat);
62324 }
62325 #endif
62326
62327 if (get_user(native, argv.ptr.native + nr))
62328- return ERR_PTR(-EFAULT);
62329+ return (const char __force_user *)ERR_PTR(-EFAULT);
62330
62331 return native;
62332 }
62333@@ -434,7 +475,7 @@ static int count(struct user_arg_ptr argv, int max)
62334 if (!p)
62335 break;
62336
62337- if (IS_ERR(p))
62338+ if (IS_ERR((const char __force_kernel *)p))
62339 return -EFAULT;
62340
62341 if (i >= max)
62342@@ -469,7 +510,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
62343
62344 ret = -EFAULT;
62345 str = get_user_arg_ptr(argv, argc);
62346- if (IS_ERR(str))
62347+ if (IS_ERR((const char __force_kernel *)str))
62348 goto out;
62349
62350 len = strnlen_user(str, MAX_ARG_STRLEN);
62351@@ -551,7 +592,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
62352 int r;
62353 mm_segment_t oldfs = get_fs();
62354 struct user_arg_ptr argv = {
62355- .ptr.native = (const char __user *const __user *)__argv,
62356+ .ptr.native = (const char __user * const __force_user *)__argv,
62357 };
62358
62359 set_fs(KERNEL_DS);
62360@@ -586,7 +627,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
62361 unsigned long new_end = old_end - shift;
62362 struct mmu_gather tlb;
62363
62364- BUG_ON(new_start > new_end);
62365+ if (new_start >= new_end || new_start < mmap_min_addr)
62366+ return -ENOMEM;
62367
62368 /*
62369 * ensure there are no vmas between where we want to go
62370@@ -595,6 +637,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
62371 if (vma != find_vma(mm, new_start))
62372 return -EFAULT;
62373
62374+#ifdef CONFIG_PAX_SEGMEXEC
62375+ BUG_ON(pax_find_mirror_vma(vma));
62376+#endif
62377+
62378 /*
62379 * cover the whole range: [new_start, old_end)
62380 */
62381@@ -675,10 +721,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
62382 stack_top = arch_align_stack(stack_top);
62383 stack_top = PAGE_ALIGN(stack_top);
62384
62385- if (unlikely(stack_top < mmap_min_addr) ||
62386- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
62387- return -ENOMEM;
62388-
62389 stack_shift = vma->vm_end - stack_top;
62390
62391 bprm->p -= stack_shift;
62392@@ -690,8 +732,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
62393 bprm->exec -= stack_shift;
62394
62395 down_write(&mm->mmap_sem);
62396+
62397+ /* Move stack pages down in memory. */
62398+ if (stack_shift) {
62399+ ret = shift_arg_pages(vma, stack_shift);
62400+ if (ret)
62401+ goto out_unlock;
62402+ }
62403+
62404 vm_flags = VM_STACK_FLAGS;
62405
62406+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
62407+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
62408+ vm_flags &= ~VM_EXEC;
62409+
62410+#ifdef CONFIG_PAX_MPROTECT
62411+ if (mm->pax_flags & MF_PAX_MPROTECT)
62412+ vm_flags &= ~VM_MAYEXEC;
62413+#endif
62414+
62415+ }
62416+#endif
62417+
62418 /*
62419 * Adjust stack execute permissions; explicitly enable for
62420 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
62421@@ -710,13 +772,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
62422 goto out_unlock;
62423 BUG_ON(prev != vma);
62424
62425- /* Move stack pages down in memory. */
62426- if (stack_shift) {
62427- ret = shift_arg_pages(vma, stack_shift);
62428- if (ret)
62429- goto out_unlock;
62430- }
62431-
62432 /* mprotect_fixup is overkill to remove the temporary stack flags */
62433 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
62434
62435@@ -740,6 +795,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
62436 #endif
62437 current->mm->start_stack = bprm->p;
62438 ret = expand_stack(vma, stack_base);
62439+
62440+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
62441+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
62442+ unsigned long size;
62443+ vm_flags_t vm_flags;
62444+
62445+ size = STACK_TOP - vma->vm_end;
62446+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
62447+
62448+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
62449+
62450+#ifdef CONFIG_X86
62451+ if (!ret) {
62452+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
62453+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
62454+ }
62455+#endif
62456+
62457+ }
62458+#endif
62459+
62460 if (ret)
62461 ret = -EFAULT;
62462
62463@@ -775,6 +851,8 @@ static struct file *do_open_exec(struct filename *name)
62464
62465 fsnotify_open(file);
62466
62467+ trace_open_exec(name->name);
62468+
62469 err = deny_write_access(file);
62470 if (err)
62471 goto exit;
62472@@ -804,7 +882,7 @@ int kernel_read(struct file *file, loff_t offset,
62473 old_fs = get_fs();
62474 set_fs(get_ds());
62475 /* The cast to a user pointer is valid due to the set_fs() */
62476- result = vfs_read(file, (void __user *)addr, count, &pos);
62477+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
62478 set_fs(old_fs);
62479 return result;
62480 }
62481@@ -849,6 +927,7 @@ static int exec_mmap(struct mm_struct *mm)
62482 tsk->mm = mm;
62483 tsk->active_mm = mm;
62484 activate_mm(active_mm, mm);
62485+ populate_stack();
62486 tsk->mm->vmacache_seqnum = 0;
62487 vmacache_flush(tsk);
62488 task_unlock(tsk);
62489@@ -1247,7 +1326,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
62490 }
62491 rcu_read_unlock();
62492
62493- if (p->fs->users > n_fs)
62494+ if (atomic_read(&p->fs->users) > n_fs)
62495 bprm->unsafe |= LSM_UNSAFE_SHARE;
62496 else
62497 p->fs->in_exec = 1;
62498@@ -1423,6 +1502,31 @@ static int exec_binprm(struct linux_binprm *bprm)
62499 return ret;
62500 }
62501
62502+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62503+static DEFINE_PER_CPU(u64, exec_counter);
62504+static int __init init_exec_counters(void)
62505+{
62506+ unsigned int cpu;
62507+
62508+ for_each_possible_cpu(cpu) {
62509+ per_cpu(exec_counter, cpu) = (u64)cpu;
62510+ }
62511+
62512+ return 0;
62513+}
62514+early_initcall(init_exec_counters);
62515+static inline void increment_exec_counter(void)
62516+{
62517+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
62518+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
62519+}
62520+#else
62521+static inline void increment_exec_counter(void) {}
62522+#endif
62523+
62524+extern void gr_handle_exec_args(struct linux_binprm *bprm,
62525+ struct user_arg_ptr argv);
62526+
62527 /*
62528 * sys_execve() executes a new program.
62529 */
62530@@ -1430,6 +1534,11 @@ static int do_execve_common(struct filename *filename,
62531 struct user_arg_ptr argv,
62532 struct user_arg_ptr envp)
62533 {
62534+#ifdef CONFIG_GRKERNSEC
62535+ struct file *old_exec_file;
62536+ struct acl_subject_label *old_acl;
62537+ struct rlimit old_rlim[RLIM_NLIMITS];
62538+#endif
62539 struct linux_binprm *bprm;
62540 struct file *file;
62541 struct files_struct *displaced;
62542@@ -1438,6 +1547,8 @@ static int do_execve_common(struct filename *filename,
62543 if (IS_ERR(filename))
62544 return PTR_ERR(filename);
62545
62546+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
62547+
62548 /*
62549 * We move the actual failure in case of RLIMIT_NPROC excess from
62550 * set*uid() to execve() because too many poorly written programs
62551@@ -1475,11 +1586,21 @@ static int do_execve_common(struct filename *filename,
62552 if (IS_ERR(file))
62553 goto out_unmark;
62554
62555+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
62556+ retval = -EPERM;
62557+ goto out_unmark;
62558+ }
62559+
62560 sched_exec();
62561
62562 bprm->file = file;
62563 bprm->filename = bprm->interp = filename->name;
62564
62565+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
62566+ retval = -EACCES;
62567+ goto out_unmark;
62568+ }
62569+
62570 retval = bprm_mm_init(bprm);
62571 if (retval)
62572 goto out_unmark;
62573@@ -1496,24 +1617,70 @@ static int do_execve_common(struct filename *filename,
62574 if (retval < 0)
62575 goto out;
62576
62577+#ifdef CONFIG_GRKERNSEC
62578+ old_acl = current->acl;
62579+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
62580+ old_exec_file = current->exec_file;
62581+ get_file(file);
62582+ current->exec_file = file;
62583+#endif
62584+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62585+ /* limit suid stack to 8MB
62586+ * we saved the old limits above and will restore them if this exec fails
62587+ */
62588+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
62589+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
62590+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
62591+#endif
62592+
62593+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
62594+ retval = -EPERM;
62595+ goto out_fail;
62596+ }
62597+
62598+ if (!gr_tpe_allow(file)) {
62599+ retval = -EACCES;
62600+ goto out_fail;
62601+ }
62602+
62603+ if (gr_check_crash_exec(file)) {
62604+ retval = -EACCES;
62605+ goto out_fail;
62606+ }
62607+
62608+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
62609+ bprm->unsafe);
62610+ if (retval < 0)
62611+ goto out_fail;
62612+
62613 retval = copy_strings_kernel(1, &bprm->filename, bprm);
62614 if (retval < 0)
62615- goto out;
62616+ goto out_fail;
62617
62618 bprm->exec = bprm->p;
62619 retval = copy_strings(bprm->envc, envp, bprm);
62620 if (retval < 0)
62621- goto out;
62622+ goto out_fail;
62623
62624 retval = copy_strings(bprm->argc, argv, bprm);
62625 if (retval < 0)
62626- goto out;
62627+ goto out_fail;
62628+
62629+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
62630+
62631+ gr_handle_exec_args(bprm, argv);
62632
62633 retval = exec_binprm(bprm);
62634 if (retval < 0)
62635- goto out;
62636+ goto out_fail;
62637+#ifdef CONFIG_GRKERNSEC
62638+ if (old_exec_file)
62639+ fput(old_exec_file);
62640+#endif
62641
62642 /* execve succeeded */
62643+
62644+ increment_exec_counter();
62645 current->fs->in_exec = 0;
62646 current->in_execve = 0;
62647 acct_update_integrals(current);
62648@@ -1524,6 +1691,14 @@ static int do_execve_common(struct filename *filename,
62649 put_files_struct(displaced);
62650 return retval;
62651
62652+out_fail:
62653+#ifdef CONFIG_GRKERNSEC
62654+ current->acl = old_acl;
62655+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
62656+ fput(current->exec_file);
62657+ current->exec_file = old_exec_file;
62658+#endif
62659+
62660 out:
62661 if (bprm->mm) {
62662 acct_arg_size(bprm, 0);
62663@@ -1615,3 +1790,312 @@ COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
62664 return compat_do_execve(getname(filename), argv, envp);
62665 }
62666 #endif
62667+
62668+int pax_check_flags(unsigned long *flags)
62669+{
62670+ int retval = 0;
62671+
62672+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
62673+ if (*flags & MF_PAX_SEGMEXEC)
62674+ {
62675+ *flags &= ~MF_PAX_SEGMEXEC;
62676+ retval = -EINVAL;
62677+ }
62678+#endif
62679+
62680+ if ((*flags & MF_PAX_PAGEEXEC)
62681+
62682+#ifdef CONFIG_PAX_PAGEEXEC
62683+ && (*flags & MF_PAX_SEGMEXEC)
62684+#endif
62685+
62686+ )
62687+ {
62688+ *flags &= ~MF_PAX_PAGEEXEC;
62689+ retval = -EINVAL;
62690+ }
62691+
62692+ if ((*flags & MF_PAX_MPROTECT)
62693+
62694+#ifdef CONFIG_PAX_MPROTECT
62695+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
62696+#endif
62697+
62698+ )
62699+ {
62700+ *flags &= ~MF_PAX_MPROTECT;
62701+ retval = -EINVAL;
62702+ }
62703+
62704+ if ((*flags & MF_PAX_EMUTRAMP)
62705+
62706+#ifdef CONFIG_PAX_EMUTRAMP
62707+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
62708+#endif
62709+
62710+ )
62711+ {
62712+ *flags &= ~MF_PAX_EMUTRAMP;
62713+ retval = -EINVAL;
62714+ }
62715+
62716+ return retval;
62717+}
62718+
62719+EXPORT_SYMBOL(pax_check_flags);
62720+
62721+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
62722+char *pax_get_path(const struct path *path, char *buf, int buflen)
62723+{
62724+ char *pathname = d_path(path, buf, buflen);
62725+
62726+ if (IS_ERR(pathname))
62727+ goto toolong;
62728+
62729+ pathname = mangle_path(buf, pathname, "\t\n\\");
62730+ if (!pathname)
62731+ goto toolong;
62732+
62733+ *pathname = 0;
62734+ return buf;
62735+
62736+toolong:
62737+ return "<path too long>";
62738+}
62739+EXPORT_SYMBOL(pax_get_path);
62740+
62741+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
62742+{
62743+ struct task_struct *tsk = current;
62744+ struct mm_struct *mm = current->mm;
62745+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
62746+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
62747+ char *path_exec = NULL;
62748+ char *path_fault = NULL;
62749+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
62750+ siginfo_t info = { };
62751+
62752+ if (buffer_exec && buffer_fault) {
62753+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
62754+
62755+ down_read(&mm->mmap_sem);
62756+ vma = mm->mmap;
62757+ while (vma && (!vma_exec || !vma_fault)) {
62758+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
62759+ vma_exec = vma;
62760+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
62761+ vma_fault = vma;
62762+ vma = vma->vm_next;
62763+ }
62764+ if (vma_exec)
62765+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
62766+ if (vma_fault) {
62767+ start = vma_fault->vm_start;
62768+ end = vma_fault->vm_end;
62769+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
62770+ if (vma_fault->vm_file)
62771+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
62772+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
62773+ path_fault = "<heap>";
62774+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
62775+ path_fault = "<stack>";
62776+ else
62777+ path_fault = "<anonymous mapping>";
62778+ }
62779+ up_read(&mm->mmap_sem);
62780+ }
62781+ if (tsk->signal->curr_ip)
62782+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
62783+ else
62784+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
62785+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
62786+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
62787+ free_page((unsigned long)buffer_exec);
62788+ free_page((unsigned long)buffer_fault);
62789+ pax_report_insns(regs, pc, sp);
62790+ info.si_signo = SIGKILL;
62791+ info.si_errno = 0;
62792+ info.si_code = SI_KERNEL;
62793+ info.si_pid = 0;
62794+ info.si_uid = 0;
62795+ do_coredump(&info);
62796+}
62797+#endif
62798+
62799+#ifdef CONFIG_PAX_REFCOUNT
62800+void pax_report_refcount_overflow(struct pt_regs *regs)
62801+{
62802+ if (current->signal->curr_ip)
62803+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
62804+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
62805+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
62806+ else
62807+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
62808+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
62809+	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
62810+ preempt_disable();
62811+ show_regs(regs);
62812+ preempt_enable();
62813+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
62814+}
62815+#endif
62816+
62817+#ifdef CONFIG_PAX_USERCOPY
62818+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
62819+static noinline int check_stack_object(const void *obj, unsigned long len)
62820+{
62821+ const void * const stack = task_stack_page(current);
62822+ const void * const stackend = stack + THREAD_SIZE;
62823+
62824+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
62825+ const void *frame = NULL;
62826+ const void *oldframe;
62827+#endif
62828+
62829+ if (obj + len < obj)
62830+ return -1;
62831+
62832+ if (obj + len <= stack || stackend <= obj)
62833+ return 0;
62834+
62835+ if (obj < stack || stackend < obj + len)
62836+ return -1;
62837+
62838+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
62839+ oldframe = __builtin_frame_address(1);
62840+ if (oldframe)
62841+ frame = __builtin_frame_address(2);
62842+ /*
62843+ low ----------------------------------------------> high
62844+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
62845+ ^----------------^
62846+ allow copies only within here
62847+ */
62848+ while (stack <= frame && frame < stackend) {
62849+ /* if obj + len extends past the last frame, this
62850+ check won't pass and the next frame will be 0,
62851+ causing us to bail out and correctly report
62852+ the copy as invalid
62853+ */
62854+ if (obj + len <= frame)
62855+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
62856+ oldframe = frame;
62857+ frame = *(const void * const *)frame;
62858+ }
62859+ return -1;
62860+#else
62861+ return 1;
62862+#endif
62863+}
62864+
62865+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
62866+{
62867+ if (current->signal->curr_ip)
62868+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
62869+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
62870+ else
62871+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
62872+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
62873+ dump_stack();
62874+ gr_handle_kernel_exploit();
62875+ do_group_exit(SIGKILL);
62876+}
62877+#endif
62878+
62879+#ifdef CONFIG_PAX_USERCOPY
62880+
62881+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
62882+{
62883+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
62884+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
62885+#ifdef CONFIG_MODULES
62886+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
62887+#else
62888+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
62889+#endif
62890+
62891+#else
62892+ unsigned long textlow = (unsigned long)_stext;
62893+ unsigned long texthigh = (unsigned long)_etext;
62894+
62895+#ifdef CONFIG_X86_64
62896+ /* check against linear mapping as well */
62897+ if (high > (unsigned long)__va(__pa(textlow)) &&
62898+ low < (unsigned long)__va(__pa(texthigh)))
62899+ return true;
62900+#endif
62901+
62902+#endif
62903+
62904+ if (high <= textlow || low >= texthigh)
62905+ return false;
62906+ else
62907+ return true;
62908+}
62909+#endif
62910+
62911+void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size)
62912+{
62913+#ifdef CONFIG_PAX_USERCOPY
62914+ const char *type;
62915+#endif
62916+
62917+#ifndef CONFIG_STACK_GROWSUP
62918+ unsigned long stackstart = (unsigned long)task_stack_page(current);
62919+ unsigned long currentsp = (unsigned long)&stackstart;
62920+ if (unlikely((currentsp < stackstart + 512 ||
62921+ currentsp >= stackstart + THREAD_SIZE) && !in_interrupt()))
62922+ BUG();
62923+#endif
62924+
62925+#ifndef CONFIG_PAX_USERCOPY_DEBUG
62926+ if (const_size)
62927+ return;
62928+#endif
62929+
62930+#ifdef CONFIG_PAX_USERCOPY
62931+ if (!n)
62932+ return;
62933+
62934+ type = check_heap_object(ptr, n);
62935+ if (!type) {
62936+ int ret = check_stack_object(ptr, n);
62937+ if (ret == 1 || ret == 2)
62938+ return;
62939+ if (ret == 0) {
62940+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
62941+ type = "<kernel text>";
62942+ else
62943+ return;
62944+ } else
62945+ type = "<process stack>";
62946+ }
62947+
62948+ pax_report_usercopy(ptr, n, to_user, type);
62949+#endif
62950+
62951+}
62952+EXPORT_SYMBOL(__check_object_size);
62953+
62954+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
62955+void pax_track_stack(void)
62956+{
62957+ unsigned long sp = (unsigned long)&sp;
62958+ if (sp < current_thread_info()->lowest_stack &&
62959+ sp > (unsigned long)task_stack_page(current))
62960+ current_thread_info()->lowest_stack = sp;
62961+ if (unlikely((sp & ~(THREAD_SIZE - 1)) < (THREAD_SIZE/16)))
62962+ BUG();
62963+}
62964+EXPORT_SYMBOL(pax_track_stack);
62965+#endif
62966+
62967+#ifdef CONFIG_PAX_SIZE_OVERFLOW
62968+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
62969+{
62970+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
62971+ dump_stack();
62972+ do_group_exit(SIGKILL);
62973+}
62974+EXPORT_SYMBOL(report_size_overflow);
62975+#endif
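
Much of the fs/exec.c tail implements PAX_USERCOPY's check_stack_object(): a copy is classified as entirely off the stack, fully inside it, or straddling a boundary, and only the last case is an error (the frame-pointer walk then refines "fully inside" further). A userspace approximation of just the range classification, with the frame walking omitted:

#include <stdio.h>

/* 0: copy entirely outside the stack, 1: fully inside,
 * -1: wraps or straddles a stack boundary (an error) */
static int check_range(const char *obj, unsigned long len,
                       const char *stack, const char *stackend)
{
        if (obj + len < obj)                        /* length wraps */
                return -1;
        if (obj + len <= stack || stackend <= obj)  /* disjoint */
                return 0;
        if (obj < stack || stackend < obj + len)    /* partial overlap */
                return -1;
        return 1;
}

int main(void)
{
        char stack[256];

        printf("%d\n", check_range(stack + 16, 32,
                                   stack, stack + sizeof(stack)));
        return 0;    /* prints 1: the object sits fully inside */
}
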
62976diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
62977index 9f9992b..8b59411 100644
62978--- a/fs/ext2/balloc.c
62979+++ b/fs/ext2/balloc.c
62980@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
62981
62982 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
62983 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
62984- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
62985+ if (free_blocks < root_blocks + 1 &&
62986 !uid_eq(sbi->s_resuid, current_fsuid()) &&
62987 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
62988- !in_group_p (sbi->s_resgid))) {
62989+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
62990 return 0;
62991 }
62992 return 1;
62993diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
62994index 9142614..97484fa 100644
62995--- a/fs/ext2/xattr.c
62996+++ b/fs/ext2/xattr.c
62997@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
62998 struct buffer_head *bh = NULL;
62999 struct ext2_xattr_entry *entry;
63000 char *end;
63001- size_t rest = buffer_size;
63002+ size_t rest = buffer_size, total_size = 0;
63003 int error;
63004
63005 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
63006@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
63007 buffer += size;
63008 }
63009 rest -= size;
63010+ total_size += size;
63011 }
63012 }
63013- error = buffer_size - rest; /* total size */
63014+ error = total_size;
63015
63016 cleanup:
63017 brelse(bh);
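
The ext2 xattr fix replaces "buffer_size - rest" with an explicitly accumulated total_size: on the size-probe call buffer_size is 0 while rest is still decremented per entry, so the subtraction could yield a nonsense length. A toy version of the accumulate-don't-subtract fix:

#include <stdio.h>

/* size the full listing needs; this is what the probe call
 * (buffer == NULL, buffer_size == 0) must report */
static long list_size(const unsigned long *sizes, int n)
{
        unsigned long total_size = 0;   /* accumulated, as in the patch */
        int i;

        for (i = 0; i < n; i++)
                total_size += sizes[i];
        return (long)total_size;        /* never "buffer_size - rest" */
}

int main(void)
{
        unsigned long sizes[] = { 5, 9, 12 };

        printf("%ld\n", list_size(sizes, 3));   /* 26 */
        return 0;
}
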
63018diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
63019index 158b5d4..2432610 100644
63020--- a/fs/ext3/balloc.c
63021+++ b/fs/ext3/balloc.c
63022@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
63023
63024 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
63025 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
63026- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
63027+ if (free_blocks < root_blocks + 1 &&
63028 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
63029 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
63030- !in_group_p (sbi->s_resgid))) {
63031+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
63032 return 0;
63033 }
63034 return 1;
63035diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
63036index c6874be..f8a6ae8 100644
63037--- a/fs/ext3/xattr.c
63038+++ b/fs/ext3/xattr.c
63039@@ -330,7 +330,7 @@ static int
63040 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
63041 char *buffer, size_t buffer_size)
63042 {
63043- size_t rest = buffer_size;
63044+ size_t rest = buffer_size, total_size = 0;
63045
63046 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
63047 const struct xattr_handler *handler =
63048@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
63049 buffer += size;
63050 }
63051 rest -= size;
63052+ total_size += size;
63053 }
63054 }
63055- return buffer_size - rest;
63056+ return total_size;
63057 }
63058
63059 static int
63060diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
63061index fca3820..e1ea241 100644
63062--- a/fs/ext4/balloc.c
63063+++ b/fs/ext4/balloc.c
63064@@ -553,8 +553,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
63065 /* Hm, nope. Are (enough) root reserved clusters available? */
63066 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
63067 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
63068- capable(CAP_SYS_RESOURCE) ||
63069- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
63070+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
63071+ capable_nolog(CAP_SYS_RESOURCE)) {
63072
63073 if (free_clusters >= (nclusters + dirty_clusters +
63074 resv_clusters))
63075diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
63076index 1bbe7c3..6f404a5c 100644
63077--- a/fs/ext4/ext4.h
63078+++ b/fs/ext4/ext4.h
63079@@ -1276,19 +1276,19 @@ struct ext4_sb_info {
63080 unsigned long s_mb_last_start;
63081
63082 /* stats for buddy allocator */
63083- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
63084- atomic_t s_bal_success; /* we found long enough chunks */
63085- atomic_t s_bal_allocated; /* in blocks */
63086- atomic_t s_bal_ex_scanned; /* total extents scanned */
63087- atomic_t s_bal_goals; /* goal hits */
63088- atomic_t s_bal_breaks; /* too long searches */
63089- atomic_t s_bal_2orders; /* 2^order hits */
63090+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
63091+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
63092+ atomic_unchecked_t s_bal_allocated; /* in blocks */
63093+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
63094+ atomic_unchecked_t s_bal_goals; /* goal hits */
63095+ atomic_unchecked_t s_bal_breaks; /* too long searches */
63096+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
63097 spinlock_t s_bal_lock;
63098 unsigned long s_mb_buddies_generated;
63099 unsigned long long s_mb_generation_time;
63100- atomic_t s_mb_lost_chunks;
63101- atomic_t s_mb_preallocated;
63102- atomic_t s_mb_discarded;
63103+ atomic_unchecked_t s_mb_lost_chunks;
63104+ atomic_unchecked_t s_mb_preallocated;
63105+ atomic_unchecked_t s_mb_discarded;
63106 atomic_t s_lock_busy;
63107
63108 /* locality groups */
63109@@ -1826,7 +1826,7 @@ ext4_group_first_block_no(struct super_block *sb, ext4_group_t group_no)
63110 /*
63111 * Special error return code only used by dx_probe() and its callers.
63112 */
63113-#define ERR_BAD_DX_DIR -75000
63114+#define ERR_BAD_DX_DIR (-(MAX_ERRNO - 1))
63115
63116 /*
63117 * Timeout and state flag for lazy initialization inode thread.
63118diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
63119index c3e7418..f62cab3 100644
63120--- a/fs/ext4/mballoc.c
63121+++ b/fs/ext4/mballoc.c
63122@@ -1901,7 +1901,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
63123 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
63124
63125 if (EXT4_SB(sb)->s_mb_stats)
63126- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
63127+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
63128
63129 break;
63130 }
63131@@ -2211,7 +2211,7 @@ repeat:
63132 ac->ac_status = AC_STATUS_CONTINUE;
63133 ac->ac_flags |= EXT4_MB_HINT_FIRST;
63134 cr = 3;
63135- atomic_inc(&sbi->s_mb_lost_chunks);
63136+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
63137 goto repeat;
63138 }
63139 }
63140@@ -2717,25 +2717,25 @@ int ext4_mb_release(struct super_block *sb)
63141 if (sbi->s_mb_stats) {
63142 ext4_msg(sb, KERN_INFO,
63143 "mballoc: %u blocks %u reqs (%u success)",
63144- atomic_read(&sbi->s_bal_allocated),
63145- atomic_read(&sbi->s_bal_reqs),
63146- atomic_read(&sbi->s_bal_success));
63147+ atomic_read_unchecked(&sbi->s_bal_allocated),
63148+ atomic_read_unchecked(&sbi->s_bal_reqs),
63149+ atomic_read_unchecked(&sbi->s_bal_success));
63150 ext4_msg(sb, KERN_INFO,
63151 "mballoc: %u extents scanned, %u goal hits, "
63152 "%u 2^N hits, %u breaks, %u lost",
63153- atomic_read(&sbi->s_bal_ex_scanned),
63154- atomic_read(&sbi->s_bal_goals),
63155- atomic_read(&sbi->s_bal_2orders),
63156- atomic_read(&sbi->s_bal_breaks),
63157- atomic_read(&sbi->s_mb_lost_chunks));
63158+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
63159+ atomic_read_unchecked(&sbi->s_bal_goals),
63160+ atomic_read_unchecked(&sbi->s_bal_2orders),
63161+ atomic_read_unchecked(&sbi->s_bal_breaks),
63162+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
63163 ext4_msg(sb, KERN_INFO,
63164 "mballoc: %lu generated and it took %Lu",
63165 sbi->s_mb_buddies_generated,
63166 sbi->s_mb_generation_time);
63167 ext4_msg(sb, KERN_INFO,
63168 "mballoc: %u preallocated, %u discarded",
63169- atomic_read(&sbi->s_mb_preallocated),
63170- atomic_read(&sbi->s_mb_discarded));
63171+ atomic_read_unchecked(&sbi->s_mb_preallocated),
63172+ atomic_read_unchecked(&sbi->s_mb_discarded));
63173 }
63174
63175 free_percpu(sbi->s_locality_groups);
63176@@ -3191,16 +3191,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
63177 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
63178
63179 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
63180- atomic_inc(&sbi->s_bal_reqs);
63181- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
63182+ atomic_inc_unchecked(&sbi->s_bal_reqs);
63183+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
63184 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
63185- atomic_inc(&sbi->s_bal_success);
63186- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
63187+ atomic_inc_unchecked(&sbi->s_bal_success);
63188+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
63189 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
63190 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
63191- atomic_inc(&sbi->s_bal_goals);
63192+ atomic_inc_unchecked(&sbi->s_bal_goals);
63193 if (ac->ac_found > sbi->s_mb_max_to_scan)
63194- atomic_inc(&sbi->s_bal_breaks);
63195+ atomic_inc_unchecked(&sbi->s_bal_breaks);
63196 }
63197
63198 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
63199@@ -3627,7 +3627,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
63200 trace_ext4_mb_new_inode_pa(ac, pa);
63201
63202 ext4_mb_use_inode_pa(ac, pa);
63203- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
63204+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
63205
63206 ei = EXT4_I(ac->ac_inode);
63207 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
63208@@ -3687,7 +3687,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
63209 trace_ext4_mb_new_group_pa(ac, pa);
63210
63211 ext4_mb_use_group_pa(ac, pa);
63212- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
63213+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
63214
63215 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
63216 lg = ac->ac_lg;
63217@@ -3776,7 +3776,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
63218 * from the bitmap and continue.
63219 */
63220 }
63221- atomic_add(free, &sbi->s_mb_discarded);
63222+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
63223
63224 return err;
63225 }
63226@@ -3794,7 +3794,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
63227 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
63228 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
63229 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
63230- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
63231+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
63232 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
63233
63234 return 0;
63235diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
63236index 32bce84..112d969 100644
63237--- a/fs/ext4/mmp.c
63238+++ b/fs/ext4/mmp.c
63239@@ -113,7 +113,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
63240 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
63241 const char *function, unsigned int line, const char *msg)
63242 {
63243- __ext4_warning(sb, function, line, msg);
63244+ __ext4_warning(sb, function, line, "%s", msg);
63245 __ext4_warning(sb, function, line,
63246 "MMP failure info: last update time: %llu, last update "
63247 "node: %s, last update device: %s\n",
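
The one-line mmp change is classic format-string hardening: the message is passed through a fixed "%s" format instead of being used as the format itself, so any '%' sequences in it are printed literally rather than interpreted. A sketch (warn_msg is a hypothetical wrapper):

#include <stdio.h>

static void warn_msg(const char *msg)
{
        /* printf(msg) would interpret any '%' in msg; pinning the
         * format to "%s" prints the message verbatim instead */
        printf("%s\n", msg);
}

int main(void)
{
        warn_msg("update was 50% done, node %s");   /* printed literally */
        return 0;
}
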
63248diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
63249index 9e6eced..5e127be 100644
63250--- a/fs/ext4/namei.c
63251+++ b/fs/ext4/namei.c
63252@@ -1227,7 +1227,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
63253 buffer */
63254 int num = 0;
63255 ext4_lblk_t nblocks;
63256- int i, err;
63257+ int i, err = 0;
63258 int namelen;
63259
63260 *res_dir = NULL;
63261@@ -1264,7 +1264,11 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
63262 * return. Otherwise, fall back to doing a search the
63263 * old fashioned way.
63264 */
63265- if (bh || (err != ERR_BAD_DX_DIR))
63266+ if (err == -ENOENT)
63267+ return NULL;
63268+ if (err && err != ERR_BAD_DX_DIR)
63269+ return ERR_PTR(err);
63270+ if (bh)
63271 return bh;
63272 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
63273 "falling back\n"));
63274@@ -1295,6 +1299,11 @@ restart:
63275 }
63276 num++;
63277 bh = ext4_getblk(NULL, dir, b++, 0, &err);
63278+ if (unlikely(err)) {
63279+ if (ra_max == 0)
63280+ return ERR_PTR(err);
63281+ break;
63282+ }
63283 bh_use[ra_max] = bh;
63284 if (bh)
63285 ll_rw_block(READ | REQ_META | REQ_PRIO,
63286@@ -1417,6 +1426,8 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
63287 return ERR_PTR(-ENAMETOOLONG);
63288
63289 bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
63290+ if (IS_ERR(bh))
63291+ return (struct dentry *) bh;
63292 inode = NULL;
63293 if (bh) {
63294 __u32 ino = le32_to_cpu(de->inode);
63295@@ -1450,6 +1461,8 @@ struct dentry *ext4_get_parent(struct dentry *child)
63296 struct buffer_head *bh;
63297
63298 bh = ext4_find_entry(child->d_inode, &dotdot, &de, NULL);
63299+ if (IS_ERR(bh))
63300+ return (struct dentry *) bh;
63301 if (!bh)
63302 return ERR_PTR(-ENOENT);
63303 ino = le32_to_cpu(de->inode);
63304@@ -2727,6 +2740,8 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
63305
63306 retval = -ENOENT;
63307 bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
63308+ if (IS_ERR(bh))
63309+ return PTR_ERR(bh);
63310 if (!bh)
63311 goto end_rmdir;
63312
63313@@ -2794,6 +2809,8 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
63314
63315 retval = -ENOENT;
63316 bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
63317+ if (IS_ERR(bh))
63318+ return PTR_ERR(bh);
63319 if (!bh)
63320 goto end_unlink;
63321
63322@@ -3121,6 +3138,8 @@ static int ext4_find_delete_entry(handle_t *handle, struct inode *dir,
63323 struct ext4_dir_entry_2 *de;
63324
63325 bh = ext4_find_entry(dir, d_name, &de, NULL);
63326+ if (IS_ERR(bh))
63327+ return PTR_ERR(bh);
63328 if (bh) {
63329 retval = ext4_delete_entry(handle, dir, de, bh);
63330 brelse(bh);
63331@@ -3205,6 +3224,8 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
63332 dquot_initialize(new.inode);
63333
63334 old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
63335+ if (IS_ERR(old.bh))
63336+ return PTR_ERR(old.bh);
63337 /*
63338 * Check for inode number is _not_ due to possible IO errors.
63339 * We might rmdir the source, keep it as pwd of some process
63340@@ -3217,6 +3238,11 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
63341
63342 new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
63343 &new.de, &new.inlined);
63344+ if (IS_ERR(new.bh)) {
63345+ retval = PTR_ERR(new.bh);
63346+ new.bh = NULL;
63347+ goto end_rename;
63348+ }
63349 if (new.bh) {
63350 if (!new.inode) {
63351 brelse(new.bh);
63352@@ -3345,6 +3371,8 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
63353
63354 old.bh = ext4_find_entry(old.dir, &old.dentry->d_name,
63355 &old.de, &old.inlined);
63356+ if (IS_ERR(old.bh))
63357+ return PTR_ERR(old.bh);
63358 /*
63359 * Check for inode number is _not_ due to possible IO errors.
63360 * We might rmdir the source, keep it as pwd of some process
63361@@ -3357,6 +3385,11 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
63362
63363 new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
63364 &new.de, &new.inlined);
63365+ if (IS_ERR(new.bh)) {
63366+ retval = PTR_ERR(new.bh);
63367+ new.bh = NULL;
63368+ goto end_rename;
63369+ }
63370
63371 /* RENAME_EXCHANGE case: old *and* new must both exist */
63372 if (!new.bh || le32_to_cpu(new.de->inode) != new.inode->i_ino)
63373diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
63374index bb0e80f..1e43b90 100644
63375--- a/fs/ext4/resize.c
63376+++ b/fs/ext4/resize.c
63377@@ -575,6 +575,7 @@ handle_bb:
63378 bh = bclean(handle, sb, block);
63379 if (IS_ERR(bh)) {
63380 err = PTR_ERR(bh);
63381+ bh = NULL;
63382 goto out;
63383 }
63384 overhead = ext4_group_overhead_blocks(sb, group);
63385@@ -603,6 +604,7 @@ handle_ib:
63386 bh = bclean(handle, sb, block);
63387 if (IS_ERR(bh)) {
63388 err = PTR_ERR(bh);
63389+ bh = NULL;
63390 goto out;
63391 }
63392
63393diff --git a/fs/ext4/super.c b/fs/ext4/super.c
63394index beeb5c4..998c28d 100644
63395--- a/fs/ext4/super.c
63396+++ b/fs/ext4/super.c
63397@@ -1276,7 +1276,7 @@ static ext4_fsblk_t get_sb_block(void **data)
63398 }
63399
63400 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
63401-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
63402+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
63403 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
63404
63405 #ifdef CONFIG_QUOTA
63406@@ -2464,7 +2464,7 @@ struct ext4_attr {
63407 int offset;
63408 int deprecated_val;
63409 } u;
63410-};
63411+} __do_const;
63412
63413 static int parse_strtoull(const char *buf,
63414 unsigned long long max, unsigned long long *value)
63415diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
63416index e738733..9843a6c 100644
63417--- a/fs/ext4/xattr.c
63418+++ b/fs/ext4/xattr.c
63419@@ -386,7 +386,7 @@ static int
63420 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
63421 char *buffer, size_t buffer_size)
63422 {
63423- size_t rest = buffer_size;
63424+ size_t rest = buffer_size, total_size = 0;
63425
63426 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
63427 const struct xattr_handler *handler =
63428@@ -403,9 +403,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
63429 buffer += size;
63430 }
63431 rest -= size;
63432+ total_size += size;
63433 }
63434 }
63435- return buffer_size - rest;
63436+ return total_size;
63437 }
63438
63439 static int
63440diff --git a/fs/fcntl.c b/fs/fcntl.c
63441index 72c82f6..a18b263 100644
63442--- a/fs/fcntl.c
63443+++ b/fs/fcntl.c
63444@@ -106,6 +106,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
63445 if (err)
63446 return err;
63447
63448+ if (gr_handle_chroot_fowner(pid, type))
63449+ return -ENOENT;
63450+ if (gr_check_protected_task_fowner(pid, type))
63451+ return -EACCES;
63452+
63453 f_modown(filp, pid, type, force);
63454 return 0;
63455 }
63456diff --git a/fs/fhandle.c b/fs/fhandle.c
63457index 999ff5c..ac037c9 100644
63458--- a/fs/fhandle.c
63459+++ b/fs/fhandle.c
63460@@ -8,6 +8,7 @@
63461 #include <linux/fs_struct.h>
63462 #include <linux/fsnotify.h>
63463 #include <linux/personality.h>
63464+#include <linux/grsecurity.h>
63465 #include <asm/uaccess.h>
63466 #include "internal.h"
63467 #include "mount.h"
63468@@ -67,8 +68,7 @@ static long do_sys_name_to_handle(struct path *path,
63469 } else
63470 retval = 0;
63471 /* copy the mount id */
63472- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
63473- sizeof(*mnt_id)) ||
63474+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
63475 copy_to_user(ufh, handle,
63476 sizeof(struct file_handle) + handle_bytes))
63477 retval = -EFAULT;
63478@@ -175,7 +175,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
63479 * the directory. Ideally we would like CAP_DAC_SEARCH.
63480 * But we don't have that
63481 */
63482- if (!capable(CAP_DAC_READ_SEARCH)) {
63483+ if (!capable(CAP_DAC_READ_SEARCH) || !gr_chroot_fhandle()) {
63484 retval = -EPERM;
63485 goto out_err;
63486 }
63487diff --git a/fs/file.c b/fs/file.c
63488index 66923fe..2849783 100644
63489--- a/fs/file.c
63490+++ b/fs/file.c
63491@@ -16,6 +16,7 @@
63492 #include <linux/slab.h>
63493 #include <linux/vmalloc.h>
63494 #include <linux/file.h>
63495+#include <linux/security.h>
63496 #include <linux/fdtable.h>
63497 #include <linux/bitops.h>
63498 #include <linux/interrupt.h>
63499@@ -139,7 +140,7 @@ out:
63500 * Return <0 error code on error; 1 on successful completion.
63501 * The files->file_lock should be held on entry, and will be held on exit.
63502 */
63503-static int expand_fdtable(struct files_struct *files, int nr)
63504+static int expand_fdtable(struct files_struct *files, unsigned int nr)
63505 __releases(files->file_lock)
63506 __acquires(files->file_lock)
63507 {
63508@@ -184,7 +185,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
63509 * expanded and execution may have blocked.
63510 * The files->file_lock should be held on entry, and will be held on exit.
63511 */
63512-static int expand_files(struct files_struct *files, int nr)
63513+static int expand_files(struct files_struct *files, unsigned int nr)
63514 {
63515 struct fdtable *fdt;
63516
63517@@ -799,6 +800,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
63518 if (!file)
63519 return __close_fd(files, fd);
63520
63521+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
63522 if (fd >= rlimit(RLIMIT_NOFILE))
63523 return -EBADF;
63524
63525@@ -825,6 +827,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
63526 if (unlikely(oldfd == newfd))
63527 return -EINVAL;
63528
63529+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
63530 if (newfd >= rlimit(RLIMIT_NOFILE))
63531 return -EBADF;
63532
63533@@ -880,6 +883,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
63534 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
63535 {
63536 int err;
63537+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
63538 if (from >= rlimit(RLIMIT_NOFILE))
63539 return -EINVAL;
63540 err = alloc_fd(from, flags);
63541diff --git a/fs/filesystems.c b/fs/filesystems.c
63542index 5797d45..7d7d79a 100644
63543--- a/fs/filesystems.c
63544+++ b/fs/filesystems.c
63545@@ -275,7 +275,11 @@ struct file_system_type *get_fs_type(const char *name)
63546 int len = dot ? dot - name : strlen(name);
63547
63548 fs = __get_fs_type(name, len);
63549+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63550+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
63551+#else
63552 if (!fs && (request_module("fs-%.*s", len, name) == 0))
63553+#endif
63554 fs = __get_fs_type(name, len);
63555
63556 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
63557diff --git a/fs/fs_struct.c b/fs/fs_struct.c
63558index 7dca743..543d620 100644
63559--- a/fs/fs_struct.c
63560+++ b/fs/fs_struct.c
63561@@ -4,6 +4,7 @@
63562 #include <linux/path.h>
63563 #include <linux/slab.h>
63564 #include <linux/fs_struct.h>
63565+#include <linux/grsecurity.h>
63566 #include "internal.h"
63567
63568 /*
63569@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
63570 write_seqcount_begin(&fs->seq);
63571 old_root = fs->root;
63572 fs->root = *path;
63573+ gr_set_chroot_entries(current, path);
63574 write_seqcount_end(&fs->seq);
63575 spin_unlock(&fs->lock);
63576 if (old_root.dentry)
63577@@ -67,6 +69,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
63578 int hits = 0;
63579 spin_lock(&fs->lock);
63580 write_seqcount_begin(&fs->seq);
63581+ /* this root replacement is only done by pivot_root,
63582+ leave grsec's chroot tagging alone for this task
63583+ so that a pivoted root isn't treated as a chroot
63584+ */
63585 hits += replace_path(&fs->root, old_root, new_root);
63586 hits += replace_path(&fs->pwd, old_root, new_root);
63587 write_seqcount_end(&fs->seq);
63588@@ -99,7 +105,8 @@ void exit_fs(struct task_struct *tsk)
63589 task_lock(tsk);
63590 spin_lock(&fs->lock);
63591 tsk->fs = NULL;
63592- kill = !--fs->users;
63593+ gr_clear_chroot_entries(tsk);
63594+ kill = !atomic_dec_return(&fs->users);
63595 spin_unlock(&fs->lock);
63596 task_unlock(tsk);
63597 if (kill)
63598@@ -112,7 +119,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
63599 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
63600 /* We don't need to lock fs - think why ;-) */
63601 if (fs) {
63602- fs->users = 1;
63603+ atomic_set(&fs->users, 1);
63604 fs->in_exec = 0;
63605 spin_lock_init(&fs->lock);
63606 seqcount_init(&fs->seq);
63607@@ -121,6 +128,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
63608 spin_lock(&old->lock);
63609 fs->root = old->root;
63610 path_get(&fs->root);
63611+ /* instead of calling gr_set_chroot_entries here,
63612+ we call it from every caller of this function
63613+ */
63614 fs->pwd = old->pwd;
63615 path_get(&fs->pwd);
63616 spin_unlock(&old->lock);
63617@@ -139,8 +149,9 @@ int unshare_fs_struct(void)
63618
63619 task_lock(current);
63620 spin_lock(&fs->lock);
63621- kill = !--fs->users;
63622+ kill = !atomic_dec_return(&fs->users);
63623 current->fs = new_fs;
63624+ gr_set_chroot_entries(current, &new_fs->root);
63625 spin_unlock(&fs->lock);
63626 task_unlock(current);
63627
63628@@ -153,13 +164,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
63629
63630 int current_umask(void)
63631 {
63632- return current->fs->umask;
63633+ return current->fs->umask | gr_acl_umask();
63634 }
63635 EXPORT_SYMBOL(current_umask);
63636
63637 /* to be mentioned only in INIT_TASK */
63638 struct fs_struct init_fs = {
63639- .users = 1,
63640+ .users = ATOMIC_INIT(1),
63641 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
63642 .seq = SEQCNT_ZERO(init_fs.seq),
63643 .umask = 0022,
63644diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
63645index aec01be..cf81ff9 100644
63646--- a/fs/fscache/cookie.c
63647+++ b/fs/fscache/cookie.c
63648@@ -19,7 +19,7 @@
63649
63650 struct kmem_cache *fscache_cookie_jar;
63651
63652-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
63653+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
63654
63655 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
63656 static int fscache_alloc_object(struct fscache_cache *cache,
63657@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
63658 parent ? (char *) parent->def->name : "<no-parent>",
63659 def->name, netfs_data, enable);
63660
63661- fscache_stat(&fscache_n_acquires);
63662+ fscache_stat_unchecked(&fscache_n_acquires);
63663
63664 /* if there's no parent cookie, then we don't create one here either */
63665 if (!parent) {
63666- fscache_stat(&fscache_n_acquires_null);
63667+ fscache_stat_unchecked(&fscache_n_acquires_null);
63668 _leave(" [no parent]");
63669 return NULL;
63670 }
63671@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
63672 /* allocate and initialise a cookie */
63673 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
63674 if (!cookie) {
63675- fscache_stat(&fscache_n_acquires_oom);
63676+ fscache_stat_unchecked(&fscache_n_acquires_oom);
63677 _leave(" [ENOMEM]");
63678 return NULL;
63679 }
63680@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
63681
63682 switch (cookie->def->type) {
63683 case FSCACHE_COOKIE_TYPE_INDEX:
63684- fscache_stat(&fscache_n_cookie_index);
63685+ fscache_stat_unchecked(&fscache_n_cookie_index);
63686 break;
63687 case FSCACHE_COOKIE_TYPE_DATAFILE:
63688- fscache_stat(&fscache_n_cookie_data);
63689+ fscache_stat_unchecked(&fscache_n_cookie_data);
63690 break;
63691 default:
63692- fscache_stat(&fscache_n_cookie_special);
63693+ fscache_stat_unchecked(&fscache_n_cookie_special);
63694 break;
63695 }
63696
63697@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
63698 } else {
63699 atomic_dec(&parent->n_children);
63700 __fscache_cookie_put(cookie);
63701- fscache_stat(&fscache_n_acquires_nobufs);
63702+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
63703 _leave(" = NULL");
63704 return NULL;
63705 }
63706@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
63707 }
63708 }
63709
63710- fscache_stat(&fscache_n_acquires_ok);
63711+ fscache_stat_unchecked(&fscache_n_acquires_ok);
63712 _leave(" = %p", cookie);
63713 return cookie;
63714 }
63715@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
63716 cache = fscache_select_cache_for_object(cookie->parent);
63717 if (!cache) {
63718 up_read(&fscache_addremove_sem);
63719- fscache_stat(&fscache_n_acquires_no_cache);
63720+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
63721 _leave(" = -ENOMEDIUM [no cache]");
63722 return -ENOMEDIUM;
63723 }
63724@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
63725 object = cache->ops->alloc_object(cache, cookie);
63726 fscache_stat_d(&fscache_n_cop_alloc_object);
63727 if (IS_ERR(object)) {
63728- fscache_stat(&fscache_n_object_no_alloc);
63729+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
63730 ret = PTR_ERR(object);
63731 goto error;
63732 }
63733
63734- fscache_stat(&fscache_n_object_alloc);
63735+ fscache_stat_unchecked(&fscache_n_object_alloc);
63736
63737- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
63738+ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
63739
63740 _debug("ALLOC OBJ%x: %s {%lx}",
63741 object->debug_id, cookie->def->name, object->events);
63742@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
63743
63744 _enter("{%s}", cookie->def->name);
63745
63746- fscache_stat(&fscache_n_invalidates);
63747+ fscache_stat_unchecked(&fscache_n_invalidates);
63748
63749 /* Only permit invalidation of data files. Invalidating an index will
63750 * require the caller to release all its attachments to the tree rooted
63751@@ -477,10 +477,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
63752 {
63753 struct fscache_object *object;
63754
63755- fscache_stat(&fscache_n_updates);
63756+ fscache_stat_unchecked(&fscache_n_updates);
63757
63758 if (!cookie) {
63759- fscache_stat(&fscache_n_updates_null);
63760+ fscache_stat_unchecked(&fscache_n_updates_null);
63761 _leave(" [no cookie]");
63762 return;
63763 }
63764@@ -581,12 +581,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
63765 */
63766 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
63767 {
63768- fscache_stat(&fscache_n_relinquishes);
63769+ fscache_stat_unchecked(&fscache_n_relinquishes);
63770 if (retire)
63771- fscache_stat(&fscache_n_relinquishes_retire);
63772+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
63773
63774 if (!cookie) {
63775- fscache_stat(&fscache_n_relinquishes_null);
63776+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
63777 _leave(" [no cookie]");
63778 return;
63779 }
63780@@ -687,7 +687,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
63781 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
63782 goto inconsistent;
63783
63784- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
63785+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
63786
63787 __fscache_use_cookie(cookie);
63788 if (fscache_submit_op(object, op) < 0)
63789diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
63790index bc6c08f..09c0d96 100644
63791--- a/fs/fscache/internal.h
63792+++ b/fs/fscache/internal.h
63793@@ -139,8 +139,8 @@ extern void fscache_operation_gc(struct work_struct *);
63794 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
63795 extern int fscache_wait_for_operation_activation(struct fscache_object *,
63796 struct fscache_operation *,
63797- atomic_t *,
63798- atomic_t *,
63799+ atomic_unchecked_t *,
63800+ atomic_unchecked_t *,
63801 void (*)(struct fscache_operation *));
63802 extern void fscache_invalidate_writes(struct fscache_cookie *);
63803
63804@@ -159,101 +159,101 @@ extern void fscache_proc_cleanup(void);
63805 * stats.c
63806 */
63807 #ifdef CONFIG_FSCACHE_STATS
63808-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
63809-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
63810+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
63811+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
63812
63813-extern atomic_t fscache_n_op_pend;
63814-extern atomic_t fscache_n_op_run;
63815-extern atomic_t fscache_n_op_enqueue;
63816-extern atomic_t fscache_n_op_deferred_release;
63817-extern atomic_t fscache_n_op_release;
63818-extern atomic_t fscache_n_op_gc;
63819-extern atomic_t fscache_n_op_cancelled;
63820-extern atomic_t fscache_n_op_rejected;
63821+extern atomic_unchecked_t fscache_n_op_pend;
63822+extern atomic_unchecked_t fscache_n_op_run;
63823+extern atomic_unchecked_t fscache_n_op_enqueue;
63824+extern atomic_unchecked_t fscache_n_op_deferred_release;
63825+extern atomic_unchecked_t fscache_n_op_release;
63826+extern atomic_unchecked_t fscache_n_op_gc;
63827+extern atomic_unchecked_t fscache_n_op_cancelled;
63828+extern atomic_unchecked_t fscache_n_op_rejected;
63829
63830-extern atomic_t fscache_n_attr_changed;
63831-extern atomic_t fscache_n_attr_changed_ok;
63832-extern atomic_t fscache_n_attr_changed_nobufs;
63833-extern atomic_t fscache_n_attr_changed_nomem;
63834-extern atomic_t fscache_n_attr_changed_calls;
63835+extern atomic_unchecked_t fscache_n_attr_changed;
63836+extern atomic_unchecked_t fscache_n_attr_changed_ok;
63837+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
63838+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
63839+extern atomic_unchecked_t fscache_n_attr_changed_calls;
63840
63841-extern atomic_t fscache_n_allocs;
63842-extern atomic_t fscache_n_allocs_ok;
63843-extern atomic_t fscache_n_allocs_wait;
63844-extern atomic_t fscache_n_allocs_nobufs;
63845-extern atomic_t fscache_n_allocs_intr;
63846-extern atomic_t fscache_n_allocs_object_dead;
63847-extern atomic_t fscache_n_alloc_ops;
63848-extern atomic_t fscache_n_alloc_op_waits;
63849+extern atomic_unchecked_t fscache_n_allocs;
63850+extern atomic_unchecked_t fscache_n_allocs_ok;
63851+extern atomic_unchecked_t fscache_n_allocs_wait;
63852+extern atomic_unchecked_t fscache_n_allocs_nobufs;
63853+extern atomic_unchecked_t fscache_n_allocs_intr;
63854+extern atomic_unchecked_t fscache_n_allocs_object_dead;
63855+extern atomic_unchecked_t fscache_n_alloc_ops;
63856+extern atomic_unchecked_t fscache_n_alloc_op_waits;
63857
63858-extern atomic_t fscache_n_retrievals;
63859-extern atomic_t fscache_n_retrievals_ok;
63860-extern atomic_t fscache_n_retrievals_wait;
63861-extern atomic_t fscache_n_retrievals_nodata;
63862-extern atomic_t fscache_n_retrievals_nobufs;
63863-extern atomic_t fscache_n_retrievals_intr;
63864-extern atomic_t fscache_n_retrievals_nomem;
63865-extern atomic_t fscache_n_retrievals_object_dead;
63866-extern atomic_t fscache_n_retrieval_ops;
63867-extern atomic_t fscache_n_retrieval_op_waits;
63868+extern atomic_unchecked_t fscache_n_retrievals;
63869+extern atomic_unchecked_t fscache_n_retrievals_ok;
63870+extern atomic_unchecked_t fscache_n_retrievals_wait;
63871+extern atomic_unchecked_t fscache_n_retrievals_nodata;
63872+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
63873+extern atomic_unchecked_t fscache_n_retrievals_intr;
63874+extern atomic_unchecked_t fscache_n_retrievals_nomem;
63875+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
63876+extern atomic_unchecked_t fscache_n_retrieval_ops;
63877+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
63878
63879-extern atomic_t fscache_n_stores;
63880-extern atomic_t fscache_n_stores_ok;
63881-extern atomic_t fscache_n_stores_again;
63882-extern atomic_t fscache_n_stores_nobufs;
63883-extern atomic_t fscache_n_stores_oom;
63884-extern atomic_t fscache_n_store_ops;
63885-extern atomic_t fscache_n_store_calls;
63886-extern atomic_t fscache_n_store_pages;
63887-extern atomic_t fscache_n_store_radix_deletes;
63888-extern atomic_t fscache_n_store_pages_over_limit;
63889+extern atomic_unchecked_t fscache_n_stores;
63890+extern atomic_unchecked_t fscache_n_stores_ok;
63891+extern atomic_unchecked_t fscache_n_stores_again;
63892+extern atomic_unchecked_t fscache_n_stores_nobufs;
63893+extern atomic_unchecked_t fscache_n_stores_oom;
63894+extern atomic_unchecked_t fscache_n_store_ops;
63895+extern atomic_unchecked_t fscache_n_store_calls;
63896+extern atomic_unchecked_t fscache_n_store_pages;
63897+extern atomic_unchecked_t fscache_n_store_radix_deletes;
63898+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
63899
63900-extern atomic_t fscache_n_store_vmscan_not_storing;
63901-extern atomic_t fscache_n_store_vmscan_gone;
63902-extern atomic_t fscache_n_store_vmscan_busy;
63903-extern atomic_t fscache_n_store_vmscan_cancelled;
63904-extern atomic_t fscache_n_store_vmscan_wait;
63905+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
63906+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
63907+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
63908+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
63909+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
63910
63911-extern atomic_t fscache_n_marks;
63912-extern atomic_t fscache_n_uncaches;
63913+extern atomic_unchecked_t fscache_n_marks;
63914+extern atomic_unchecked_t fscache_n_uncaches;
63915
63916-extern atomic_t fscache_n_acquires;
63917-extern atomic_t fscache_n_acquires_null;
63918-extern atomic_t fscache_n_acquires_no_cache;
63919-extern atomic_t fscache_n_acquires_ok;
63920-extern atomic_t fscache_n_acquires_nobufs;
63921-extern atomic_t fscache_n_acquires_oom;
63922+extern atomic_unchecked_t fscache_n_acquires;
63923+extern atomic_unchecked_t fscache_n_acquires_null;
63924+extern atomic_unchecked_t fscache_n_acquires_no_cache;
63925+extern atomic_unchecked_t fscache_n_acquires_ok;
63926+extern atomic_unchecked_t fscache_n_acquires_nobufs;
63927+extern atomic_unchecked_t fscache_n_acquires_oom;
63928
63929-extern atomic_t fscache_n_invalidates;
63930-extern atomic_t fscache_n_invalidates_run;
63931+extern atomic_unchecked_t fscache_n_invalidates;
63932+extern atomic_unchecked_t fscache_n_invalidates_run;
63933
63934-extern atomic_t fscache_n_updates;
63935-extern atomic_t fscache_n_updates_null;
63936-extern atomic_t fscache_n_updates_run;
63937+extern atomic_unchecked_t fscache_n_updates;
63938+extern atomic_unchecked_t fscache_n_updates_null;
63939+extern atomic_unchecked_t fscache_n_updates_run;
63940
63941-extern atomic_t fscache_n_relinquishes;
63942-extern atomic_t fscache_n_relinquishes_null;
63943-extern atomic_t fscache_n_relinquishes_waitcrt;
63944-extern atomic_t fscache_n_relinquishes_retire;
63945+extern atomic_unchecked_t fscache_n_relinquishes;
63946+extern atomic_unchecked_t fscache_n_relinquishes_null;
63947+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
63948+extern atomic_unchecked_t fscache_n_relinquishes_retire;
63949
63950-extern atomic_t fscache_n_cookie_index;
63951-extern atomic_t fscache_n_cookie_data;
63952-extern atomic_t fscache_n_cookie_special;
63953+extern atomic_unchecked_t fscache_n_cookie_index;
63954+extern atomic_unchecked_t fscache_n_cookie_data;
63955+extern atomic_unchecked_t fscache_n_cookie_special;
63956
63957-extern atomic_t fscache_n_object_alloc;
63958-extern atomic_t fscache_n_object_no_alloc;
63959-extern atomic_t fscache_n_object_lookups;
63960-extern atomic_t fscache_n_object_lookups_negative;
63961-extern atomic_t fscache_n_object_lookups_positive;
63962-extern atomic_t fscache_n_object_lookups_timed_out;
63963-extern atomic_t fscache_n_object_created;
63964-extern atomic_t fscache_n_object_avail;
63965-extern atomic_t fscache_n_object_dead;
63966+extern atomic_unchecked_t fscache_n_object_alloc;
63967+extern atomic_unchecked_t fscache_n_object_no_alloc;
63968+extern atomic_unchecked_t fscache_n_object_lookups;
63969+extern atomic_unchecked_t fscache_n_object_lookups_negative;
63970+extern atomic_unchecked_t fscache_n_object_lookups_positive;
63971+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
63972+extern atomic_unchecked_t fscache_n_object_created;
63973+extern atomic_unchecked_t fscache_n_object_avail;
63974+extern atomic_unchecked_t fscache_n_object_dead;
63975
63976-extern atomic_t fscache_n_checkaux_none;
63977-extern atomic_t fscache_n_checkaux_okay;
63978-extern atomic_t fscache_n_checkaux_update;
63979-extern atomic_t fscache_n_checkaux_obsolete;
63980+extern atomic_unchecked_t fscache_n_checkaux_none;
63981+extern atomic_unchecked_t fscache_n_checkaux_okay;
63982+extern atomic_unchecked_t fscache_n_checkaux_update;
63983+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
63984
63985 extern atomic_t fscache_n_cop_alloc_object;
63986 extern atomic_t fscache_n_cop_lookup_object;
63987@@ -278,6 +278,11 @@ static inline void fscache_stat(atomic_t *stat)
63988 atomic_inc(stat);
63989 }
63990
63991+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
63992+{
63993+ atomic_inc_unchecked(stat);
63994+}
63995+
63996 static inline void fscache_stat_d(atomic_t *stat)
63997 {
63998 atomic_dec(stat);
63999@@ -290,6 +295,7 @@ extern const struct file_operations fscache_stats_fops;
64000
64001 #define __fscache_stat(stat) (NULL)
64002 #define fscache_stat(stat) do {} while (0)
64003+#define fscache_stat_unchecked(stat) do {} while (0)
64004 #define fscache_stat_d(stat) do {} while (0)
64005 #endif
64006
64007diff --git a/fs/fscache/object.c b/fs/fscache/object.c
64008index d3b4539..ed0c659 100644
64009--- a/fs/fscache/object.c
64010+++ b/fs/fscache/object.c
64011@@ -454,7 +454,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
64012 _debug("LOOKUP \"%s\" in \"%s\"",
64013 cookie->def->name, object->cache->tag->name);
64014
64015- fscache_stat(&fscache_n_object_lookups);
64016+ fscache_stat_unchecked(&fscache_n_object_lookups);
64017 fscache_stat(&fscache_n_cop_lookup_object);
64018 ret = object->cache->ops->lookup_object(object);
64019 fscache_stat_d(&fscache_n_cop_lookup_object);
64020@@ -464,7 +464,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
64021 if (ret == -ETIMEDOUT) {
64022 /* probably stuck behind another object, so move this one to
64023 * the back of the queue */
64024- fscache_stat(&fscache_n_object_lookups_timed_out);
64025+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
64026 _leave(" [timeout]");
64027 return NO_TRANSIT;
64028 }
64029@@ -492,7 +492,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
64030 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
64031
64032 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
64033- fscache_stat(&fscache_n_object_lookups_negative);
64034+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
64035
64036 /* Allow write requests to begin stacking up and read requests to begin
64037 * returning ENODATA.
64038@@ -527,7 +527,7 @@ void fscache_obtained_object(struct fscache_object *object)
64039 /* if we were still looking up, then we must have a positive lookup
64040 * result, in which case there may be data available */
64041 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
64042- fscache_stat(&fscache_n_object_lookups_positive);
64043+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
64044
64045 /* We do (presumably) have data */
64046 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
64047@@ -539,7 +539,7 @@ void fscache_obtained_object(struct fscache_object *object)
64048 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
64049 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
64050 } else {
64051- fscache_stat(&fscache_n_object_created);
64052+ fscache_stat_unchecked(&fscache_n_object_created);
64053 }
64054
64055 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
64056@@ -575,7 +575,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
64057 fscache_stat_d(&fscache_n_cop_lookup_complete);
64058
64059 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
64060- fscache_stat(&fscache_n_object_avail);
64061+ fscache_stat_unchecked(&fscache_n_object_avail);
64062
64063 _leave("");
64064 return transit_to(JUMPSTART_DEPS);
64065@@ -722,7 +722,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
64066
64067 /* this just shifts the object release to the work processor */
64068 fscache_put_object(object);
64069- fscache_stat(&fscache_n_object_dead);
64070+ fscache_stat_unchecked(&fscache_n_object_dead);
64071
64072 _leave("");
64073 return transit_to(OBJECT_DEAD);
64074@@ -887,7 +887,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
64075 enum fscache_checkaux result;
64076
64077 if (!object->cookie->def->check_aux) {
64078- fscache_stat(&fscache_n_checkaux_none);
64079+ fscache_stat_unchecked(&fscache_n_checkaux_none);
64080 return FSCACHE_CHECKAUX_OKAY;
64081 }
64082
64083@@ -896,17 +896,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
64084 switch (result) {
64085 /* entry okay as is */
64086 case FSCACHE_CHECKAUX_OKAY:
64087- fscache_stat(&fscache_n_checkaux_okay);
64088+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
64089 break;
64090
64091 /* entry requires update */
64092 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
64093- fscache_stat(&fscache_n_checkaux_update);
64094+ fscache_stat_unchecked(&fscache_n_checkaux_update);
64095 break;
64096
64097 /* entry requires deletion */
64098 case FSCACHE_CHECKAUX_OBSOLETE:
64099- fscache_stat(&fscache_n_checkaux_obsolete);
64100+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
64101 break;
64102
64103 default:
64104@@ -992,7 +992,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
64105 {
64106 const struct fscache_state *s;
64107
64108- fscache_stat(&fscache_n_invalidates_run);
64109+ fscache_stat_unchecked(&fscache_n_invalidates_run);
64110 fscache_stat(&fscache_n_cop_invalidate_object);
64111 s = _fscache_invalidate_object(object, event);
64112 fscache_stat_d(&fscache_n_cop_invalidate_object);
64113@@ -1007,7 +1007,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
64114 {
64115 _enter("{OBJ%x},%d", object->debug_id, event);
64116
64117- fscache_stat(&fscache_n_updates_run);
64118+ fscache_stat_unchecked(&fscache_n_updates_run);
64119 fscache_stat(&fscache_n_cop_update_object);
64120 object->cache->ops->update_object(object);
64121 fscache_stat_d(&fscache_n_cop_update_object);
64122diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
64123index e7b87a0..a85d47a 100644
64124--- a/fs/fscache/operation.c
64125+++ b/fs/fscache/operation.c
64126@@ -17,7 +17,7 @@
64127 #include <linux/slab.h>
64128 #include "internal.h"
64129
64130-atomic_t fscache_op_debug_id;
64131+atomic_unchecked_t fscache_op_debug_id;
64132 EXPORT_SYMBOL(fscache_op_debug_id);
64133
64134 /**
64135@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
64136 ASSERTCMP(atomic_read(&op->usage), >, 0);
64137 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
64138
64139- fscache_stat(&fscache_n_op_enqueue);
64140+ fscache_stat_unchecked(&fscache_n_op_enqueue);
64141 switch (op->flags & FSCACHE_OP_TYPE) {
64142 case FSCACHE_OP_ASYNC:
64143 _debug("queue async");
64144@@ -72,7 +72,7 @@ static void fscache_run_op(struct fscache_object *object,
64145 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
64146 if (op->processor)
64147 fscache_enqueue_operation(op);
64148- fscache_stat(&fscache_n_op_run);
64149+ fscache_stat_unchecked(&fscache_n_op_run);
64150 }
64151
64152 /*
64153@@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
64154 if (object->n_in_progress > 0) {
64155 atomic_inc(&op->usage);
64156 list_add_tail(&op->pend_link, &object->pending_ops);
64157- fscache_stat(&fscache_n_op_pend);
64158+ fscache_stat_unchecked(&fscache_n_op_pend);
64159 } else if (!list_empty(&object->pending_ops)) {
64160 atomic_inc(&op->usage);
64161 list_add_tail(&op->pend_link, &object->pending_ops);
64162- fscache_stat(&fscache_n_op_pend);
64163+ fscache_stat_unchecked(&fscache_n_op_pend);
64164 fscache_start_operations(object);
64165 } else {
64166 ASSERTCMP(object->n_in_progress, ==, 0);
64167@@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
64168 object->n_exclusive++; /* reads and writes must wait */
64169 atomic_inc(&op->usage);
64170 list_add_tail(&op->pend_link, &object->pending_ops);
64171- fscache_stat(&fscache_n_op_pend);
64172+ fscache_stat_unchecked(&fscache_n_op_pend);
64173 ret = 0;
64174 } else {
64175 /* If we're in any other state, there must have been an I/O
64176@@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_object *object,
64177 if (object->n_exclusive > 0) {
64178 atomic_inc(&op->usage);
64179 list_add_tail(&op->pend_link, &object->pending_ops);
64180- fscache_stat(&fscache_n_op_pend);
64181+ fscache_stat_unchecked(&fscache_n_op_pend);
64182 } else if (!list_empty(&object->pending_ops)) {
64183 atomic_inc(&op->usage);
64184 list_add_tail(&op->pend_link, &object->pending_ops);
64185- fscache_stat(&fscache_n_op_pend);
64186+ fscache_stat_unchecked(&fscache_n_op_pend);
64187 fscache_start_operations(object);
64188 } else {
64189 ASSERTCMP(object->n_exclusive, ==, 0);
64190@@ -227,10 +227,10 @@ int fscache_submit_op(struct fscache_object *object,
64191 object->n_ops++;
64192 atomic_inc(&op->usage);
64193 list_add_tail(&op->pend_link, &object->pending_ops);
64194- fscache_stat(&fscache_n_op_pend);
64195+ fscache_stat_unchecked(&fscache_n_op_pend);
64196 ret = 0;
64197 } else if (fscache_object_is_dying(object)) {
64198- fscache_stat(&fscache_n_op_rejected);
64199+ fscache_stat_unchecked(&fscache_n_op_rejected);
64200 op->state = FSCACHE_OP_ST_CANCELLED;
64201 ret = -ENOBUFS;
64202 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
64203@@ -309,7 +309,7 @@ int fscache_cancel_op(struct fscache_operation *op,
64204 ret = -EBUSY;
64205 if (op->state == FSCACHE_OP_ST_PENDING) {
64206 ASSERT(!list_empty(&op->pend_link));
64207- fscache_stat(&fscache_n_op_cancelled);
64208+ fscache_stat_unchecked(&fscache_n_op_cancelled);
64209 list_del_init(&op->pend_link);
64210 if (do_cancel)
64211 do_cancel(op);
64212@@ -341,7 +341,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
64213 while (!list_empty(&object->pending_ops)) {
64214 op = list_entry(object->pending_ops.next,
64215 struct fscache_operation, pend_link);
64216- fscache_stat(&fscache_n_op_cancelled);
64217+ fscache_stat_unchecked(&fscache_n_op_cancelled);
64218 list_del_init(&op->pend_link);
64219
64220 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
64221@@ -413,7 +413,7 @@ void fscache_put_operation(struct fscache_operation *op)
64222 op->state, ==, FSCACHE_OP_ST_CANCELLED);
64223 op->state = FSCACHE_OP_ST_DEAD;
64224
64225- fscache_stat(&fscache_n_op_release);
64226+ fscache_stat_unchecked(&fscache_n_op_release);
64227
64228 if (op->release) {
64229 op->release(op);
64230@@ -432,7 +432,7 @@ void fscache_put_operation(struct fscache_operation *op)
64231 * lock, and defer it otherwise */
64232 if (!spin_trylock(&object->lock)) {
64233 _debug("defer put");
64234- fscache_stat(&fscache_n_op_deferred_release);
64235+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
64236
64237 cache = object->cache;
64238 spin_lock(&cache->op_gc_list_lock);
64239@@ -485,7 +485,7 @@ void fscache_operation_gc(struct work_struct *work)
64240
64241 _debug("GC DEFERRED REL OBJ%x OP%x",
64242 object->debug_id, op->debug_id);
64243- fscache_stat(&fscache_n_op_gc);
64244+ fscache_stat_unchecked(&fscache_n_op_gc);
64245
64246 ASSERTCMP(atomic_read(&op->usage), ==, 0);
64247 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
64248diff --git a/fs/fscache/page.c b/fs/fscache/page.c
64249index ed70714..67f4982 100644
64250--- a/fs/fscache/page.c
64251+++ b/fs/fscache/page.c
64252@@ -61,7 +61,7 @@ try_again:
64253 val = radix_tree_lookup(&cookie->stores, page->index);
64254 if (!val) {
64255 rcu_read_unlock();
64256- fscache_stat(&fscache_n_store_vmscan_not_storing);
64257+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
64258 __fscache_uncache_page(cookie, page);
64259 return true;
64260 }
64261@@ -91,11 +91,11 @@ try_again:
64262 spin_unlock(&cookie->stores_lock);
64263
64264 if (xpage) {
64265- fscache_stat(&fscache_n_store_vmscan_cancelled);
64266- fscache_stat(&fscache_n_store_radix_deletes);
64267+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
64268+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
64269 ASSERTCMP(xpage, ==, page);
64270 } else {
64271- fscache_stat(&fscache_n_store_vmscan_gone);
64272+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
64273 }
64274
64275 wake_up_bit(&cookie->flags, 0);
64276@@ -110,11 +110,11 @@ page_busy:
64277 * sleeping on memory allocation, so we may need to impose a timeout
64278 * too. */
64279 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
64280- fscache_stat(&fscache_n_store_vmscan_busy);
64281+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
64282 return false;
64283 }
64284
64285- fscache_stat(&fscache_n_store_vmscan_wait);
64286+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
64287 __fscache_wait_on_page_write(cookie, page);
64288 gfp &= ~__GFP_WAIT;
64289 goto try_again;
64290@@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
64291 FSCACHE_COOKIE_STORING_TAG);
64292 if (!radix_tree_tag_get(&cookie->stores, page->index,
64293 FSCACHE_COOKIE_PENDING_TAG)) {
64294- fscache_stat(&fscache_n_store_radix_deletes);
64295+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
64296 xpage = radix_tree_delete(&cookie->stores, page->index);
64297 }
64298 spin_unlock(&cookie->stores_lock);
64299@@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
64300
64301 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
64302
64303- fscache_stat(&fscache_n_attr_changed_calls);
64304+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
64305
64306 if (fscache_object_is_active(object)) {
64307 fscache_stat(&fscache_n_cop_attr_changed);
64308@@ -188,11 +188,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
64309
64310 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
64311
64312- fscache_stat(&fscache_n_attr_changed);
64313+ fscache_stat_unchecked(&fscache_n_attr_changed);
64314
64315 op = kzalloc(sizeof(*op), GFP_KERNEL);
64316 if (!op) {
64317- fscache_stat(&fscache_n_attr_changed_nomem);
64318+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
64319 _leave(" = -ENOMEM");
64320 return -ENOMEM;
64321 }
64322@@ -214,7 +214,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
64323 if (fscache_submit_exclusive_op(object, op) < 0)
64324 goto nobufs;
64325 spin_unlock(&cookie->lock);
64326- fscache_stat(&fscache_n_attr_changed_ok);
64327+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
64328 fscache_put_operation(op);
64329 _leave(" = 0");
64330 return 0;
64331@@ -225,7 +225,7 @@ nobufs:
64332 kfree(op);
64333 if (wake_cookie)
64334 __fscache_wake_unused_cookie(cookie);
64335- fscache_stat(&fscache_n_attr_changed_nobufs);
64336+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
64337 _leave(" = %d", -ENOBUFS);
64338 return -ENOBUFS;
64339 }
64340@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
64341 /* allocate a retrieval operation and attempt to submit it */
64342 op = kzalloc(sizeof(*op), GFP_NOIO);
64343 if (!op) {
64344- fscache_stat(&fscache_n_retrievals_nomem);
64345+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
64346 return NULL;
64347 }
64348
64349@@ -294,13 +294,13 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
64350 return 0;
64351 }
64352
64353- fscache_stat(&fscache_n_retrievals_wait);
64354+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
64355
64356 jif = jiffies;
64357 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
64358 fscache_wait_bit_interruptible,
64359 TASK_INTERRUPTIBLE) != 0) {
64360- fscache_stat(&fscache_n_retrievals_intr);
64361+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
64362 _leave(" = -ERESTARTSYS");
64363 return -ERESTARTSYS;
64364 }
64365@@ -329,8 +329,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
64366 */
64367 int fscache_wait_for_operation_activation(struct fscache_object *object,
64368 struct fscache_operation *op,
64369- atomic_t *stat_op_waits,
64370- atomic_t *stat_object_dead,
64371+ atomic_unchecked_t *stat_op_waits,
64372+ atomic_unchecked_t *stat_object_dead,
64373 void (*do_cancel)(struct fscache_operation *))
64374 {
64375 int ret;
64376@@ -340,7 +340,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
64377
64378 _debug(">>> WT");
64379 if (stat_op_waits)
64380- fscache_stat(stat_op_waits);
64381+ fscache_stat_unchecked(stat_op_waits);
64382 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
64383 fscache_wait_bit_interruptible,
64384 TASK_INTERRUPTIBLE) != 0) {
64385@@ -358,7 +358,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
64386 check_if_dead:
64387 if (op->state == FSCACHE_OP_ST_CANCELLED) {
64388 if (stat_object_dead)
64389- fscache_stat(stat_object_dead);
64390+ fscache_stat_unchecked(stat_object_dead);
64391 _leave(" = -ENOBUFS [cancelled]");
64392 return -ENOBUFS;
64393 }
64394@@ -366,7 +366,7 @@ check_if_dead:
64395 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
64396 fscache_cancel_op(op, do_cancel);
64397 if (stat_object_dead)
64398- fscache_stat(stat_object_dead);
64399+ fscache_stat_unchecked(stat_object_dead);
64400 return -ENOBUFS;
64401 }
64402 return 0;
64403@@ -394,7 +394,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
64404
64405 _enter("%p,%p,,,", cookie, page);
64406
64407- fscache_stat(&fscache_n_retrievals);
64408+ fscache_stat_unchecked(&fscache_n_retrievals);
64409
64410 if (hlist_empty(&cookie->backing_objects))
64411 goto nobufs;
64412@@ -436,7 +436,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
64413 goto nobufs_unlock_dec;
64414 spin_unlock(&cookie->lock);
64415
64416- fscache_stat(&fscache_n_retrieval_ops);
64417+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
64418
64419 /* pin the netfs read context in case we need to do the actual netfs
64420 * read because we've encountered a cache read failure */
64421@@ -467,15 +467,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
64422
64423 error:
64424 if (ret == -ENOMEM)
64425- fscache_stat(&fscache_n_retrievals_nomem);
64426+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
64427 else if (ret == -ERESTARTSYS)
64428- fscache_stat(&fscache_n_retrievals_intr);
64429+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
64430 else if (ret == -ENODATA)
64431- fscache_stat(&fscache_n_retrievals_nodata);
64432+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
64433 else if (ret < 0)
64434- fscache_stat(&fscache_n_retrievals_nobufs);
64435+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
64436 else
64437- fscache_stat(&fscache_n_retrievals_ok);
64438+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
64439
64440 fscache_put_retrieval(op);
64441 _leave(" = %d", ret);
64442@@ -490,7 +490,7 @@ nobufs_unlock:
64443 __fscache_wake_unused_cookie(cookie);
64444 kfree(op);
64445 nobufs:
64446- fscache_stat(&fscache_n_retrievals_nobufs);
64447+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
64448 _leave(" = -ENOBUFS");
64449 return -ENOBUFS;
64450 }
64451@@ -529,7 +529,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
64452
64453 _enter("%p,,%d,,,", cookie, *nr_pages);
64454
64455- fscache_stat(&fscache_n_retrievals);
64456+ fscache_stat_unchecked(&fscache_n_retrievals);
64457
64458 if (hlist_empty(&cookie->backing_objects))
64459 goto nobufs;
64460@@ -567,7 +567,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
64461 goto nobufs_unlock_dec;
64462 spin_unlock(&cookie->lock);
64463
64464- fscache_stat(&fscache_n_retrieval_ops);
64465+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
64466
64467 /* pin the netfs read context in case we need to do the actual netfs
64468 * read because we've encountered a cache read failure */
64469@@ -598,15 +598,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
64470
64471 error:
64472 if (ret == -ENOMEM)
64473- fscache_stat(&fscache_n_retrievals_nomem);
64474+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
64475 else if (ret == -ERESTARTSYS)
64476- fscache_stat(&fscache_n_retrievals_intr);
64477+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
64478 else if (ret == -ENODATA)
64479- fscache_stat(&fscache_n_retrievals_nodata);
64480+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
64481 else if (ret < 0)
64482- fscache_stat(&fscache_n_retrievals_nobufs);
64483+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
64484 else
64485- fscache_stat(&fscache_n_retrievals_ok);
64486+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
64487
64488 fscache_put_retrieval(op);
64489 _leave(" = %d", ret);
64490@@ -621,7 +621,7 @@ nobufs_unlock:
64491 if (wake_cookie)
64492 __fscache_wake_unused_cookie(cookie);
64493 nobufs:
64494- fscache_stat(&fscache_n_retrievals_nobufs);
64495+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
64496 _leave(" = -ENOBUFS");
64497 return -ENOBUFS;
64498 }
64499@@ -646,7 +646,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
64500
64501 _enter("%p,%p,,,", cookie, page);
64502
64503- fscache_stat(&fscache_n_allocs);
64504+ fscache_stat_unchecked(&fscache_n_allocs);
64505
64506 if (hlist_empty(&cookie->backing_objects))
64507 goto nobufs;
64508@@ -680,7 +680,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
64509 goto nobufs_unlock_dec;
64510 spin_unlock(&cookie->lock);
64511
64512- fscache_stat(&fscache_n_alloc_ops);
64513+ fscache_stat_unchecked(&fscache_n_alloc_ops);
64514
64515 ret = fscache_wait_for_operation_activation(
64516 object, &op->op,
64517@@ -697,11 +697,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
64518
64519 error:
64520 if (ret == -ERESTARTSYS)
64521- fscache_stat(&fscache_n_allocs_intr);
64522+ fscache_stat_unchecked(&fscache_n_allocs_intr);
64523 else if (ret < 0)
64524- fscache_stat(&fscache_n_allocs_nobufs);
64525+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
64526 else
64527- fscache_stat(&fscache_n_allocs_ok);
64528+ fscache_stat_unchecked(&fscache_n_allocs_ok);
64529
64530 fscache_put_retrieval(op);
64531 _leave(" = %d", ret);
64532@@ -715,7 +715,7 @@ nobufs_unlock:
64533 if (wake_cookie)
64534 __fscache_wake_unused_cookie(cookie);
64535 nobufs:
64536- fscache_stat(&fscache_n_allocs_nobufs);
64537+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
64538 _leave(" = -ENOBUFS");
64539 return -ENOBUFS;
64540 }
64541@@ -791,7 +791,7 @@ static void fscache_write_op(struct fscache_operation *_op)
64542
64543 spin_lock(&cookie->stores_lock);
64544
64545- fscache_stat(&fscache_n_store_calls);
64546+ fscache_stat_unchecked(&fscache_n_store_calls);
64547
64548 /* find a page to store */
64549 page = NULL;
64550@@ -802,7 +802,7 @@ static void fscache_write_op(struct fscache_operation *_op)
64551 page = results[0];
64552 _debug("gang %d [%lx]", n, page->index);
64553 if (page->index > op->store_limit) {
64554- fscache_stat(&fscache_n_store_pages_over_limit);
64555+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
64556 goto superseded;
64557 }
64558
64559@@ -814,7 +814,7 @@ static void fscache_write_op(struct fscache_operation *_op)
64560 spin_unlock(&cookie->stores_lock);
64561 spin_unlock(&object->lock);
64562
64563- fscache_stat(&fscache_n_store_pages);
64564+ fscache_stat_unchecked(&fscache_n_store_pages);
64565 fscache_stat(&fscache_n_cop_write_page);
64566 ret = object->cache->ops->write_page(op, page);
64567 fscache_stat_d(&fscache_n_cop_write_page);
64568@@ -918,7 +918,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
64569 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
64570 ASSERT(PageFsCache(page));
64571
64572- fscache_stat(&fscache_n_stores);
64573+ fscache_stat_unchecked(&fscache_n_stores);
64574
64575 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
64576 _leave(" = -ENOBUFS [invalidating]");
64577@@ -977,7 +977,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
64578 spin_unlock(&cookie->stores_lock);
64579 spin_unlock(&object->lock);
64580
64581- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
64582+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
64583 op->store_limit = object->store_limit;
64584
64585 __fscache_use_cookie(cookie);
64586@@ -986,8 +986,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
64587
64588 spin_unlock(&cookie->lock);
64589 radix_tree_preload_end();
64590- fscache_stat(&fscache_n_store_ops);
64591- fscache_stat(&fscache_n_stores_ok);
64592+ fscache_stat_unchecked(&fscache_n_store_ops);
64593+ fscache_stat_unchecked(&fscache_n_stores_ok);
64594
64595 /* the work queue now carries its own ref on the object */
64596 fscache_put_operation(&op->op);
64597@@ -995,14 +995,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
64598 return 0;
64599
64600 already_queued:
64601- fscache_stat(&fscache_n_stores_again);
64602+ fscache_stat_unchecked(&fscache_n_stores_again);
64603 already_pending:
64604 spin_unlock(&cookie->stores_lock);
64605 spin_unlock(&object->lock);
64606 spin_unlock(&cookie->lock);
64607 radix_tree_preload_end();
64608 kfree(op);
64609- fscache_stat(&fscache_n_stores_ok);
64610+ fscache_stat_unchecked(&fscache_n_stores_ok);
64611 _leave(" = 0");
64612 return 0;
64613
64614@@ -1024,14 +1024,14 @@ nobufs:
64615 kfree(op);
64616 if (wake_cookie)
64617 __fscache_wake_unused_cookie(cookie);
64618- fscache_stat(&fscache_n_stores_nobufs);
64619+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
64620 _leave(" = -ENOBUFS");
64621 return -ENOBUFS;
64622
64623 nomem_free:
64624 kfree(op);
64625 nomem:
64626- fscache_stat(&fscache_n_stores_oom);
64627+ fscache_stat_unchecked(&fscache_n_stores_oom);
64628 _leave(" = -ENOMEM");
64629 return -ENOMEM;
64630 }
64631@@ -1049,7 +1049,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
64632 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
64633 ASSERTCMP(page, !=, NULL);
64634
64635- fscache_stat(&fscache_n_uncaches);
64636+ fscache_stat_unchecked(&fscache_n_uncaches);
64637
64638 /* cache withdrawal may beat us to it */
64639 if (!PageFsCache(page))
64640@@ -1100,7 +1100,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
64641 struct fscache_cookie *cookie = op->op.object->cookie;
64642
64643 #ifdef CONFIG_FSCACHE_STATS
64644- atomic_inc(&fscache_n_marks);
64645+ atomic_inc_unchecked(&fscache_n_marks);
64646 #endif
64647
64648 _debug("- mark %p{%lx}", page, page->index);
64649diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
64650index 40d13c7..ddf52b9 100644
64651--- a/fs/fscache/stats.c
64652+++ b/fs/fscache/stats.c
64653@@ -18,99 +18,99 @@
64654 /*
64655 * operation counters
64656 */
64657-atomic_t fscache_n_op_pend;
64658-atomic_t fscache_n_op_run;
64659-atomic_t fscache_n_op_enqueue;
64660-atomic_t fscache_n_op_requeue;
64661-atomic_t fscache_n_op_deferred_release;
64662-atomic_t fscache_n_op_release;
64663-atomic_t fscache_n_op_gc;
64664-atomic_t fscache_n_op_cancelled;
64665-atomic_t fscache_n_op_rejected;
64666+atomic_unchecked_t fscache_n_op_pend;
64667+atomic_unchecked_t fscache_n_op_run;
64668+atomic_unchecked_t fscache_n_op_enqueue;
64669+atomic_unchecked_t fscache_n_op_requeue;
64670+atomic_unchecked_t fscache_n_op_deferred_release;
64671+atomic_unchecked_t fscache_n_op_release;
64672+atomic_unchecked_t fscache_n_op_gc;
64673+atomic_unchecked_t fscache_n_op_cancelled;
64674+atomic_unchecked_t fscache_n_op_rejected;
64675
64676-atomic_t fscache_n_attr_changed;
64677-atomic_t fscache_n_attr_changed_ok;
64678-atomic_t fscache_n_attr_changed_nobufs;
64679-atomic_t fscache_n_attr_changed_nomem;
64680-atomic_t fscache_n_attr_changed_calls;
64681+atomic_unchecked_t fscache_n_attr_changed;
64682+atomic_unchecked_t fscache_n_attr_changed_ok;
64683+atomic_unchecked_t fscache_n_attr_changed_nobufs;
64684+atomic_unchecked_t fscache_n_attr_changed_nomem;
64685+atomic_unchecked_t fscache_n_attr_changed_calls;
64686
64687-atomic_t fscache_n_allocs;
64688-atomic_t fscache_n_allocs_ok;
64689-atomic_t fscache_n_allocs_wait;
64690-atomic_t fscache_n_allocs_nobufs;
64691-atomic_t fscache_n_allocs_intr;
64692-atomic_t fscache_n_allocs_object_dead;
64693-atomic_t fscache_n_alloc_ops;
64694-atomic_t fscache_n_alloc_op_waits;
64695+atomic_unchecked_t fscache_n_allocs;
64696+atomic_unchecked_t fscache_n_allocs_ok;
64697+atomic_unchecked_t fscache_n_allocs_wait;
64698+atomic_unchecked_t fscache_n_allocs_nobufs;
64699+atomic_unchecked_t fscache_n_allocs_intr;
64700+atomic_unchecked_t fscache_n_allocs_object_dead;
64701+atomic_unchecked_t fscache_n_alloc_ops;
64702+atomic_unchecked_t fscache_n_alloc_op_waits;
64703
64704-atomic_t fscache_n_retrievals;
64705-atomic_t fscache_n_retrievals_ok;
64706-atomic_t fscache_n_retrievals_wait;
64707-atomic_t fscache_n_retrievals_nodata;
64708-atomic_t fscache_n_retrievals_nobufs;
64709-atomic_t fscache_n_retrievals_intr;
64710-atomic_t fscache_n_retrievals_nomem;
64711-atomic_t fscache_n_retrievals_object_dead;
64712-atomic_t fscache_n_retrieval_ops;
64713-atomic_t fscache_n_retrieval_op_waits;
64714+atomic_unchecked_t fscache_n_retrievals;
64715+atomic_unchecked_t fscache_n_retrievals_ok;
64716+atomic_unchecked_t fscache_n_retrievals_wait;
64717+atomic_unchecked_t fscache_n_retrievals_nodata;
64718+atomic_unchecked_t fscache_n_retrievals_nobufs;
64719+atomic_unchecked_t fscache_n_retrievals_intr;
64720+atomic_unchecked_t fscache_n_retrievals_nomem;
64721+atomic_unchecked_t fscache_n_retrievals_object_dead;
64722+atomic_unchecked_t fscache_n_retrieval_ops;
64723+atomic_unchecked_t fscache_n_retrieval_op_waits;
64724
64725-atomic_t fscache_n_stores;
64726-atomic_t fscache_n_stores_ok;
64727-atomic_t fscache_n_stores_again;
64728-atomic_t fscache_n_stores_nobufs;
64729-atomic_t fscache_n_stores_oom;
64730-atomic_t fscache_n_store_ops;
64731-atomic_t fscache_n_store_calls;
64732-atomic_t fscache_n_store_pages;
64733-atomic_t fscache_n_store_radix_deletes;
64734-atomic_t fscache_n_store_pages_over_limit;
64735+atomic_unchecked_t fscache_n_stores;
64736+atomic_unchecked_t fscache_n_stores_ok;
64737+atomic_unchecked_t fscache_n_stores_again;
64738+atomic_unchecked_t fscache_n_stores_nobufs;
64739+atomic_unchecked_t fscache_n_stores_oom;
64740+atomic_unchecked_t fscache_n_store_ops;
64741+atomic_unchecked_t fscache_n_store_calls;
64742+atomic_unchecked_t fscache_n_store_pages;
64743+atomic_unchecked_t fscache_n_store_radix_deletes;
64744+atomic_unchecked_t fscache_n_store_pages_over_limit;
64745
64746-atomic_t fscache_n_store_vmscan_not_storing;
64747-atomic_t fscache_n_store_vmscan_gone;
64748-atomic_t fscache_n_store_vmscan_busy;
64749-atomic_t fscache_n_store_vmscan_cancelled;
64750-atomic_t fscache_n_store_vmscan_wait;
64751+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
64752+atomic_unchecked_t fscache_n_store_vmscan_gone;
64753+atomic_unchecked_t fscache_n_store_vmscan_busy;
64754+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
64755+atomic_unchecked_t fscache_n_store_vmscan_wait;
64756
64757-atomic_t fscache_n_marks;
64758-atomic_t fscache_n_uncaches;
64759+atomic_unchecked_t fscache_n_marks;
64760+atomic_unchecked_t fscache_n_uncaches;
64761
64762-atomic_t fscache_n_acquires;
64763-atomic_t fscache_n_acquires_null;
64764-atomic_t fscache_n_acquires_no_cache;
64765-atomic_t fscache_n_acquires_ok;
64766-atomic_t fscache_n_acquires_nobufs;
64767-atomic_t fscache_n_acquires_oom;
64768+atomic_unchecked_t fscache_n_acquires;
64769+atomic_unchecked_t fscache_n_acquires_null;
64770+atomic_unchecked_t fscache_n_acquires_no_cache;
64771+atomic_unchecked_t fscache_n_acquires_ok;
64772+atomic_unchecked_t fscache_n_acquires_nobufs;
64773+atomic_unchecked_t fscache_n_acquires_oom;
64774
64775-atomic_t fscache_n_invalidates;
64776-atomic_t fscache_n_invalidates_run;
64777+atomic_unchecked_t fscache_n_invalidates;
64778+atomic_unchecked_t fscache_n_invalidates_run;
64779
64780-atomic_t fscache_n_updates;
64781-atomic_t fscache_n_updates_null;
64782-atomic_t fscache_n_updates_run;
64783+atomic_unchecked_t fscache_n_updates;
64784+atomic_unchecked_t fscache_n_updates_null;
64785+atomic_unchecked_t fscache_n_updates_run;
64786
64787-atomic_t fscache_n_relinquishes;
64788-atomic_t fscache_n_relinquishes_null;
64789-atomic_t fscache_n_relinquishes_waitcrt;
64790-atomic_t fscache_n_relinquishes_retire;
64791+atomic_unchecked_t fscache_n_relinquishes;
64792+atomic_unchecked_t fscache_n_relinquishes_null;
64793+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
64794+atomic_unchecked_t fscache_n_relinquishes_retire;
64795
64796-atomic_t fscache_n_cookie_index;
64797-atomic_t fscache_n_cookie_data;
64798-atomic_t fscache_n_cookie_special;
64799+atomic_unchecked_t fscache_n_cookie_index;
64800+atomic_unchecked_t fscache_n_cookie_data;
64801+atomic_unchecked_t fscache_n_cookie_special;
64802
64803-atomic_t fscache_n_object_alloc;
64804-atomic_t fscache_n_object_no_alloc;
64805-atomic_t fscache_n_object_lookups;
64806-atomic_t fscache_n_object_lookups_negative;
64807-atomic_t fscache_n_object_lookups_positive;
64808-atomic_t fscache_n_object_lookups_timed_out;
64809-atomic_t fscache_n_object_created;
64810-atomic_t fscache_n_object_avail;
64811-atomic_t fscache_n_object_dead;
64812+atomic_unchecked_t fscache_n_object_alloc;
64813+atomic_unchecked_t fscache_n_object_no_alloc;
64814+atomic_unchecked_t fscache_n_object_lookups;
64815+atomic_unchecked_t fscache_n_object_lookups_negative;
64816+atomic_unchecked_t fscache_n_object_lookups_positive;
64817+atomic_unchecked_t fscache_n_object_lookups_timed_out;
64818+atomic_unchecked_t fscache_n_object_created;
64819+atomic_unchecked_t fscache_n_object_avail;
64820+atomic_unchecked_t fscache_n_object_dead;
64821
64822-atomic_t fscache_n_checkaux_none;
64823-atomic_t fscache_n_checkaux_okay;
64824-atomic_t fscache_n_checkaux_update;
64825-atomic_t fscache_n_checkaux_obsolete;
64826+atomic_unchecked_t fscache_n_checkaux_none;
64827+atomic_unchecked_t fscache_n_checkaux_okay;
64828+atomic_unchecked_t fscache_n_checkaux_update;
64829+atomic_unchecked_t fscache_n_checkaux_obsolete;
64830
64831 atomic_t fscache_n_cop_alloc_object;
64832 atomic_t fscache_n_cop_lookup_object;
64833@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
64834 seq_puts(m, "FS-Cache statistics\n");
64835
64836 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
64837- atomic_read(&fscache_n_cookie_index),
64838- atomic_read(&fscache_n_cookie_data),
64839- atomic_read(&fscache_n_cookie_special));
64840+ atomic_read_unchecked(&fscache_n_cookie_index),
64841+ atomic_read_unchecked(&fscache_n_cookie_data),
64842+ atomic_read_unchecked(&fscache_n_cookie_special));
64843
64844 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
64845- atomic_read(&fscache_n_object_alloc),
64846- atomic_read(&fscache_n_object_no_alloc),
64847- atomic_read(&fscache_n_object_avail),
64848- atomic_read(&fscache_n_object_dead));
64849+ atomic_read_unchecked(&fscache_n_object_alloc),
64850+ atomic_read_unchecked(&fscache_n_object_no_alloc),
64851+ atomic_read_unchecked(&fscache_n_object_avail),
64852+ atomic_read_unchecked(&fscache_n_object_dead));
64853 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
64854- atomic_read(&fscache_n_checkaux_none),
64855- atomic_read(&fscache_n_checkaux_okay),
64856- atomic_read(&fscache_n_checkaux_update),
64857- atomic_read(&fscache_n_checkaux_obsolete));
64858+ atomic_read_unchecked(&fscache_n_checkaux_none),
64859+ atomic_read_unchecked(&fscache_n_checkaux_okay),
64860+ atomic_read_unchecked(&fscache_n_checkaux_update),
64861+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
64862
64863 seq_printf(m, "Pages : mrk=%u unc=%u\n",
64864- atomic_read(&fscache_n_marks),
64865- atomic_read(&fscache_n_uncaches));
64866+ atomic_read_unchecked(&fscache_n_marks),
64867+ atomic_read_unchecked(&fscache_n_uncaches));
64868
64869 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
64870 " oom=%u\n",
64871- atomic_read(&fscache_n_acquires),
64872- atomic_read(&fscache_n_acquires_null),
64873- atomic_read(&fscache_n_acquires_no_cache),
64874- atomic_read(&fscache_n_acquires_ok),
64875- atomic_read(&fscache_n_acquires_nobufs),
64876- atomic_read(&fscache_n_acquires_oom));
64877+ atomic_read_unchecked(&fscache_n_acquires),
64878+ atomic_read_unchecked(&fscache_n_acquires_null),
64879+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
64880+ atomic_read_unchecked(&fscache_n_acquires_ok),
64881+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
64882+ atomic_read_unchecked(&fscache_n_acquires_oom));
64883
64884 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
64885- atomic_read(&fscache_n_object_lookups),
64886- atomic_read(&fscache_n_object_lookups_negative),
64887- atomic_read(&fscache_n_object_lookups_positive),
64888- atomic_read(&fscache_n_object_created),
64889- atomic_read(&fscache_n_object_lookups_timed_out));
64890+ atomic_read_unchecked(&fscache_n_object_lookups),
64891+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
64892+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
64893+ atomic_read_unchecked(&fscache_n_object_created),
64894+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
64895
64896 seq_printf(m, "Invals : n=%u run=%u\n",
64897- atomic_read(&fscache_n_invalidates),
64898- atomic_read(&fscache_n_invalidates_run));
64899+ atomic_read_unchecked(&fscache_n_invalidates),
64900+ atomic_read_unchecked(&fscache_n_invalidates_run));
64901
64902 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
64903- atomic_read(&fscache_n_updates),
64904- atomic_read(&fscache_n_updates_null),
64905- atomic_read(&fscache_n_updates_run));
64906+ atomic_read_unchecked(&fscache_n_updates),
64907+ atomic_read_unchecked(&fscache_n_updates_null),
64908+ atomic_read_unchecked(&fscache_n_updates_run));
64909
64910 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
64911- atomic_read(&fscache_n_relinquishes),
64912- atomic_read(&fscache_n_relinquishes_null),
64913- atomic_read(&fscache_n_relinquishes_waitcrt),
64914- atomic_read(&fscache_n_relinquishes_retire));
64915+ atomic_read_unchecked(&fscache_n_relinquishes),
64916+ atomic_read_unchecked(&fscache_n_relinquishes_null),
64917+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
64918+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
64919
64920 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
64921- atomic_read(&fscache_n_attr_changed),
64922- atomic_read(&fscache_n_attr_changed_ok),
64923- atomic_read(&fscache_n_attr_changed_nobufs),
64924- atomic_read(&fscache_n_attr_changed_nomem),
64925- atomic_read(&fscache_n_attr_changed_calls));
64926+ atomic_read_unchecked(&fscache_n_attr_changed),
64927+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
64928+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
64929+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
64930+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
64931
64932 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
64933- atomic_read(&fscache_n_allocs),
64934- atomic_read(&fscache_n_allocs_ok),
64935- atomic_read(&fscache_n_allocs_wait),
64936- atomic_read(&fscache_n_allocs_nobufs),
64937- atomic_read(&fscache_n_allocs_intr));
64938+ atomic_read_unchecked(&fscache_n_allocs),
64939+ atomic_read_unchecked(&fscache_n_allocs_ok),
64940+ atomic_read_unchecked(&fscache_n_allocs_wait),
64941+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
64942+ atomic_read_unchecked(&fscache_n_allocs_intr));
64943 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
64944- atomic_read(&fscache_n_alloc_ops),
64945- atomic_read(&fscache_n_alloc_op_waits),
64946- atomic_read(&fscache_n_allocs_object_dead));
64947+ atomic_read_unchecked(&fscache_n_alloc_ops),
64948+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
64949+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
64950
64951 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
64952 " int=%u oom=%u\n",
64953- atomic_read(&fscache_n_retrievals),
64954- atomic_read(&fscache_n_retrievals_ok),
64955- atomic_read(&fscache_n_retrievals_wait),
64956- atomic_read(&fscache_n_retrievals_nodata),
64957- atomic_read(&fscache_n_retrievals_nobufs),
64958- atomic_read(&fscache_n_retrievals_intr),
64959- atomic_read(&fscache_n_retrievals_nomem));
64960+ atomic_read_unchecked(&fscache_n_retrievals),
64961+ atomic_read_unchecked(&fscache_n_retrievals_ok),
64962+ atomic_read_unchecked(&fscache_n_retrievals_wait),
64963+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
64964+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
64965+ atomic_read_unchecked(&fscache_n_retrievals_intr),
64966+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
64967 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
64968- atomic_read(&fscache_n_retrieval_ops),
64969- atomic_read(&fscache_n_retrieval_op_waits),
64970- atomic_read(&fscache_n_retrievals_object_dead));
64971+ atomic_read_unchecked(&fscache_n_retrieval_ops),
64972+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
64973+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
64974
64975 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
64976- atomic_read(&fscache_n_stores),
64977- atomic_read(&fscache_n_stores_ok),
64978- atomic_read(&fscache_n_stores_again),
64979- atomic_read(&fscache_n_stores_nobufs),
64980- atomic_read(&fscache_n_stores_oom));
64981+ atomic_read_unchecked(&fscache_n_stores),
64982+ atomic_read_unchecked(&fscache_n_stores_ok),
64983+ atomic_read_unchecked(&fscache_n_stores_again),
64984+ atomic_read_unchecked(&fscache_n_stores_nobufs),
64985+ atomic_read_unchecked(&fscache_n_stores_oom));
64986 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
64987- atomic_read(&fscache_n_store_ops),
64988- atomic_read(&fscache_n_store_calls),
64989- atomic_read(&fscache_n_store_pages),
64990- atomic_read(&fscache_n_store_radix_deletes),
64991- atomic_read(&fscache_n_store_pages_over_limit));
64992+ atomic_read_unchecked(&fscache_n_store_ops),
64993+ atomic_read_unchecked(&fscache_n_store_calls),
64994+ atomic_read_unchecked(&fscache_n_store_pages),
64995+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
64996+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
64997
64998 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
64999- atomic_read(&fscache_n_store_vmscan_not_storing),
65000- atomic_read(&fscache_n_store_vmscan_gone),
65001- atomic_read(&fscache_n_store_vmscan_busy),
65002- atomic_read(&fscache_n_store_vmscan_cancelled),
65003- atomic_read(&fscache_n_store_vmscan_wait));
65004+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
65005+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
65006+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
65007+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
65008+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
65009
65010 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
65011- atomic_read(&fscache_n_op_pend),
65012- atomic_read(&fscache_n_op_run),
65013- atomic_read(&fscache_n_op_enqueue),
65014- atomic_read(&fscache_n_op_cancelled),
65015- atomic_read(&fscache_n_op_rejected));
65016+ atomic_read_unchecked(&fscache_n_op_pend),
65017+ atomic_read_unchecked(&fscache_n_op_run),
65018+ atomic_read_unchecked(&fscache_n_op_enqueue),
65019+ atomic_read_unchecked(&fscache_n_op_cancelled),
65020+ atomic_read_unchecked(&fscache_n_op_rejected));
65021 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
65022- atomic_read(&fscache_n_op_deferred_release),
65023- atomic_read(&fscache_n_op_release),
65024- atomic_read(&fscache_n_op_gc));
65025+ atomic_read_unchecked(&fscache_n_op_deferred_release),
65026+ atomic_read_unchecked(&fscache_n_op_release),
65027+ atomic_read_unchecked(&fscache_n_op_gc));
65028
65029 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
65030 atomic_read(&fscache_n_cop_alloc_object),
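
The stats.c hunk above is the template for most of the atomic_t conversions in this patch: these are pure event counters, where wrap-around is harmless, so they move to atomic_unchecked_t and PaX's REFCOUNT overflow detection is left to guard real reference counts only. A minimal sketch of the shape of the unchecked API follows (the real definitions live in the atomic headers patched elsewhere; the checked atomic_t operations additionally trap on signed overflow, and this single-threaded sketch omits the real implementation's asm and barriers):

typedef struct {
        int counter;
} atomic_unchecked_t;

/* Read and increment with no overflow instrumentation; wrap-around is
 * considered acceptable for callers of this API. */
static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
        return v->counter;
}

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        v->counter++;
}
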
65031diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
65032index 966ace8..030a03a 100644
65033--- a/fs/fuse/cuse.c
65034+++ b/fs/fuse/cuse.c
65035@@ -611,10 +611,12 @@ static int __init cuse_init(void)
65036 INIT_LIST_HEAD(&cuse_conntbl[i]);
65037
65038 /* inherit and extend fuse_dev_operations */
65039- cuse_channel_fops = fuse_dev_operations;
65040- cuse_channel_fops.owner = THIS_MODULE;
65041- cuse_channel_fops.open = cuse_channel_open;
65042- cuse_channel_fops.release = cuse_channel_release;
65043+ pax_open_kernel();
65044+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
65045+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
65046+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
65047+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
65048+ pax_close_kernel();
65049
65050 cuse_class = class_create(THIS_MODULE, "cuse");
65051 if (IS_ERR(cuse_class))
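
cuse_channel_fops is constified by the PaX constify plugin, so the old field-by-field assignment would fault on read-only memory; the hunk instead snapshots fuse_dev_operations and patches the three fields inside a pax_open_kernel()/pax_close_kernel() pair. A rough x86-flavoured sketch of what that pair does (assumption: CR0.WP toggling as PaX does on x86; other architectures use different mechanisms and the exact signatures here are illustrative):

/* Sketch only: momentarily clear CR0.WP so the kernel may write
 * through read-only mappings, with preemption held off meanwhile. */
static inline unsigned long pax_open_kernel(void)
{
        unsigned long cr0;

        preempt_disable();
        cr0 = read_cr0();
        write_cr0(cr0 & ~X86_CR0_WP);   /* writes to RO pages now allowed */
        return cr0;
}

static inline unsigned long pax_close_kernel(void)
{
        unsigned long cr0 = read_cr0() | X86_CR0_WP;

        write_cr0(cr0);                 /* write protection back on */
        preempt_enable();
        return cr0;
}
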
65052diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
65053index ca88731..8e9c55d 100644
65054--- a/fs/fuse/dev.c
65055+++ b/fs/fuse/dev.c
65056@@ -1318,7 +1318,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
65057 ret = 0;
65058 pipe_lock(pipe);
65059
65060- if (!pipe->readers) {
65061+ if (!atomic_read(&pipe->readers)) {
65062 send_sig(SIGPIPE, current, 0);
65063 if (!ret)
65064 ret = -EPIPE;
65065@@ -1347,7 +1347,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
65066 page_nr++;
65067 ret += buf->len;
65068
65069- if (pipe->files)
65070+ if (atomic_read(&pipe->files))
65071 do_wakeup = 1;
65072 }
65073
65074diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
65075index 0c60482..025724f 100644
65076--- a/fs/fuse/dir.c
65077+++ b/fs/fuse/dir.c
65078@@ -1485,7 +1485,7 @@ static char *read_link(struct dentry *dentry)
65079 return link;
65080 }
65081
65082-static void free_link(char *link)
65083+static void free_link(const char *link)
65084 {
65085 if (!IS_ERR(link))
65086 free_page((unsigned long) link);
65087diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
65088index bb529f3..454c253 100644
65089--- a/fs/hostfs/hostfs_kern.c
65090+++ b/fs/hostfs/hostfs_kern.c
65091@@ -898,7 +898,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
65092
65093 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
65094 {
65095- char *s = nd_get_link(nd);
65096+ const char *s = nd_get_link(nd);
65097 if (!IS_ERR(s))
65098 __putname(s);
65099 }
65100diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
65101index 1e2872b..7aea000 100644
65102--- a/fs/hugetlbfs/inode.c
65103+++ b/fs/hugetlbfs/inode.c
65104@@ -154,6 +154,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
65105 struct mm_struct *mm = current->mm;
65106 struct vm_area_struct *vma;
65107 struct hstate *h = hstate_file(file);
65108+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
65109 struct vm_unmapped_area_info info;
65110
65111 if (len & ~huge_page_mask(h))
65112@@ -167,17 +168,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
65113 return addr;
65114 }
65115
65116+#ifdef CONFIG_PAX_RANDMMAP
65117+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
65118+#endif
65119+
65120 if (addr) {
65121 addr = ALIGN(addr, huge_page_size(h));
65122 vma = find_vma(mm, addr);
65123- if (TASK_SIZE - len >= addr &&
65124- (!vma || addr + len <= vma->vm_start))
65125+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
65126 return addr;
65127 }
65128
65129 info.flags = 0;
65130 info.length = len;
65131 info.low_limit = TASK_UNMAPPED_BASE;
65132+
65133+#ifdef CONFIG_PAX_RANDMMAP
65134+ if (mm->pax_flags & MF_PAX_RANDMMAP)
65135+ info.low_limit += mm->delta_mmap;
65136+#endif
65137+
65138 info.high_limit = TASK_SIZE;
65139 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
65140 info.align_offset = 0;
65141@@ -919,7 +929,7 @@ static struct file_system_type hugetlbfs_fs_type = {
65142 };
65143 MODULE_ALIAS_FS("hugetlbfs");
65144
65145-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
65146+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
65147
65148 static int can_do_hugetlb_shm(void)
65149 {
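
This hugetlbfs hunk repeats the pattern PAX_RANDMMAP applies to every get_unmapped_area() in the patch: drop the caller's address hint when randomizing, shift the search floor by the per-mm delta_mmap, and route the free-range test through check_heap_stack_gap() so a randomized guard gap is kept below stack VMAs. A simplified sketch of that test (assumed shape; the real helper also accounts for upward-growing stacks and the configured gap size):

/* Accept [addr, addr+len) only if it ends before the next vma, and
 * keep `offset` extra bytes clear when that vma can grow down (stack). */
static bool check_heap_stack_gap(const struct vm_area_struct *vma,
                                 unsigned long addr, unsigned long len,
                                 unsigned long offset)
{
        if (!vma)
                return true;            /* nothing above the range */
        if (vma->vm_flags & VM_GROWSDOWN)
                return addr + len + offset <= vma->vm_start;
        return addr + len <= vma->vm_start;
}
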
65150diff --git a/fs/inode.c b/fs/inode.c
65151index 6eecb7f..abec305 100644
65152--- a/fs/inode.c
65153+++ b/fs/inode.c
65154@@ -839,16 +839,20 @@ unsigned int get_next_ino(void)
65155 unsigned int *p = &get_cpu_var(last_ino);
65156 unsigned int res = *p;
65157
65158+start:
65159+
65160 #ifdef CONFIG_SMP
65161 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
65162- static atomic_t shared_last_ino;
65163- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
65164+ static atomic_unchecked_t shared_last_ino;
65165+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
65166
65167 res = next - LAST_INO_BATCH;
65168 }
65169 #endif
65170
65171- *p = ++res;
65172+ if (unlikely(!++res))
65173+ goto start; /* never zero */
65174+ *p = res;
65175 put_cpu_var(last_ino);
65176 return res;
65177 }
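
The get_next_ino() hunk fixes a subtle wrap-around bug: after 2^32 allocations the per-cpu counter would hand out inode number 0, which callers treat as "no inode". The retry loop skips that value, and the SMP batch counter becomes unchecked since wrapping is expected and benign there. Condensed, single-threaded form of the fix:

/* Single-threaded sketch of the wrap-safe allocator; the real code
 * above refills the per-cpu counter from a shared batch on SMP. */
static unsigned int next_ino;

unsigned int get_next_ino_sketch(void)
{
        unsigned int res = next_ino;

        do {
                res++;
        } while (res == 0);     /* the 2^32 wrap lands on 0 exactly once */

        next_ino = res;
        return res;
}
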
65178diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
65179index 4a6cf28..d3a29d3 100644
65180--- a/fs/jffs2/erase.c
65181+++ b/fs/jffs2/erase.c
65182@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
65183 struct jffs2_unknown_node marker = {
65184 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
65185 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
65186- .totlen = cpu_to_je32(c->cleanmarker_size)
65187+ .totlen = cpu_to_je32(c->cleanmarker_size),
65188+ .hdr_crc = cpu_to_je32(0)
65189 };
65190
65191 jffs2_prealloc_raw_node_refs(c, jeb, 1);
65192diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
65193index a6597d6..41b30ec 100644
65194--- a/fs/jffs2/wbuf.c
65195+++ b/fs/jffs2/wbuf.c
65196@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
65197 {
65198 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
65199 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
65200- .totlen = constant_cpu_to_je32(8)
65201+ .totlen = constant_cpu_to_je32(8),
65202+ .hdr_crc = constant_cpu_to_je32(0)
65203 };
65204
65205 /*
65206diff --git a/fs/jfs/super.c b/fs/jfs/super.c
65207index adf8cb0..bb935fa 100644
65208--- a/fs/jfs/super.c
65209+++ b/fs/jfs/super.c
65210@@ -893,7 +893,7 @@ static int __init init_jfs_fs(void)
65211
65212 jfs_inode_cachep =
65213 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
65214- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
65215+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
65216 init_once);
65217 if (jfs_inode_cachep == NULL)
65218 return -ENOMEM;
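
PAX_USERCOPY only allows copy_to_user()/copy_from_user() to touch slab objects whose cache has been explicitly whitelisted; jfs inodes carry inline data that reaches userspace, hence SLAB_USERCOPY here. The same one-flag change appears for other caches throughout the patch (SLAB_USERCOPY is the grsecurity-era flag; mainline later adopted kmem_cache_create_usercopy() for the same purpose). The pattern, with hypothetical names:

/* Whitelist a cache for user copies under PAX_USERCOPY; caches without
 * the flag abort when their objects are used as a usercopy source or
 * destination. my_cachep, struct my_obj and my_init_once are
 * illustrative names, not part of the patch. */
my_cachep = kmem_cache_create("my_objs", sizeof(struct my_obj), 0,
                              SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD |
                              SLAB_USERCOPY,
                              my_init_once);
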
65219diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
65220index a693f5b..82276a1 100644
65221--- a/fs/kernfs/dir.c
65222+++ b/fs/kernfs/dir.c
65223@@ -182,7 +182,7 @@ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
65224 *
65225 * Returns 31 bit hash of ns + name (so it fits in an off_t )
65226 */
65227-static unsigned int kernfs_name_hash(const char *name, const void *ns)
65228+static unsigned int kernfs_name_hash(const unsigned char *name, const void *ns)
65229 {
65230 unsigned long hash = init_name_hash();
65231 unsigned int len = strlen(name);
65232diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
65233index d895b4b..0b8af77 100644
65234--- a/fs/kernfs/file.c
65235+++ b/fs/kernfs/file.c
65236@@ -34,7 +34,7 @@ static DEFINE_MUTEX(kernfs_open_file_mutex);
65237
65238 struct kernfs_open_node {
65239 atomic_t refcnt;
65240- atomic_t event;
65241+ atomic_unchecked_t event;
65242 wait_queue_head_t poll;
65243 struct list_head files; /* goes through kernfs_open_file.list */
65244 };
65245@@ -163,7 +163,7 @@ static int kernfs_seq_show(struct seq_file *sf, void *v)
65246 {
65247 struct kernfs_open_file *of = sf->private;
65248
65249- of->event = atomic_read(&of->kn->attr.open->event);
65250+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
65251
65252 return of->kn->attr.ops->seq_show(sf, v);
65253 }
65254@@ -375,12 +375,12 @@ static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
65255 return ret;
65256 }
65257
65258-static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
65259- void *buf, int len, int write)
65260+static ssize_t kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
65261+ void *buf, size_t len, int write)
65262 {
65263 struct file *file = vma->vm_file;
65264 struct kernfs_open_file *of = kernfs_of(file);
65265- int ret;
65266+ ssize_t ret;
65267
65268 if (!of->vm_ops)
65269 return -EINVAL;
65270@@ -581,7 +581,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
65271 return -ENOMEM;
65272
65273 atomic_set(&new_on->refcnt, 0);
65274- atomic_set(&new_on->event, 1);
65275+ atomic_set_unchecked(&new_on->event, 1);
65276 init_waitqueue_head(&new_on->poll);
65277 INIT_LIST_HEAD(&new_on->files);
65278 goto retry;
65279@@ -787,7 +787,7 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
65280
65281 kernfs_put_active(kn);
65282
65283- if (of->event != atomic_read(&on->event))
65284+ if (of->event != atomic_read_unchecked(&on->event))
65285 goto trigger;
65286
65287 return DEFAULT_POLLMASK;
65288@@ -818,7 +818,7 @@ repeat:
65289
65290 on = kn->attr.open;
65291 if (on) {
65292- atomic_inc(&on->event);
65293+ atomic_inc_unchecked(&on->event);
65294 wake_up_interruptible(&on->poll);
65295 }
65296
65297diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
65298index 8a19889..4c3069a 100644
65299--- a/fs/kernfs/symlink.c
65300+++ b/fs/kernfs/symlink.c
65301@@ -128,7 +128,7 @@ static void *kernfs_iop_follow_link(struct dentry *dentry, struct nameidata *nd)
65302 static void kernfs_iop_put_link(struct dentry *dentry, struct nameidata *nd,
65303 void *cookie)
65304 {
65305- char *page = nd_get_link(nd);
65306+ const char *page = nd_get_link(nd);
65307 if (!IS_ERR(page))
65308 free_page((unsigned long)page);
65309 }
65310diff --git a/fs/libfs.c b/fs/libfs.c
65311index 88e3e00..979c262 100644
65312--- a/fs/libfs.c
65313+++ b/fs/libfs.c
65314@@ -160,6 +160,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
65315
65316 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
65317 struct dentry *next = list_entry(p, struct dentry, d_u.d_child);
65318+ char d_name[sizeof(next->d_iname)];
65319+ const unsigned char *name;
65320+
65321 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
65322 if (!simple_positive(next)) {
65323 spin_unlock(&next->d_lock);
65324@@ -168,7 +171,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
65325
65326 spin_unlock(&next->d_lock);
65327 spin_unlock(&dentry->d_lock);
65328- if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
65329+ name = next->d_name.name;
65330+ if (name == next->d_iname) {
65331+ memcpy(d_name, name, next->d_name.len);
65332+ name = d_name;
65333+ }
65334+ if (!dir_emit(ctx, name, next->d_name.len,
65335 next->d_inode->i_ino, dt_type(next->d_inode)))
65336 return 0;
65337 spin_lock(&dentry->d_lock);
65338@@ -1027,7 +1035,7 @@ EXPORT_SYMBOL(noop_fsync);
65339 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
65340 void *cookie)
65341 {
65342- char *s = nd_get_link(nd);
65343+ const char *s = nd_get_link(nd);
65344 if (!IS_ERR(s))
65345 kfree(s);
65346 }
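
The dcache_readdir() change addresses names stored inline in the dentry: once d_lock is dropped, a concurrent rename may rewrite d_iname while dir_emit() is still copying it to userspace (copy_to_user() can fault and sleep), so short names are snapshotted onto the stack first; external names are allocated separately and are left alone here. The reusable shape of the idiom, as a hedged helper:

/* Snapshot an inline dentry name before any sleeping consumer uses it;
 * `stable` must provide sizeof(dentry->d_iname) bytes of storage. The
 * copy is length-delimited, matching how dir_emit() consumes it. */
static const unsigned char *stable_dentry_name(const struct dentry *dentry,
                                               char *stable)
{
        const unsigned char *name = dentry->d_name.name;

        if (name == (const unsigned char *)dentry->d_iname) {
                memcpy(stable, name, dentry->d_name.len);
                return (const unsigned char *)stable;
        }
        return name;    /* external name: not stored inside the dentry */
}
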
65347diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
65348index acd3947..1f896e2 100644
65349--- a/fs/lockd/clntproc.c
65350+++ b/fs/lockd/clntproc.c
65351@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
65352 /*
65353 * Cookie counter for NLM requests
65354 */
65355-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
65356+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
65357
65358 void nlmclnt_next_cookie(struct nlm_cookie *c)
65359 {
65360- u32 cookie = atomic_inc_return(&nlm_cookie);
65361+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
65362
65363 memcpy(c->data, &cookie, 4);
65364 c->len=4;
65365diff --git a/fs/locks.c b/fs/locks.c
65366index 717fbc4..74628c3 100644
65367--- a/fs/locks.c
65368+++ b/fs/locks.c
65369@@ -2327,7 +2327,7 @@ void locks_remove_file(struct file *filp)
65370 locks_remove_posix(filp, (fl_owner_t)filp);
65371
65372 if (filp->f_op->flock) {
65373- struct file_lock fl = {
65374+ struct file_lock flock = {
65375 .fl_owner = (fl_owner_t)filp,
65376 .fl_pid = current->tgid,
65377 .fl_file = filp,
65378@@ -2335,9 +2335,9 @@ void locks_remove_file(struct file *filp)
65379 .fl_type = F_UNLCK,
65380 .fl_end = OFFSET_MAX,
65381 };
65382- filp->f_op->flock(filp, F_SETLKW, &fl);
65383- if (fl.fl_ops && fl.fl_ops->fl_release_private)
65384- fl.fl_ops->fl_release_private(&fl);
65385+ filp->f_op->flock(filp, F_SETLKW, &flock);
65386+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
65387+ flock.fl_ops->fl_release_private(&flock);
65388 }
65389
65390 spin_lock(&inode->i_lock);
65391diff --git a/fs/mount.h b/fs/mount.h
65392index d55297f..f5b28c5 100644
65393--- a/fs/mount.h
65394+++ b/fs/mount.h
65395@@ -11,7 +11,7 @@ struct mnt_namespace {
65396 u64 seq; /* Sequence number to prevent loops */
65397 wait_queue_head_t poll;
65398 u64 event;
65399-};
65400+} __randomize_layout;
65401
65402 struct mnt_pcp {
65403 int mnt_count;
65404@@ -57,7 +57,7 @@ struct mount {
65405 int mnt_expiry_mark; /* true if marked for expiry */
65406 int mnt_pinned;
65407 struct path mnt_ex_mountpoint;
65408-};
65409+} __randomize_layout;
65410
65411 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
65412
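
The __randomize_layout annotations feed GRKERNSEC_RANDSTRUCT: a gcc plugin permutes the member order of marked structures at build time, so an exploit's hard-coded field offsets into mnt_namespace or mount stop being portable across kernel builds. Marker pattern, on a hypothetical struct (the attribute compiles away when the plugin is absent):

/* Illustrative struct: with the randstruct plugin the members below
 * are reordered per build; without it the marker is a no-op. */
struct sensitive_state {
        const struct cred *owner;
        unsigned long flags;
        struct list_head entries;
} __randomize_layout;
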
65413diff --git a/fs/namei.c b/fs/namei.c
65414index 9eb787e..5f520b67 100644
65415--- a/fs/namei.c
65416+++ b/fs/namei.c
65417@@ -330,17 +330,32 @@ int generic_permission(struct inode *inode, int mask)
65418 if (ret != -EACCES)
65419 return ret;
65420
65421+#ifdef CONFIG_GRKERNSEC
65422+ /* we'll block if we have to log due to a denied capability use */
65423+ if (mask & MAY_NOT_BLOCK)
65424+ return -ECHILD;
65425+#endif
65426+
65427 if (S_ISDIR(inode->i_mode)) {
65428 /* DACs are overridable for directories */
65429- if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
65430- return 0;
65431 if (!(mask & MAY_WRITE))
65432- if (capable_wrt_inode_uidgid(inode,
65433- CAP_DAC_READ_SEARCH))
65434+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
65435+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
65436 return 0;
65437+ if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
65438+ return 0;
65439 return -EACCES;
65440 }
65441 /*
65442+ * Searching includes executable on directories, else just read.
65443+ */
65444+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
65445+ if (mask == MAY_READ)
65446+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
65447+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
65448+ return 0;
65449+
65450+ /*
65451 * Read/write DACs are always overridable.
65452 * Executable DACs are overridable when there is
65453 * at least one exec bit set.
65454@@ -349,14 +364,6 @@ int generic_permission(struct inode *inode, int mask)
65455 if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
65456 return 0;
65457
65458- /*
65459- * Searching includes executable on directories, else just read.
65460- */
65461- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
65462- if (mask == MAY_READ)
65463- if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
65464- return 0;
65465-
65466 return -EACCES;
65467 }
65468 EXPORT_SYMBOL(generic_permission);
65469@@ -824,7 +831,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
65470 {
65471 struct dentry *dentry = link->dentry;
65472 int error;
65473- char *s;
65474+ const char *s;
65475
65476 BUG_ON(nd->flags & LOOKUP_RCU);
65477
65478@@ -845,6 +852,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
65479 if (error)
65480 goto out_put_nd_path;
65481
65482+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
65483+ dentry->d_inode, dentry, nd->path.mnt)) {
65484+ error = -EACCES;
65485+ goto out_put_nd_path;
65486+ }
65487+
65488 nd->last_type = LAST_BIND;
65489 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
65490 error = PTR_ERR(*p);
65491@@ -1596,6 +1609,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
65492 if (res)
65493 break;
65494 res = walk_component(nd, path, LOOKUP_FOLLOW);
65495+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
65496+ res = -EACCES;
65497 put_link(nd, &link, cookie);
65498 } while (res > 0);
65499
65500@@ -1669,7 +1684,7 @@ EXPORT_SYMBOL(full_name_hash);
65501 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
65502 {
65503 unsigned long a, b, adata, bdata, mask, hash, len;
65504- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
65505+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
65506
65507 hash = a = 0;
65508 len = -sizeof(unsigned long);
65509@@ -1953,6 +1968,8 @@ static int path_lookupat(int dfd, const char *name,
65510 if (err)
65511 break;
65512 err = lookup_last(nd, &path);
65513+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
65514+ err = -EACCES;
65515 put_link(nd, &link, cookie);
65516 }
65517 }
65518@@ -1960,6 +1977,13 @@ static int path_lookupat(int dfd, const char *name,
65519 if (!err)
65520 err = complete_walk(nd);
65521
65522+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
65523+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
65524+ path_put(&nd->path);
65525+ err = -ENOENT;
65526+ }
65527+ }
65528+
65529 if (!err && nd->flags & LOOKUP_DIRECTORY) {
65530 if (!d_can_lookup(nd->path.dentry)) {
65531 path_put(&nd->path);
65532@@ -1987,8 +2011,15 @@ static int filename_lookup(int dfd, struct filename *name,
65533 retval = path_lookupat(dfd, name->name,
65534 flags | LOOKUP_REVAL, nd);
65535
65536- if (likely(!retval))
65537+ if (likely(!retval)) {
65538 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
65539+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
65540+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
65541+ path_put(&nd->path);
65542+ return -ENOENT;
65543+ }
65544+ }
65545+ }
65546 return retval;
65547 }
65548
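
The gr_* calls woven through namei.c are the grsecurity RBAC and chroot mediation hooks: each path-walk step re-checks the resolved object against subject policy, and a refusal is surfaced either as -ENOENT (conceal the object, as with the hidden-file checks) or -EACCES (deny it, as with the symlink-owner checks). All hooks share the same nonzero-allows convention, sketched below with hypothetical internals (gr_acl_enabled() and gr_task_may_see() are illustrative names, not functions from the patch):

/* Illustrative hook body: nonzero means allow, 0 means deny; mapping a
 * denial to -ENOENT vs. -EACCES is left to the call site. */
int gr_acl_handle_hidden_file(const struct dentry *dentry,
                              const struct vfsmount *mnt)
{
        if (!gr_acl_enabled())          /* RBAC inactive: allow */
                return 1;
        return gr_task_may_see(current, dentry, mnt);
}
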
65549@@ -2570,6 +2601,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
65550 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
65551 return -EPERM;
65552
65553+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
65554+ return -EPERM;
65555+ if (gr_handle_rawio(inode))
65556+ return -EPERM;
65557+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
65558+ return -EACCES;
65559+
65560 return 0;
65561 }
65562
65563@@ -2801,7 +2839,7 @@ looked_up:
65564 * cleared otherwise prior to returning.
65565 */
65566 static int lookup_open(struct nameidata *nd, struct path *path,
65567- struct file *file,
65568+ struct path *link, struct file *file,
65569 const struct open_flags *op,
65570 bool got_write, int *opened)
65571 {
65572@@ -2836,6 +2874,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
65573 /* Negative dentry, just create the file */
65574 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
65575 umode_t mode = op->mode;
65576+
65577+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
65578+ error = -EACCES;
65579+ goto out_dput;
65580+ }
65581+
65582+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
65583+ error = -EACCES;
65584+ goto out_dput;
65585+ }
65586+
65587 if (!IS_POSIXACL(dir->d_inode))
65588 mode &= ~current_umask();
65589 /*
65590@@ -2857,6 +2906,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
65591 nd->flags & LOOKUP_EXCL);
65592 if (error)
65593 goto out_dput;
65594+ else
65595+ gr_handle_create(dentry, nd->path.mnt);
65596 }
65597 out_no_open:
65598 path->dentry = dentry;
65599@@ -2871,7 +2922,7 @@ out_dput:
65600 /*
65601 * Handle the last step of open()
65602 */
65603-static int do_last(struct nameidata *nd, struct path *path,
65604+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
65605 struct file *file, const struct open_flags *op,
65606 int *opened, struct filename *name)
65607 {
65608@@ -2921,6 +2972,15 @@ static int do_last(struct nameidata *nd, struct path *path,
65609 if (error)
65610 return error;
65611
65612+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
65613+ error = -ENOENT;
65614+ goto out;
65615+ }
65616+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
65617+ error = -EACCES;
65618+ goto out;
65619+ }
65620+
65621 audit_inode(name, dir, LOOKUP_PARENT);
65622 error = -EISDIR;
65623 /* trailing slashes? */
65624@@ -2940,7 +3000,7 @@ retry_lookup:
65625 */
65626 }
65627 mutex_lock(&dir->d_inode->i_mutex);
65628- error = lookup_open(nd, path, file, op, got_write, opened);
65629+ error = lookup_open(nd, path, link, file, op, got_write, opened);
65630 mutex_unlock(&dir->d_inode->i_mutex);
65631
65632 if (error <= 0) {
65633@@ -2964,11 +3024,28 @@ retry_lookup:
65634 goto finish_open_created;
65635 }
65636
65637+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
65638+ error = -ENOENT;
65639+ goto exit_dput;
65640+ }
65641+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
65642+ error = -EACCES;
65643+ goto exit_dput;
65644+ }
65645+
65646 /*
65647 * create/update audit record if it already exists.
65648 */
65649- if (d_is_positive(path->dentry))
65650+ if (d_is_positive(path->dentry)) {
65651+ /* only check if O_CREAT is specified, all other checks need to go
65652+ into may_open */
65653+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
65654+ error = -EACCES;
65655+ goto exit_dput;
65656+ }
65657+
65658 audit_inode(name, path->dentry, 0);
65659+ }
65660
65661 /*
65662 * If atomic_open() acquired write access it is dropped now due to
65663@@ -3009,6 +3086,11 @@ finish_lookup:
65664 }
65665 }
65666 BUG_ON(inode != path->dentry->d_inode);
65667+ /* if we're resolving a symlink to another symlink */
65668+ if (link && gr_handle_symlink_owner(link, inode)) {
65669+ error = -EACCES;
65670+ goto out;
65671+ }
65672 return 1;
65673 }
65674
65675@@ -3018,7 +3100,6 @@ finish_lookup:
65676 save_parent.dentry = nd->path.dentry;
65677 save_parent.mnt = mntget(path->mnt);
65678 nd->path.dentry = path->dentry;
65679-
65680 }
65681 nd->inode = inode;
65682 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
65683@@ -3028,7 +3109,18 @@ finish_open:
65684 path_put(&save_parent);
65685 return error;
65686 }
65687+
65688+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
65689+ error = -ENOENT;
65690+ goto out;
65691+ }
65692+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
65693+ error = -EACCES;
65694+ goto out;
65695+ }
65696+
65697 audit_inode(name, nd->path.dentry, 0);
65698+
65699 error = -EISDIR;
65700 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
65701 goto out;
65702@@ -3191,7 +3283,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
65703 if (unlikely(error))
65704 goto out;
65705
65706- error = do_last(nd, &path, file, op, &opened, pathname);
65707+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
65708 while (unlikely(error > 0)) { /* trailing symlink */
65709 struct path link = path;
65710 void *cookie;
65711@@ -3209,7 +3301,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
65712 error = follow_link(&link, nd, &cookie);
65713 if (unlikely(error))
65714 break;
65715- error = do_last(nd, &path, file, op, &opened, pathname);
65716+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
65717 put_link(nd, &link, cookie);
65718 }
65719 out:
65720@@ -3309,9 +3401,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
65721 goto unlock;
65722
65723 error = -EEXIST;
65724- if (d_is_positive(dentry))
65725+ if (d_is_positive(dentry)) {
65726+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
65727+ error = -ENOENT;
65728 goto fail;
65729-
65730+ }
65731 /*
65732 * Special case - lookup gave negative, but... we had foo/bar/
65733 * From the vfs_mknod() POV we just have a negative dentry -
65734@@ -3363,6 +3457,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
65735 }
65736 EXPORT_SYMBOL(user_path_create);
65737
65738+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
65739+{
65740+ struct filename *tmp = getname(pathname);
65741+ struct dentry *res;
65742+ if (IS_ERR(tmp))
65743+ return ERR_CAST(tmp);
65744+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
65745+ if (IS_ERR(res))
65746+ putname(tmp);
65747+ else
65748+ *to = tmp;
65749+ return res;
65750+}
65751+
65752 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
65753 {
65754 int error = may_create(dir, dentry);
65755@@ -3426,6 +3534,17 @@ retry:
65756
65757 if (!IS_POSIXACL(path.dentry->d_inode))
65758 mode &= ~current_umask();
65759+
65760+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
65761+ error = -EPERM;
65762+ goto out;
65763+ }
65764+
65765+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
65766+ error = -EACCES;
65767+ goto out;
65768+ }
65769+
65770 error = security_path_mknod(&path, dentry, mode, dev);
65771 if (error)
65772 goto out;
65773@@ -3442,6 +3561,8 @@ retry:
65774 break;
65775 }
65776 out:
65777+ if (!error)
65778+ gr_handle_create(dentry, path.mnt);
65779 done_path_create(&path, dentry);
65780 if (retry_estale(error, lookup_flags)) {
65781 lookup_flags |= LOOKUP_REVAL;
65782@@ -3495,9 +3616,16 @@ retry:
65783
65784 if (!IS_POSIXACL(path.dentry->d_inode))
65785 mode &= ~current_umask();
65786+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
65787+ error = -EACCES;
65788+ goto out;
65789+ }
65790 error = security_path_mkdir(&path, dentry, mode);
65791 if (!error)
65792 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
65793+ if (!error)
65794+ gr_handle_create(dentry, path.mnt);
65795+out:
65796 done_path_create(&path, dentry);
65797 if (retry_estale(error, lookup_flags)) {
65798 lookup_flags |= LOOKUP_REVAL;
65799@@ -3580,6 +3708,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
65800 struct filename *name;
65801 struct dentry *dentry;
65802 struct nameidata nd;
65803+ ino_t saved_ino = 0;
65804+ dev_t saved_dev = 0;
65805 unsigned int lookup_flags = 0;
65806 retry:
65807 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
65808@@ -3612,10 +3742,21 @@ retry:
65809 error = -ENOENT;
65810 goto exit3;
65811 }
65812+
65813+ saved_ino = dentry->d_inode->i_ino;
65814+ saved_dev = gr_get_dev_from_dentry(dentry);
65815+
65816+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
65817+ error = -EACCES;
65818+ goto exit3;
65819+ }
65820+
65821 error = security_path_rmdir(&nd.path, dentry);
65822 if (error)
65823 goto exit3;
65824 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
65825+ if (!error && (saved_dev || saved_ino))
65826+ gr_handle_delete(saved_ino, saved_dev);
65827 exit3:
65828 dput(dentry);
65829 exit2:
65830@@ -3706,6 +3847,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
65831 struct nameidata nd;
65832 struct inode *inode = NULL;
65833 struct inode *delegated_inode = NULL;
65834+ ino_t saved_ino = 0;
65835+ dev_t saved_dev = 0;
65836 unsigned int lookup_flags = 0;
65837 retry:
65838 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
65839@@ -3732,10 +3875,22 @@ retry_deleg:
65840 if (d_is_negative(dentry))
65841 goto slashes;
65842 ihold(inode);
65843+
65844+ if (inode->i_nlink <= 1) {
65845+ saved_ino = inode->i_ino;
65846+ saved_dev = gr_get_dev_from_dentry(dentry);
65847+ }
65848+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
65849+ error = -EACCES;
65850+ goto exit2;
65851+ }
65852+
65853 error = security_path_unlink(&nd.path, dentry);
65854 if (error)
65855 goto exit2;
65856 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
65857+ if (!error && (saved_ino || saved_dev))
65858+ gr_handle_delete(saved_ino, saved_dev);
65859 exit2:
65860 dput(dentry);
65861 }
65862@@ -3824,9 +3979,17 @@ retry:
65863 if (IS_ERR(dentry))
65864 goto out_putname;
65865
65866+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
65867+ error = -EACCES;
65868+ goto out;
65869+ }
65870+
65871 error = security_path_symlink(&path, dentry, from->name);
65872 if (!error)
65873 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
65874+ if (!error)
65875+ gr_handle_create(dentry, path.mnt);
65876+out:
65877 done_path_create(&path, dentry);
65878 if (retry_estale(error, lookup_flags)) {
65879 lookup_flags |= LOOKUP_REVAL;
65880@@ -3930,6 +4093,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
65881 struct dentry *new_dentry;
65882 struct path old_path, new_path;
65883 struct inode *delegated_inode = NULL;
65884+ struct filename *to = NULL;
65885 int how = 0;
65886 int error;
65887
65888@@ -3953,7 +4117,7 @@ retry:
65889 if (error)
65890 return error;
65891
65892- new_dentry = user_path_create(newdfd, newname, &new_path,
65893+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
65894 (how & LOOKUP_REVAL));
65895 error = PTR_ERR(new_dentry);
65896 if (IS_ERR(new_dentry))
65897@@ -3965,11 +4129,28 @@ retry:
65898 error = may_linkat(&old_path);
65899 if (unlikely(error))
65900 goto out_dput;
65901+
65902+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
65903+ old_path.dentry->d_inode,
65904+ old_path.dentry->d_inode->i_mode, to)) {
65905+ error = -EACCES;
65906+ goto out_dput;
65907+ }
65908+
65909+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
65910+ old_path.dentry, old_path.mnt, to)) {
65911+ error = -EACCES;
65912+ goto out_dput;
65913+ }
65914+
65915 error = security_path_link(old_path.dentry, &new_path, new_dentry);
65916 if (error)
65917 goto out_dput;
65918 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
65919+ if (!error)
65920+ gr_handle_create(new_dentry, new_path.mnt);
65921 out_dput:
65922+ putname(to);
65923 done_path_create(&new_path, new_dentry);
65924 if (delegated_inode) {
65925 error = break_deleg_wait(&delegated_inode);
65926@@ -4279,6 +4460,12 @@ retry_deleg:
65927 if (new_dentry == trap)
65928 goto exit5;
65929
65930+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
65931+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
65932+ to, flags);
65933+ if (error)
65934+ goto exit5;
65935+
65936 error = security_path_rename(&oldnd.path, old_dentry,
65937 &newnd.path, new_dentry, flags);
65938 if (error)
65939@@ -4286,6 +4473,9 @@ retry_deleg:
65940 error = vfs_rename(old_dir->d_inode, old_dentry,
65941 new_dir->d_inode, new_dentry,
65942 &delegated_inode, flags);
65943+ if (!error)
65944+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
65945+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0, flags);
65946 exit5:
65947 dput(new_dentry);
65948 exit4:
65949@@ -4328,14 +4518,24 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
65950
65951 int readlink_copy(char __user *buffer, int buflen, const char *link)
65952 {
65953+ char tmpbuf[64];
65954+ const char *newlink;
65955 int len = PTR_ERR(link);
65956+
65957 if (IS_ERR(link))
65958 goto out;
65959
65960 len = strlen(link);
65961 if (len > (unsigned) buflen)
65962 len = buflen;
65963- if (copy_to_user(buffer, link, len))
65964+
65965+ if (len < sizeof(tmpbuf)) {
65966+ memcpy(tmpbuf, link, len);
65967+ newlink = tmpbuf;
65968+ } else
65969+ newlink = link;
65970+
65971+ if (copy_to_user(buffer, newlink, len))
65972 len = -EFAULT;
65973 out:
65974 return len;
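
readlink_copy() gains a bounce buffer: link bodies shorter than 64 bytes are copied to the stack before copy_to_user(), so the faultable (and therefore potentially sleeping) user copy no longer reads straight out of the filesystem-owned buffer, narrowing any window in which that buffer could change or be reclaimed underneath it. Generic form of the idiom, as a sketch:

/* Bounce small kernel data through the stack before a faultable user
 * copy; larger payloads fall back to the original buffer, as above. */
static long copy_small_to_user(void __user *dst, const void *src,
                               unsigned int len)
{
        char tmp[64];

        if (len < sizeof(tmp)) {
                memcpy(tmp, src, len);
                src = tmp;
        }
        return copy_to_user(dst, src, len) ? -EFAULT : (long)len;
}
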
65975diff --git a/fs/namespace.c b/fs/namespace.c
65976index 182bc41..72e3cf1 100644
65977--- a/fs/namespace.c
65978+++ b/fs/namespace.c
65979@@ -1348,6 +1348,9 @@ static int do_umount(struct mount *mnt, int flags)
65980 if (!(sb->s_flags & MS_RDONLY))
65981 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
65982 up_write(&sb->s_umount);
65983+
65984+ gr_log_remount(mnt->mnt_devname, retval);
65985+
65986 return retval;
65987 }
65988
65989@@ -1370,6 +1373,9 @@ static int do_umount(struct mount *mnt, int flags)
65990 }
65991 unlock_mount_hash();
65992 namespace_unlock();
65993+
65994+ gr_log_unmount(mnt->mnt_devname, retval);
65995+
65996 return retval;
65997 }
65998
65999@@ -1389,7 +1395,7 @@ static inline bool may_mount(void)
66000 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
66001 */
66002
66003-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
66004+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
66005 {
66006 struct path path;
66007 struct mount *mnt;
66008@@ -1431,7 +1437,7 @@ out:
66009 /*
66010 * The 2.0 compatible umount. No flags.
66011 */
66012-SYSCALL_DEFINE1(oldumount, char __user *, name)
66013+SYSCALL_DEFINE1(oldumount, const char __user *, name)
66014 {
66015 return sys_umount(name, 0);
66016 }
66017@@ -2440,6 +2446,16 @@ long do_mount(const char *dev_name, const char *dir_name,
66018 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
66019 MS_STRICTATIME);
66020
66021+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
66022+ retval = -EPERM;
66023+ goto dput_out;
66024+ }
66025+
66026+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
66027+ retval = -EPERM;
66028+ goto dput_out;
66029+ }
66030+
66031 if (flags & MS_REMOUNT)
66032 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
66033 data_page);
66034@@ -2454,6 +2470,9 @@ long do_mount(const char *dev_name, const char *dir_name,
66035 dev_name, data_page);
66036 dput_out:
66037 path_put(&path);
66038+
66039+ gr_log_mount(dev_name, dir_name, retval);
66040+
66041 return retval;
66042 }
66043
66044@@ -2471,7 +2490,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
66045 * number incrementing at 10Ghz will take 12,427 years to wrap which
66046 * is effectively never, so we can ignore the possibility.
66047 */
66048-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
66049+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
66050
66051 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
66052 {
66053@@ -2486,7 +2505,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
66054 kfree(new_ns);
66055 return ERR_PTR(ret);
66056 }
66057- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
66058+ new_ns->seq = atomic64_inc_return_unchecked(&mnt_ns_seq);
66059 atomic_set(&new_ns->count, 1);
66060 new_ns->root = NULL;
66061 INIT_LIST_HEAD(&new_ns->list);
66062@@ -2496,7 +2515,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
66063 return new_ns;
66064 }
66065
66066-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
66067+__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
66068 struct user_namespace *user_ns, struct fs_struct *new_fs)
66069 {
66070 struct mnt_namespace *new_ns;
66071@@ -2617,8 +2636,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
66072 }
66073 EXPORT_SYMBOL(mount_subtree);
66074
66075-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
66076- char __user *, type, unsigned long, flags, void __user *, data)
66077+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
66078+ const char __user *, type, unsigned long, flags, void __user *, data)
66079 {
66080 int ret;
66081 char *kernel_type;
66082@@ -2731,6 +2750,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
66083 if (error)
66084 goto out2;
66085
66086+ if (gr_handle_chroot_pivot()) {
66087+ error = -EPERM;
66088+ goto out2;
66089+ }
66090+
66091 get_fs_root(current->fs, &root);
66092 old_mp = lock_mount(&old);
66093 error = PTR_ERR(old_mp);
66094@@ -2999,7 +3023,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
66095 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
66096 return -EPERM;
66097
66098- if (fs->users != 1)
66099+ if (atomic_read(&fs->users) != 1)
66100 return -EINVAL;
66101
66102 get_mnt_ns(mnt_ns);
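
Three namespace.c themes converge above: mount, remount and umount events gain gr_log_* audit records; mnt_ns_seq, a 64-bit counter the in-tree comment already declares effectively unwrappable, moves to the unchecked type; and copy_mnt_ns() is tagged __latent_entropy so the latent_entropy gcc plugin mixes control-flow-dependent values from it into the boot-time entropy pool. A sketch of how the marker degrades when the plugin is off (the config symbol spelling here is assumed):

#ifdef LATENT_ENTROPY_PLUGIN
# define __latent_entropy __attribute__((latent_entropy))
#else
# define __latent_entropy
#endif

/* Marked function: the plugin instruments the body to stir a per-cpu
 * latent entropy word; the source itself is unchanged. */
__latent_entropy void seed_worker(void)
{
        /* ordinary body */
}
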
66103diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
66104index f4ccfe6..a5cf064 100644
66105--- a/fs/nfs/callback_xdr.c
66106+++ b/fs/nfs/callback_xdr.c
66107@@ -51,7 +51,7 @@ struct callback_op {
66108 callback_decode_arg_t decode_args;
66109 callback_encode_res_t encode_res;
66110 long res_maxsize;
66111-};
66112+} __do_const;
66113
66114 static struct callback_op callback_ops[];
66115
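
__do_const is the complement of the pax_open_kernel() pattern seen earlier: it tells the constify plugin that a structure of function pointers (here callback_op, below nfsd4_operation) must live in read-only memory even though it is not declared const, removing these dispatch tables as overwrite targets. Marker pattern on a hypothetical table:

/* Illustrative dispatch table: with the constify plugin every instance
 * is placed read-only; init-time writes then need pax_open_kernel(). */
struct op_table {
        long (*decode)(void *argp, void *p);
        long (*encode)(void *resp, void *p);
        long res_maxsize;
} __do_const;
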
66116diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
66117index 9927913..faffc5c 100644
66118--- a/fs/nfs/inode.c
66119+++ b/fs/nfs/inode.c
66120@@ -1219,16 +1219,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
66121 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
66122 }
66123
66124-static atomic_long_t nfs_attr_generation_counter;
66125+static atomic_long_unchecked_t nfs_attr_generation_counter;
66126
66127 static unsigned long nfs_read_attr_generation_counter(void)
66128 {
66129- return atomic_long_read(&nfs_attr_generation_counter);
66130+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
66131 }
66132
66133 unsigned long nfs_inc_attr_generation_counter(void)
66134 {
66135- return atomic_long_inc_return(&nfs_attr_generation_counter);
66136+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
66137 }
66138
66139 void nfs_fattr_init(struct nfs_fattr *fattr)
66140diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
66141index 8f029db..3688b84 100644
66142--- a/fs/nfsd/nfs4proc.c
66143+++ b/fs/nfsd/nfs4proc.c
66144@@ -1157,7 +1157,7 @@ struct nfsd4_operation {
66145 nfsd4op_rsize op_rsize_bop;
66146 stateid_getter op_get_currentstateid;
66147 stateid_setter op_set_currentstateid;
66148-};
66149+} __do_const;
66150
66151 static struct nfsd4_operation nfsd4_ops[];
66152
66153diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
66154index 944275c..6fc40a7 100644
66155--- a/fs/nfsd/nfs4xdr.c
66156+++ b/fs/nfsd/nfs4xdr.c
66157@@ -1539,7 +1539,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
66158
66159 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
66160
66161-static nfsd4_dec nfsd4_dec_ops[] = {
66162+static const nfsd4_dec nfsd4_dec_ops[] = {
66163 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
66164 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
66165 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
66166diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
66167index 6040da8..4348565 100644
66168--- a/fs/nfsd/nfscache.c
66169+++ b/fs/nfsd/nfscache.c
66170@@ -518,17 +518,20 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
66171 {
66172 struct svc_cacherep *rp = rqstp->rq_cacherep;
66173 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
66174- int len;
66175+ long len;
66176 size_t bufsize = 0;
66177
66178 if (!rp)
66179 return;
66180
66181- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
66182- len >>= 2;
66183+ if (statp) {
66184+ len = (char*)statp - (char*)resv->iov_base;
66185+ len = resv->iov_len - len;
66186+ len >>= 2;
66187+ }
66188
66189 /* Don't cache excessive amounts of data and XDR failures */
66190- if (!statp || len > (256 >> 2)) {
66191+ if (!statp || len > (256 >> 2) || len < 0) {
66192 nfsd_reply_cache_free(rp);
66193 return;
66194 }
66195@@ -536,7 +539,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
66196 switch (cachetype) {
66197 case RC_REPLSTAT:
66198 if (len != 1)
66199- printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
66200+ printk("nfsd: RC_REPLSTAT/reply len %ld!\n",len);
66201 rp->c_replstat = *statp;
66202 break;
66203 case RC_REPLBUFF:
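
The nfsd_cache_update() hunk closes a signedness hole: the reply length was computed into an int and only tested against an upper bound, so a statp lying outside the reply buffer could yield a negative len that passed the `len > (256 >> 2)` test and then, presumably, flowed into the cache-sizing logic as a huge unsigned value. Doing the arithmetic in a long, skipping it entirely when statp is NULL, and adding the `len < 0` test addresses all three aspects. A standalone demonstration of the trap (compiles and runs with any C compiler):

#include <stdio.h>

int main(void)
{
        char buf[256];
        char *iov_base = buf;
        long iov_len = 64;
        char *statp = buf + 200;        /* bogus: beyond the reply */

        long len = iov_len - (statp - iov_base);  /* 64 - 200 = -136 */
        len >>= 2;                                /* -34 */

        printf("len = %ld; len > 64: %s; len < 0: %s\n", len,
               len > 64 ? "caught" : "missed",
               len < 0 ? "caught" : "missed");
        return 0;
}
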
66204diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
66205index 140c496..e9cbf14 100644
66206--- a/fs/nfsd/vfs.c
66207+++ b/fs/nfsd/vfs.c
66208@@ -855,7 +855,7 @@ int nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen,
66209
66210 oldfs = get_fs();
66211 set_fs(KERNEL_DS);
66212- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
66213+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
66214 set_fs(oldfs);
66215 return nfsd_finish_read(file, count, host_err);
66216 }
66217@@ -943,7 +943,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
66218
66219 /* Write the data. */
66220 oldfs = get_fs(); set_fs(KERNEL_DS);
66221- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
66222+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
66223 set_fs(oldfs);
66224 if (host_err < 0)
66225 goto out_nfserr;
66226@@ -1482,7 +1482,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
66227 */
66228
66229 oldfs = get_fs(); set_fs(KERNEL_DS);
66230- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
66231+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
66232 set_fs(oldfs);
66233
66234 if (host_err < 0)
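
nfsd legitimately feeds kernel buffers to vfs_readv()/vfs_writev()/readlink() after widening the address limit with set_fs(KERNEL_DS). Those APIs take __user pointers, and grsecurity's stricter sparse annotations would flag the casts; __force_user marks them as deliberate, so only unannotated kernel-to-user pointer laundering keeps producing warnings. The pattern, as used above:

/* The annotation documents an intentional crossing; the surrounding
 * set_fs() pair is what actually makes the kernel address acceptable. */
mm_segment_t oldfs = get_fs();

set_fs(KERNEL_DS);
host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
set_fs(oldfs);
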
66235diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
66236index 52ccd34..7a6b202 100644
66237--- a/fs/nls/nls_base.c
66238+++ b/fs/nls/nls_base.c
66239@@ -234,21 +234,25 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
66240
66241 int __register_nls(struct nls_table *nls, struct module *owner)
66242 {
66243- struct nls_table ** tmp = &tables;
66244+ struct nls_table *tmp = tables;
66245
66246 if (nls->next)
66247 return -EBUSY;
66248
66249- nls->owner = owner;
66250+ pax_open_kernel();
66251+ *(void **)&nls->owner = owner;
66252+ pax_close_kernel();
66253 spin_lock(&nls_lock);
66254- while (*tmp) {
66255- if (nls == *tmp) {
66256+ while (tmp) {
66257+ if (nls == tmp) {
66258 spin_unlock(&nls_lock);
66259 return -EBUSY;
66260 }
66261- tmp = &(*tmp)->next;
66262+ tmp = tmp->next;
66263 }
66264- nls->next = tables;
66265+ pax_open_kernel();
66266+ *(struct nls_table **)&nls->next = tables;
66267+ pax_close_kernel();
66268 tables = nls;
66269 spin_unlock(&nls_lock);
66270 return 0;
66271@@ -257,12 +261,14 @@ EXPORT_SYMBOL(__register_nls);
66272
66273 int unregister_nls(struct nls_table * nls)
66274 {
66275- struct nls_table ** tmp = &tables;
66276+ struct nls_table * const * tmp = &tables;
66277
66278 spin_lock(&nls_lock);
66279 while (*tmp) {
66280 if (nls == *tmp) {
66281- *tmp = nls->next;
66282+ pax_open_kernel();
66283+ *(struct nls_table **)tmp = nls->next;
66284+ pax_close_kernel();
66285 spin_unlock(&nls_lock);
66286 return 0;
66287 }
66288@@ -272,7 +278,7 @@ int unregister_nls(struct nls_table * nls)
66289 return -EINVAL;
66290 }
66291
66292-static struct nls_table *find_nls(char *charset)
66293+static struct nls_table *find_nls(const char *charset)
66294 {
66295 struct nls_table *nls;
66296 spin_lock(&nls_lock);
66297@@ -288,7 +294,7 @@ static struct nls_table *find_nls(char *charset)
66298 return nls;
66299 }
66300
66301-struct nls_table *load_nls(char *charset)
66302+struct nls_table *load_nls(const char *charset)
66303 {
66304 return try_then_request_module(find_nls(charset), "nls_%s", charset);
66305 }
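
`__register_nls()` now writes through `pax_open_kernel()`/`pax_close_kernel()` because, under PaX's constify hardening, structures like `struct nls_table` are moved into read-only memory; the pair briefly lifts kernel write protection around each sanctioned store (on x86, by toggling CR0.WP). A rough userspace analogue using `mprotect()` (purely illustrative, Linux/POSIX only):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	char *table = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
	                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (table == MAP_FAILED)
		return 1;
	strcpy(table, "nls_ascii");
	mprotect(table, pagesz, PROT_READ);   /* "constified": read-only */

	mprotect(table, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel()   */
	table[4] = 'A';                                  /* the sanctioned write */
	mprotect(table, pagesz, PROT_READ);              /* pax_close_kernel()  */

	puts(table);                           /* prints "nls_Ascii" */
	return 0;
}
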
66306diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
66307index 162b3f1..6076a7c 100644
66308--- a/fs/nls/nls_euc-jp.c
66309+++ b/fs/nls/nls_euc-jp.c
66310@@ -560,8 +560,10 @@ static int __init init_nls_euc_jp(void)
66311 p_nls = load_nls("cp932");
66312
66313 if (p_nls) {
66314- table.charset2upper = p_nls->charset2upper;
66315- table.charset2lower = p_nls->charset2lower;
66316+ pax_open_kernel();
66317+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
66318+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
66319+ pax_close_kernel();
66320 return register_nls(&table);
66321 }
66322
66323diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
66324index a80a741..7b96e1b 100644
66325--- a/fs/nls/nls_koi8-ru.c
66326+++ b/fs/nls/nls_koi8-ru.c
66327@@ -62,8 +62,10 @@ static int __init init_nls_koi8_ru(void)
66328 p_nls = load_nls("koi8-u");
66329
66330 if (p_nls) {
66331- table.charset2upper = p_nls->charset2upper;
66332- table.charset2lower = p_nls->charset2lower;
66333+ pax_open_kernel();
66334+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
66335+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
66336+ pax_close_kernel();
66337 return register_nls(&table);
66338 }
66339
66340diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
66341index 3fdc8a3..5888623 100644
66342--- a/fs/notify/fanotify/fanotify_user.c
66343+++ b/fs/notify/fanotify/fanotify_user.c
66344@@ -216,8 +216,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
66345
66346 fd = fanotify_event_metadata.fd;
66347 ret = -EFAULT;
66348- if (copy_to_user(buf, &fanotify_event_metadata,
66349- fanotify_event_metadata.event_len))
66350+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
66351+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
66352 goto out_close_fd;
66353
66354 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
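
The added test closes a potential infoleak: `event_len` is taken from the event object, and if it could ever exceed `sizeof(fanotify_event_metadata)`, the `copy_to_user()` would read past the on-stack structure into adjacent kernel memory. The defensive shape as a runnable sketch (hypothetical struct; `memcpy` stands in for `copy_to_user`):

#include <stdio.h>
#include <string.h>

struct metadata {
	unsigned event_len;   /* length claimed by the event */
	int fd;
	char body[8];
};

/* Refuse any claimed length larger than the object we copy from. */
static int copy_event_out(char *dst, size_t dstsz, const struct metadata *m)
{
	if (m->event_len > sizeof *m || m->event_len > dstsz)
		return -1;                    /* -EFAULT in the hunk above */
	memcpy(dst, m, m->event_len);
	return 0;
}

int main(void)
{
	struct metadata m = { sizeof m, 3, "ok" };
	char out[64];

	printf("%d\n", copy_event_out(out, sizeof out, &m)); /*  0 */
	m.event_len = 4096;                                  /* hostile */
	printf("%d\n", copy_event_out(out, sizeof out, &m)); /* -1 */
	return 0;
}
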
66355diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c
66356index 238a593..9d7e2b9 100644
66357--- a/fs/notify/fdinfo.c
66358+++ b/fs/notify/fdinfo.c
66359@@ -42,7 +42,7 @@ static int show_mark_fhandle(struct seq_file *m, struct inode *inode)
66360 {
66361 struct {
66362 struct file_handle handle;
66363- u8 pad[64];
66364+ u8 pad[MAX_HANDLE_SZ];
66365 } f;
66366 int size, ret, i;
66367
66368@@ -50,7 +50,7 @@ static int show_mark_fhandle(struct seq_file *m, struct inode *inode)
66369 size = f.handle.handle_bytes >> 2;
66370
66371 ret = exportfs_encode_inode_fh(inode, (struct fid *)f.handle.f_handle, &size, 0);
66372- if ((ret == 255) || (ret == -ENOSPC)) {
66373+ if ((ret == FILEID_INVALID) || (ret < 0)) {
66374 WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret);
66375 return 0;
66376 }
66377diff --git a/fs/notify/notification.c b/fs/notify/notification.c
66378index 1e58402..bb2d6f4 100644
66379--- a/fs/notify/notification.c
66380+++ b/fs/notify/notification.c
66381@@ -48,7 +48,7 @@
66382 #include <linux/fsnotify_backend.h>
66383 #include "fsnotify.h"
66384
66385-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
66386+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
66387
66388 /**
66389 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
66390@@ -56,7 +56,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
66391 */
66392 u32 fsnotify_get_cookie(void)
66393 {
66394- return atomic_inc_return(&fsnotify_sync_cookie);
66395+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
66396 }
66397 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
66398
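
`atomic_unchecked_t` is PaX's opt-out from its REFCOUNT hardening, which treats an overflowing `atomic_t` as a reference-count bug and traps. Counters whose wraparound is harmless, like this event cookie that only needs to be distinct between nearby events, are converted to the unchecked variant so they may wrap freely. The distinction in C11 terms (sketch, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

/* A cookie may wrap from UINT_MAX back to 0 without harm, so no
 * overflow trap is wanted here -- unlike a true refcount. */
static atomic_uint sync_cookie;

static unsigned get_cookie(void)
{
	return atomic_fetch_add(&sync_cookie, 1) + 1;  /* atomic_inc_return */
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("%u\n", get_cookie());
	return 0;
}
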
66399diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
66400index 9e38daf..5727cae 100644
66401--- a/fs/ntfs/dir.c
66402+++ b/fs/ntfs/dir.c
66403@@ -1310,7 +1310,7 @@ find_next_index_buffer:
66404 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
66405 ~(s64)(ndir->itype.index.block_size - 1)));
66406 /* Bounds checks. */
66407- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
66408+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
66409 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
66410 "inode 0x%lx or driver bug.", vdir->i_ino);
66411 goto err_out;
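
The extra `!kaddr` term makes the bounds check total: `ia` is computed from `kaddr`, so if the mapping ever yielded NULL, both pointer comparisons could pass and the corrupt-directory path would be missed. The generic shape of such a check, as a runnable sketch:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* A pointer-range test is only meaningful when the base itself is
 * valid; the NULL test must come first. */
static bool in_bounds(const unsigned char *base, size_t size,
                      const unsigned char *p)
{
	return base != NULL && p >= base && p <= base + size;
}

int main(void)
{
	unsigned char page[4096];

	printf("%d\n", in_bounds(page, sizeof page, page + 100)); /* 1 */
	printf("%d\n", in_bounds(NULL, sizeof page, page));       /* 0 */
	return 0;
}
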
66412diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
66413index 5c9e2c8..96e4ba0 100644
66414--- a/fs/ntfs/file.c
66415+++ b/fs/ntfs/file.c
66416@@ -1282,7 +1282,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
66417 char *addr;
66418 size_t total = 0;
66419 unsigned len;
66420- int left;
66421+ unsigned left;
66422
66423 do {
66424 len = PAGE_CACHE_SIZE - ofs;
66425diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
66426index 6c3296e..c0b99f0 100644
66427--- a/fs/ntfs/super.c
66428+++ b/fs/ntfs/super.c
66429@@ -688,7 +688,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
66430 if (!silent)
66431 ntfs_error(sb, "Primary boot sector is invalid.");
66432 } else if (!silent)
66433- ntfs_error(sb, read_err_str, "primary");
66434+ ntfs_error(sb, read_err_str, "%s", "primary");
66435 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
66436 if (bh_primary)
66437 brelse(bh_primary);
66438@@ -704,7 +704,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
66439 goto hotfix_primary_boot_sector;
66440 brelse(bh_backup);
66441 } else if (!silent)
66442- ntfs_error(sb, read_err_str, "backup");
66443+ ntfs_error(sb, read_err_str, "%s", "backup");
66444 /* Try to read NT3.51- backup boot sector. */
66445 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
66446 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
66447@@ -715,7 +715,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
66448 "sector.");
66449 brelse(bh_backup);
66450 } else if (!silent)
66451- ntfs_error(sb, read_err_str, "backup");
66452+ ntfs_error(sb, read_err_str, "%s", "backup");
66453 /* We failed. Cleanup and return. */
66454 if (bh_primary)
66455 brelse(bh_primary);
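
These three conversions are format-string hygiene: `read_err_str` is a variable rather than a string literal (its definition is not shown in this excerpt), and the format-string checking used by this patch set wants the variadic data routed through an explicit `"%s"`. The general hazard that such pinning defends against, compilable (hypothetical tainted input):

#include <stdio.h>

int main(void)
{
	const char *attacker = "%p %p %p";   /* hypothetical tainted data */

	/* printf(attacker);       -- classic format-string bug           */
	printf("%s\n", attacker);  /* pinned: printed as plain data only  */
	return 0;
}
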
66456diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
66457index 0440134..d52c93a 100644
66458--- a/fs/ocfs2/localalloc.c
66459+++ b/fs/ocfs2/localalloc.c
66460@@ -1320,7 +1320,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
66461 goto bail;
66462 }
66463
66464- atomic_inc(&osb->alloc_stats.moves);
66465+ atomic_inc_unchecked(&osb->alloc_stats.moves);
66466
66467 bail:
66468 if (handle)
66469diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
66470index bbec539..7b266d5 100644
66471--- a/fs/ocfs2/ocfs2.h
66472+++ b/fs/ocfs2/ocfs2.h
66473@@ -236,11 +236,11 @@ enum ocfs2_vol_state
66474
66475 struct ocfs2_alloc_stats
66476 {
66477- atomic_t moves;
66478- atomic_t local_data;
66479- atomic_t bitmap_data;
66480- atomic_t bg_allocs;
66481- atomic_t bg_extends;
66482+ atomic_unchecked_t moves;
66483+ atomic_unchecked_t local_data;
66484+ atomic_unchecked_t bitmap_data;
66485+ atomic_unchecked_t bg_allocs;
66486+ atomic_unchecked_t bg_extends;
66487 };
66488
66489 enum ocfs2_local_alloc_state
66490diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
66491index 0cb889a..6a26b24 100644
66492--- a/fs/ocfs2/suballoc.c
66493+++ b/fs/ocfs2/suballoc.c
66494@@ -867,7 +867,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
66495 mlog_errno(status);
66496 goto bail;
66497 }
66498- atomic_inc(&osb->alloc_stats.bg_extends);
66499+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
66500
66501 /* You should never ask for this much metadata */
66502 BUG_ON(bits_wanted >
66503@@ -2014,7 +2014,7 @@ int ocfs2_claim_metadata(handle_t *handle,
66504 mlog_errno(status);
66505 goto bail;
66506 }
66507- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66508+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66509
66510 *suballoc_loc = res.sr_bg_blkno;
66511 *suballoc_bit_start = res.sr_bit_offset;
66512@@ -2180,7 +2180,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
66513 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
66514 res->sr_bits);
66515
66516- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66517+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66518
66519 BUG_ON(res->sr_bits != 1);
66520
66521@@ -2222,7 +2222,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
66522 mlog_errno(status);
66523 goto bail;
66524 }
66525- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66526+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66527
66528 BUG_ON(res.sr_bits != 1);
66529
66530@@ -2326,7 +2326,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
66531 cluster_start,
66532 num_clusters);
66533 if (!status)
66534- atomic_inc(&osb->alloc_stats.local_data);
66535+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
66536 } else {
66537 if (min_clusters > (osb->bitmap_cpg - 1)) {
66538 /* The only paths asking for contiguousness
66539@@ -2352,7 +2352,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
66540 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
66541 res.sr_bg_blkno,
66542 res.sr_bit_offset);
66543- atomic_inc(&osb->alloc_stats.bitmap_data);
66544+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
66545 *num_clusters = res.sr_bits;
66546 }
66547 }
66548diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
66549index ddb662b..f701c83 100644
66550--- a/fs/ocfs2/super.c
66551+++ b/fs/ocfs2/super.c
66552@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
66553 "%10s => GlobalAllocs: %d LocalAllocs: %d "
66554 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
66555 "Stats",
66556- atomic_read(&osb->alloc_stats.bitmap_data),
66557- atomic_read(&osb->alloc_stats.local_data),
66558- atomic_read(&osb->alloc_stats.bg_allocs),
66559- atomic_read(&osb->alloc_stats.moves),
66560- atomic_read(&osb->alloc_stats.bg_extends));
66561+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
66562+ atomic_read_unchecked(&osb->alloc_stats.local_data),
66563+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
66564+ atomic_read_unchecked(&osb->alloc_stats.moves),
66565+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
66566
66567 out += snprintf(buf + out, len - out,
66568 "%10s => State: %u Descriptor: %llu Size: %u bits "
66569@@ -2100,11 +2100,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
66570
66571 mutex_init(&osb->system_file_mutex);
66572
66573- atomic_set(&osb->alloc_stats.moves, 0);
66574- atomic_set(&osb->alloc_stats.local_data, 0);
66575- atomic_set(&osb->alloc_stats.bitmap_data, 0);
66576- atomic_set(&osb->alloc_stats.bg_allocs, 0);
66577- atomic_set(&osb->alloc_stats.bg_extends, 0);
66578+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
66579+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
66580+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
66581+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
66582+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
66583
66584 /* Copy the blockcheck stats from the superblock probe */
66585 osb->osb_ecc_stats = *stats;
66586diff --git a/fs/open.c b/fs/open.c
66587index d6fd3ac..6ccf474 100644
66588--- a/fs/open.c
66589+++ b/fs/open.c
66590@@ -32,6 +32,8 @@
66591 #include <linux/dnotify.h>
66592 #include <linux/compat.h>
66593
66594+#define CREATE_TRACE_POINTS
66595+#include <trace/events/fs.h>
66596 #include "internal.h"
66597
66598 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
66599@@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length)
66600 error = locks_verify_truncate(inode, NULL, length);
66601 if (!error)
66602 error = security_path_truncate(path);
66603+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
66604+ error = -EACCES;
66605 if (!error)
66606 error = do_truncate(path->dentry, length, 0, NULL);
66607
66608@@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
66609 error = locks_verify_truncate(inode, f.file, length);
66610 if (!error)
66611 error = security_path_truncate(&f.file->f_path);
66612+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
66613+ error = -EACCES;
66614 if (!error)
66615 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
66616 sb_end_write(inode->i_sb);
66617@@ -380,6 +386,9 @@ retry:
66618 if (__mnt_is_readonly(path.mnt))
66619 res = -EROFS;
66620
66621+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
66622+ res = -EACCES;
66623+
66624 out_path_release:
66625 path_put(&path);
66626 if (retry_estale(res, lookup_flags)) {
66627@@ -411,6 +420,8 @@ retry:
66628 if (error)
66629 goto dput_and_out;
66630
66631+ gr_log_chdir(path.dentry, path.mnt);
66632+
66633 set_fs_pwd(current->fs, &path);
66634
66635 dput_and_out:
66636@@ -440,6 +451,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
66637 goto out_putf;
66638
66639 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
66640+
66641+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
66642+ error = -EPERM;
66643+
66644+ if (!error)
66645+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
66646+
66647 if (!error)
66648 set_fs_pwd(current->fs, &f.file->f_path);
66649 out_putf:
66650@@ -469,7 +487,13 @@ retry:
66651 if (error)
66652 goto dput_and_out;
66653
66654+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
66655+ goto dput_and_out;
66656+
66657 set_fs_root(current->fs, &path);
66658+
66659+ gr_handle_chroot_chdir(&path);
66660+
66661 error = 0;
66662 dput_and_out:
66663 path_put(&path);
66664@@ -493,6 +517,16 @@ static int chmod_common(struct path *path, umode_t mode)
66665 return error;
66666 retry_deleg:
66667 mutex_lock(&inode->i_mutex);
66668+
66669+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
66670+ error = -EACCES;
66671+ goto out_unlock;
66672+ }
66673+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
66674+ error = -EACCES;
66675+ goto out_unlock;
66676+ }
66677+
66678 error = security_path_chmod(path, mode);
66679 if (error)
66680 goto out_unlock;
66681@@ -558,6 +592,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
66682 uid = make_kuid(current_user_ns(), user);
66683 gid = make_kgid(current_user_ns(), group);
66684
66685+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
66686+ return -EACCES;
66687+
66688 newattrs.ia_valid = ATTR_CTIME;
66689 if (user != (uid_t) -1) {
66690 if (!uid_valid(uid))
66691@@ -983,6 +1020,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
66692 } else {
66693 fsnotify_open(f);
66694 fd_install(fd, f);
66695+ trace_do_sys_open(tmp->name, flags, mode);
66696 }
66697 }
66698 putname(tmp);
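
The pattern running through these fs/open.c hunks: a grsecurity policy hook is slotted in after the existing DAC and LSM checks, converting a policy denial into `-EACCES` or `-EPERM`, while audit and trace points (`gr_log_chdir`, `trace_do_sys_open`) record the paths that succeed. A generic runnable sketch of this "chain of veto checks, first error wins" shape (all names hypothetical):

#include <stdio.h>

typedef int (*check_fn)(const char *path);

static int dac_check(const char *path)    { (void)path; return 0; }
static int lsm_check(const char *path)    { (void)path; return 0; }

/* Stands in for the newly slotted-in gr_acl_* hook. */
static int policy_check(const char *path)
{
	return path[0] == '/' ? 0 : -13;   /* -EACCES */
}

static int run_checks(const char *path)
{
	check_fn checks[] = { dac_check, lsm_check, policy_check };
	int error = 0;

	for (unsigned i = 0; !error && i < sizeof checks / sizeof *checks; i++)
		error = checks[i](path);
	return error;   /* 0 only if every layer agreed */
}

int main(void)
{
	printf("%d\n", run_checks("/etc/motd"));  /*   0 */
	printf("%d\n", run_checks("relative"));   /* -13 */
	return 0;
}
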
66699diff --git a/fs/pipe.c b/fs/pipe.c
66700index 21981e5..3d5f55c 100644
66701--- a/fs/pipe.c
66702+++ b/fs/pipe.c
66703@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
66704
66705 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
66706 {
66707- if (pipe->files)
66708+ if (atomic_read(&pipe->files))
66709 mutex_lock_nested(&pipe->mutex, subclass);
66710 }
66711
66712@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
66713
66714 void pipe_unlock(struct pipe_inode_info *pipe)
66715 {
66716- if (pipe->files)
66717+ if (atomic_read(&pipe->files))
66718 mutex_unlock(&pipe->mutex);
66719 }
66720 EXPORT_SYMBOL(pipe_unlock);
66721@@ -292,9 +292,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
66722 }
66723 if (bufs) /* More to do? */
66724 continue;
66725- if (!pipe->writers)
66726+ if (!atomic_read(&pipe->writers))
66727 break;
66728- if (!pipe->waiting_writers) {
66729+ if (!atomic_read(&pipe->waiting_writers)) {
66730 /* syscall merging: Usually we must not sleep
66731 * if O_NONBLOCK is set, or if we got some data.
66732 * But if a writer sleeps in kernel space, then
66733@@ -351,7 +351,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
66734
66735 __pipe_lock(pipe);
66736
66737- if (!pipe->readers) {
66738+ if (!atomic_read(&pipe->readers)) {
66739 send_sig(SIGPIPE, current, 0);
66740 ret = -EPIPE;
66741 goto out;
66742@@ -387,7 +387,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
66743 for (;;) {
66744 int bufs;
66745
66746- if (!pipe->readers) {
66747+ if (!atomic_read(&pipe->readers)) {
66748 send_sig(SIGPIPE, current, 0);
66749 if (!ret)
66750 ret = -EPIPE;
66751@@ -455,9 +455,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
66752 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
66753 do_wakeup = 0;
66754 }
66755- pipe->waiting_writers++;
66756+ atomic_inc(&pipe->waiting_writers);
66757 pipe_wait(pipe);
66758- pipe->waiting_writers--;
66759+ atomic_dec(&pipe->waiting_writers);
66760 }
66761 out:
66762 __pipe_unlock(pipe);
66763@@ -512,7 +512,7 @@ pipe_poll(struct file *filp, poll_table *wait)
66764 mask = 0;
66765 if (filp->f_mode & FMODE_READ) {
66766 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
66767- if (!pipe->writers && filp->f_version != pipe->w_counter)
66768+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
66769 mask |= POLLHUP;
66770 }
66771
66772@@ -522,7 +522,7 @@ pipe_poll(struct file *filp, poll_table *wait)
66773 * Most Unices do not set POLLERR for FIFOs but on Linux they
66774 * behave exactly like pipes for poll().
66775 */
66776- if (!pipe->readers)
66777+ if (!atomic_read(&pipe->readers))
66778 mask |= POLLERR;
66779 }
66780
66781@@ -534,7 +534,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
66782 int kill = 0;
66783
66784 spin_lock(&inode->i_lock);
66785- if (!--pipe->files) {
66786+ if (atomic_dec_and_test(&pipe->files)) {
66787 inode->i_pipe = NULL;
66788 kill = 1;
66789 }
66790@@ -551,11 +551,11 @@ pipe_release(struct inode *inode, struct file *file)
66791
66792 __pipe_lock(pipe);
66793 if (file->f_mode & FMODE_READ)
66794- pipe->readers--;
66795+ atomic_dec(&pipe->readers);
66796 if (file->f_mode & FMODE_WRITE)
66797- pipe->writers--;
66798+ atomic_dec(&pipe->writers);
66799
66800- if (pipe->readers || pipe->writers) {
66801+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
66802 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
66803 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
66804 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
66805@@ -620,7 +620,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
66806 kfree(pipe);
66807 }
66808
66809-static struct vfsmount *pipe_mnt __read_mostly;
66810+struct vfsmount *pipe_mnt __read_mostly;
66811
66812 /*
66813 * pipefs_dname() is called from d_path().
66814@@ -650,8 +650,9 @@ static struct inode * get_pipe_inode(void)
66815 goto fail_iput;
66816
66817 inode->i_pipe = pipe;
66818- pipe->files = 2;
66819- pipe->readers = pipe->writers = 1;
66820+ atomic_set(&pipe->files, 2);
66821+ atomic_set(&pipe->readers, 1);
66822+ atomic_set(&pipe->writers, 1);
66823 inode->i_fop = &pipefifo_fops;
66824
66825 /*
66826@@ -830,17 +831,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
66827 spin_lock(&inode->i_lock);
66828 if (inode->i_pipe) {
66829 pipe = inode->i_pipe;
66830- pipe->files++;
66831+ atomic_inc(&pipe->files);
66832 spin_unlock(&inode->i_lock);
66833 } else {
66834 spin_unlock(&inode->i_lock);
66835 pipe = alloc_pipe_info();
66836 if (!pipe)
66837 return -ENOMEM;
66838- pipe->files = 1;
66839+ atomic_set(&pipe->files, 1);
66840 spin_lock(&inode->i_lock);
66841 if (unlikely(inode->i_pipe)) {
66842- inode->i_pipe->files++;
66843+ atomic_inc(&inode->i_pipe->files);
66844 spin_unlock(&inode->i_lock);
66845 free_pipe_info(pipe);
66846 pipe = inode->i_pipe;
66847@@ -865,10 +866,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
66848 * opened, even when there is no process writing the FIFO.
66849 */
66850 pipe->r_counter++;
66851- if (pipe->readers++ == 0)
66852+ if (atomic_inc_return(&pipe->readers) == 1)
66853 wake_up_partner(pipe);
66854
66855- if (!is_pipe && !pipe->writers) {
66856+ if (!is_pipe && !atomic_read(&pipe->writers)) {
66857 if ((filp->f_flags & O_NONBLOCK)) {
66858 /* suppress POLLHUP until we have
66859 * seen a writer */
66860@@ -887,14 +888,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
66861 * errno=ENXIO when there is no process reading the FIFO.
66862 */
66863 ret = -ENXIO;
66864- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
66865+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
66866 goto err;
66867
66868 pipe->w_counter++;
66869- if (!pipe->writers++)
66870+ if (atomic_inc_return(&pipe->writers) == 1)
66871 wake_up_partner(pipe);
66872
66873- if (!is_pipe && !pipe->readers) {
66874+ if (!is_pipe && !atomic_read(&pipe->readers)) {
66875 if (wait_for_partner(pipe, &pipe->r_counter))
66876 goto err_wr;
66877 }
66878@@ -908,11 +909,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
66879 * the process can at least talk to itself.
66880 */
66881
66882- pipe->readers++;
66883- pipe->writers++;
66884+ atomic_inc(&pipe->readers);
66885+ atomic_inc(&pipe->writers);
66886 pipe->r_counter++;
66887 pipe->w_counter++;
66888- if (pipe->readers == 1 || pipe->writers == 1)
66889+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
66890 wake_up_partner(pipe);
66891 break;
66892
66893@@ -926,13 +927,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
66894 return 0;
66895
66896 err_rd:
66897- if (!--pipe->readers)
66898+ if (atomic_dec_and_test(&pipe->readers))
66899 wake_up_interruptible(&pipe->wait);
66900 ret = -ERESTARTSYS;
66901 goto err;
66902
66903 err_wr:
66904- if (!--pipe->writers)
66905+ if (atomic_dec_and_test(&pipe->writers))
66906 wake_up_interruptible(&pipe->wait);
66907 ret = -ERESTARTSYS;
66908 goto err;
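
The pipe counters (`files`, `readers`, `writers`, `waiting_writers`) become `atomic_t` so each increment and its paired test form one indivisible operation. Note how each plain-C idiom maps onto an atomic one: `x++ == 0` becomes `atomic_inc_return(&x) == 1`, and `!--x` becomes `atomic_dec_and_test(&x)`. The same mapping in C11 atomics (sketch):

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_int readers = 0;

	/* first reader?  kernel: atomic_inc_return(&pipe->readers) == 1 */
	if (atomic_fetch_add(&readers, 1) + 1 == 1)
		puts("wake_up_partner()");

	/* last reader gone?  kernel: atomic_dec_and_test(&pipe->readers) */
	if (atomic_fetch_sub(&readers, 1) - 1 == 0)
		puts("wake_up_interruptible()");
	return 0;
}
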
66909diff --git a/fs/posix_acl.c b/fs/posix_acl.c
66910index 0855f77..6787d50 100644
66911--- a/fs/posix_acl.c
66912+++ b/fs/posix_acl.c
66913@@ -20,6 +20,7 @@
66914 #include <linux/xattr.h>
66915 #include <linux/export.h>
66916 #include <linux/user_namespace.h>
66917+#include <linux/grsecurity.h>
66918
66919 struct posix_acl **acl_by_type(struct inode *inode, int type)
66920 {
66921@@ -277,7 +278,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
66922 }
66923 }
66924 if (mode_p)
66925- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
66926+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
66927 return not_equiv;
66928 }
66929 EXPORT_SYMBOL(posix_acl_equiv_mode);
66930@@ -427,7 +428,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
66931 mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
66932 }
66933
66934- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
66935+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
66936 return not_equiv;
66937 }
66938
66939@@ -485,6 +486,8 @@ __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
66940 struct posix_acl *clone = posix_acl_clone(*acl, gfp);
66941 int err = -ENOMEM;
66942 if (clone) {
66943+ *mode_p &= ~gr_acl_umask();
66944+
66945 err = posix_acl_create_masq(clone, mode_p);
66946 if (err < 0) {
66947 posix_acl_release(clone);
66948@@ -659,11 +662,12 @@ struct posix_acl *
66949 posix_acl_from_xattr(struct user_namespace *user_ns,
66950 const void *value, size_t size)
66951 {
66952- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
66953- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
66954+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
66955+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
66956 int count;
66957 struct posix_acl *acl;
66958 struct posix_acl_entry *acl_e;
66959+ umode_t umask = gr_acl_umask();
66960
66961 if (!value)
66962 return NULL;
66963@@ -689,12 +693,18 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
66964
66965 switch(acl_e->e_tag) {
66966 case ACL_USER_OBJ:
66967+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
66968+ break;
66969 case ACL_GROUP_OBJ:
66970 case ACL_MASK:
66971+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
66972+ break;
66973 case ACL_OTHER:
66974+ acl_e->e_perm &= ~(umask & S_IRWXO);
66975 break;
66976
66977 case ACL_USER:
66978+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
66979 acl_e->e_uid =
66980 make_kuid(user_ns,
66981 le32_to_cpu(entry->e_id));
66982@@ -702,6 +712,7 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
66983 goto fail;
66984 break;
66985 case ACL_GROUP:
66986+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
66987 acl_e->e_gid =
66988 make_kgid(user_ns,
66989 le32_to_cpu(entry->e_id));
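
`gr_acl_umask()` supplies a policy umask that is folded into every path an ACL can take toward a file mode; the per-tag masking shifts the umask's rwx triplet into place for the class being filtered (user bits shift down by 6, group by 3, other is used as-is) and clears those bits from `e_perm`. A worked sketch of that shift-and-clear (assumed policy umask 022):

#include <stdio.h>

/* Clear from a 3-bit rwx permission whatever the umask forbids for the
 * given class: shift = 6 (user), 3 (group), 0 (other). */
static unsigned mask_perm(unsigned perm, unsigned umask, int shift)
{
	return perm & ~((umask >> shift) & 07u);
}

int main(void)
{
	unsigned umask = 0022;                            /* assumed policy */

	printf("user:  %o\n", mask_perm(07, umask, 6));   /* 7: untouched  */
	printf("group: %o\n", mask_perm(07, umask, 3));   /* 5: w dropped  */
	printf("other: %o\n", mask_perm(07, umask, 0));   /* 5: w dropped  */
	return 0;
}
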
66990diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
66991index 2183fcf..3c32a98 100644
66992--- a/fs/proc/Kconfig
66993+++ b/fs/proc/Kconfig
66994@@ -30,7 +30,7 @@ config PROC_FS
66995
66996 config PROC_KCORE
66997 bool "/proc/kcore support" if !ARM
66998- depends on PROC_FS && MMU
66999+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
67000 help
67001 Provides a virtual ELF core file of the live kernel. This can
67002 be read with gdb and other ELF tools. No modifications can be
67003@@ -38,8 +38,8 @@ config PROC_KCORE
67004
67005 config PROC_VMCORE
67006 bool "/proc/vmcore support"
67007- depends on PROC_FS && CRASH_DUMP
67008- default y
67009+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
67010+ default n
67011 help
67012 Exports the dump image of crashed kernel in ELF format.
67013
67014@@ -63,8 +63,8 @@ config PROC_SYSCTL
67015 limited in memory.
67016
67017 config PROC_PAGE_MONITOR
67018- default y
67019- depends on PROC_FS && MMU
67020+ default n
67021+ depends on PROC_FS && MMU && !GRKERNSEC
67022 bool "Enable /proc page monitoring" if EXPERT
67023 help
67024 Various /proc files exist to monitor process memory utilization:
67025diff --git a/fs/proc/array.c b/fs/proc/array.c
67026index 64db2bc..a8185d6 100644
67027--- a/fs/proc/array.c
67028+++ b/fs/proc/array.c
67029@@ -60,6 +60,7 @@
67030 #include <linux/tty.h>
67031 #include <linux/string.h>
67032 #include <linux/mman.h>
67033+#include <linux/grsecurity.h>
67034 #include <linux/proc_fs.h>
67035 #include <linux/ioport.h>
67036 #include <linux/uaccess.h>
67037@@ -356,6 +357,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
67038 seq_putc(m, '\n');
67039 }
67040
67041+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
67042+static inline void task_pax(struct seq_file *m, struct task_struct *p)
67043+{
67044+ if (p->mm)
67045+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
67046+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
67047+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
67048+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
67049+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
67050+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
67051+ else
67052+ seq_printf(m, "PaX:\t-----\n");
67053+}
67054+#endif
67055+
67056 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
67057 struct pid *pid, struct task_struct *task)
67058 {
67059@@ -374,9 +390,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
67060 task_cpus_allowed(m, task);
67061 cpuset_task_status_allowed(m, task);
67062 task_context_switch_counts(m, task);
67063+
67064+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
67065+ task_pax(m, task);
67066+#endif
67067+
67068+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
67069+ task_grsec_rbac(m, task);
67070+#endif
67071+
67072 return 0;
67073 }
67074
67075+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67076+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
67077+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
67078+ _mm->pax_flags & MF_PAX_SEGMEXEC))
67079+#endif
67080+
67081 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
67082 struct pid *pid, struct task_struct *task, int whole)
67083 {
67084@@ -398,6 +429,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
67085 char tcomm[sizeof(task->comm)];
67086 unsigned long flags;
67087
67088+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67089+ if (current->exec_id != m->exec_id) {
67090+ gr_log_badprocpid("stat");
67091+ return 0;
67092+ }
67093+#endif
67094+
67095 state = *get_task_state(task);
67096 vsize = eip = esp = 0;
67097 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
67098@@ -468,6 +506,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
67099 gtime = task_gtime(task);
67100 }
67101
67102+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67103+ if (PAX_RAND_FLAGS(mm)) {
67104+ eip = 0;
67105+ esp = 0;
67106+ wchan = 0;
67107+ }
67108+#endif
67109+#ifdef CONFIG_GRKERNSEC_HIDESYM
67110+ wchan = 0;
67111+	eip = 0;
67112+	esp = 0;
67113+#endif
67114+
67115 /* scale priority and nice values from timeslices to -20..20 */
67116 /* to make it look like a "normal" Unix priority/nice value */
67117 priority = task_prio(task);
67118@@ -504,9 +555,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
67119 seq_put_decimal_ull(m, ' ', vsize);
67120 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
67121 seq_put_decimal_ull(m, ' ', rsslim);
67122+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67123+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
67124+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
67125+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
67126+#else
67127 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
67128 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
67129 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
67130+#endif
67131 seq_put_decimal_ull(m, ' ', esp);
67132 seq_put_decimal_ull(m, ' ', eip);
67133 /* The signal information here is obsolete.
67134@@ -528,7 +585,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
67135 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
67136 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
67137
67138- if (mm && permitted) {
67139+ if (mm && permitted
67140+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67141+ && !PAX_RAND_FLAGS(mm)
67142+#endif
67143+ ) {
67144 seq_put_decimal_ull(m, ' ', mm->start_data);
67145 seq_put_decimal_ull(m, ' ', mm->end_data);
67146 seq_put_decimal_ull(m, ' ', mm->start_brk);
67147@@ -566,8 +627,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
67148 struct pid *pid, struct task_struct *task)
67149 {
67150 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
67151- struct mm_struct *mm = get_task_mm(task);
67152+ struct mm_struct *mm;
67153
67154+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67155+ if (current->exec_id != m->exec_id) {
67156+ gr_log_badprocpid("statm");
67157+ return 0;
67158+ }
67159+#endif
67160+ mm = get_task_mm(task);
67161 if (mm) {
67162 size = task_statm(mm, &shared, &text, &data, &resident);
67163 mmput(mm);
67164@@ -590,6 +658,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
67165 return 0;
67166 }
67167
67168+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
67169+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
67170+{
67171+ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
67172+}
67173+#endif
67174+
67175 #ifdef CONFIG_CHECKPOINT_RESTORE
67176 static struct pid *
67177 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
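
`PAX_RAND_FLAGS(mm)` gates every address field emitted by `/proc/<pid>/stat` (eip, esp, wchan, start_code/end_code, start_stack, the data/brk range): a process may still see its own addresses, but for any other task with mmap randomization or SEGMEXEC enabled the fields are blanked, so stat output cannot be harvested to defeat ASLR. Its decision logic restated as a standalone predicate (the `MF_PAX_*` bit values below are placeholders, not the real ones):

#include <stdbool.h>
#include <stdio.h>

#define MF_PAX_RANDMMAP  0x01u   /* placeholder bit values */
#define MF_PAX_SEGMEXEC  0x02u

struct mm { unsigned pax_flags; };

/* Redact when looking at someone else's randomized address space. */
static bool pax_rand_flags(const struct mm *target, const struct mm *self)
{
	return target != NULL && target != self &&
	       (target->pax_flags & (MF_PAX_RANDMMAP | MF_PAX_SEGMEXEC));
}

int main(void)
{
	struct mm me = { MF_PAX_RANDMMAP }, other = { MF_PAX_RANDMMAP };

	printf("%d\n", pax_rand_flags(&me, &me));      /* 0: own mm */
	printf("%d\n", pax_rand_flags(&other, &me));   /* 1: redact */
	return 0;
}
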
67178diff --git a/fs/proc/base.c b/fs/proc/base.c
67179index 2d696b0..b9da447 100644
67180--- a/fs/proc/base.c
67181+++ b/fs/proc/base.c
67182@@ -113,6 +113,14 @@ struct pid_entry {
67183 union proc_op op;
67184 };
67185
67186+struct getdents_callback {
67187+ struct linux_dirent __user * current_dir;
67188+ struct linux_dirent __user * previous;
67189+ struct file * file;
67190+ int count;
67191+ int error;
67192+};
67193+
67194 #define NOD(NAME, MODE, IOP, FOP, OP) { \
67195 .name = (NAME), \
67196 .len = sizeof(NAME) - 1, \
67197@@ -205,12 +213,28 @@ static int proc_pid_cmdline(struct task_struct *task, char *buffer)
67198 return get_cmdline(task, buffer, PAGE_SIZE);
67199 }
67200
67201+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67202+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
67203+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
67204+ _mm->pax_flags & MF_PAX_SEGMEXEC))
67205+#endif
67206+
67207 static int proc_pid_auxv(struct task_struct *task, char *buffer)
67208 {
67209 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
67210 int res = PTR_ERR(mm);
67211 if (mm && !IS_ERR(mm)) {
67212 unsigned int nwords = 0;
67213+
67214+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67215+ /* allow if we're currently ptracing this task */
67216+ if (PAX_RAND_FLAGS(mm) &&
67217+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
67218+ mmput(mm);
67219+ return 0;
67220+ }
67221+#endif
67222+
67223 do {
67224 nwords += 2;
67225 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
67226@@ -224,7 +248,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
67227 }
67228
67229
67230-#ifdef CONFIG_KALLSYMS
67231+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67232 /*
67233 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
67234 * Returns the resolved symbol. If that fails, simply return the address.
67235@@ -263,7 +287,7 @@ static void unlock_trace(struct task_struct *task)
67236 mutex_unlock(&task->signal->cred_guard_mutex);
67237 }
67238
67239-#ifdef CONFIG_STACKTRACE
67240+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67241
67242 #define MAX_STACK_TRACE_DEPTH 64
67243
67244@@ -486,7 +510,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
67245 return count;
67246 }
67247
67248-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
67249+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
67250 static int proc_pid_syscall(struct task_struct *task, char *buffer)
67251 {
67252 long nr;
67253@@ -515,7 +539,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
67254 /************************************************************************/
67255
67256 /* permission checks */
67257-static int proc_fd_access_allowed(struct inode *inode)
67258+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
67259 {
67260 struct task_struct *task;
67261 int allowed = 0;
67262@@ -525,7 +549,10 @@ static int proc_fd_access_allowed(struct inode *inode)
67263 */
67264 task = get_proc_task(inode);
67265 if (task) {
67266- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
67267+ if (log)
67268+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
67269+ else
67270+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
67271 put_task_struct(task);
67272 }
67273 return allowed;
67274@@ -556,10 +583,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
67275 struct task_struct *task,
67276 int hide_pid_min)
67277 {
67278+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
67279+ return false;
67280+
67281+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67282+ rcu_read_lock();
67283+ {
67284+ const struct cred *tmpcred = current_cred();
67285+ const struct cred *cred = __task_cred(task);
67286+
67287+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
67288+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67289+ || in_group_p(grsec_proc_gid)
67290+#endif
67291+ ) {
67292+ rcu_read_unlock();
67293+ return true;
67294+ }
67295+ }
67296+ rcu_read_unlock();
67297+
67298+ if (!pid->hide_pid)
67299+ return false;
67300+#endif
67301+
67302 if (pid->hide_pid < hide_pid_min)
67303 return true;
67304 if (in_group_p(pid->pid_gid))
67305 return true;
67306+
67307 return ptrace_may_access(task, PTRACE_MODE_READ);
67308 }
67309
67310@@ -577,7 +629,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
67311 put_task_struct(task);
67312
67313 if (!has_perms) {
67314+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67315+ {
67316+#else
67317 if (pid->hide_pid == 2) {
67318+#endif
67319 /*
67320 * Let's make getdents(), stat(), and open()
67321 * consistent with each other. If a process
67322@@ -675,6 +731,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
67323 if (!task)
67324 return -ESRCH;
67325
67326+ if (gr_acl_handle_procpidmem(task)) {
67327+ put_task_struct(task);
67328+ return -EPERM;
67329+ }
67330+
67331 mm = mm_access(task, mode);
67332 put_task_struct(task);
67333
67334@@ -690,6 +751,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
67335
67336 file->private_data = mm;
67337
67338+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67339+ file->f_version = current->exec_id;
67340+#endif
67341+
67342 return 0;
67343 }
67344
67345@@ -711,6 +776,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
67346 ssize_t copied;
67347 char *page;
67348
67349+#ifdef CONFIG_GRKERNSEC
67350+ if (write)
67351+ return -EPERM;
67352+#endif
67353+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67354+ if (file->f_version != current->exec_id) {
67355+ gr_log_badprocpid("mem");
67356+ return 0;
67357+ }
67358+#endif
67359+
67360 if (!mm)
67361 return 0;
67362
67363@@ -723,7 +799,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
67364 goto free;
67365
67366 while (count > 0) {
67367- int this_len = min_t(int, count, PAGE_SIZE);
67368+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
67369
67370 if (write && copy_from_user(page, buf, this_len)) {
67371 copied = -EFAULT;
67372@@ -815,6 +891,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
67373 if (!mm)
67374 return 0;
67375
67376+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67377+ if (file->f_version != current->exec_id) {
67378+ gr_log_badprocpid("environ");
67379+ return 0;
67380+ }
67381+#endif
67382+
67383 page = (char *)__get_free_page(GFP_TEMPORARY);
67384 if (!page)
67385 return -ENOMEM;
67386@@ -824,7 +907,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
67387 goto free;
67388 while (count > 0) {
67389 size_t this_len, max_len;
67390- int retval;
67391+ ssize_t retval;
67392
67393 if (src >= (mm->env_end - mm->env_start))
67394 break;
67395@@ -1438,7 +1521,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
67396 int error = -EACCES;
67397
67398 /* Are we allowed to snoop on the tasks file descriptors? */
67399- if (!proc_fd_access_allowed(inode))
67400+ if (!proc_fd_access_allowed(inode, 0))
67401 goto out;
67402
67403 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
67404@@ -1482,8 +1565,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
67405 struct path path;
67406
67407 /* Are we allowed to snoop on the tasks file descriptors? */
67408- if (!proc_fd_access_allowed(inode))
67409- goto out;
67410+ /* logging this is needed for learning on chromium to work properly,
67411+ but we don't want to flood the logs from 'ps' which does a readlink
67412+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
67413+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
67414+ */
67415+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
67416+ if (!proc_fd_access_allowed(inode,0))
67417+ goto out;
67418+ } else {
67419+ if (!proc_fd_access_allowed(inode,1))
67420+ goto out;
67421+ }
67422
67423 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
67424 if (error)
67425@@ -1533,7 +1626,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
67426 rcu_read_lock();
67427 cred = __task_cred(task);
67428 inode->i_uid = cred->euid;
67429+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67430+ inode->i_gid = grsec_proc_gid;
67431+#else
67432 inode->i_gid = cred->egid;
67433+#endif
67434 rcu_read_unlock();
67435 }
67436 security_task_to_inode(task, inode);
67437@@ -1569,10 +1666,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
67438 return -ENOENT;
67439 }
67440 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
67441+#ifdef CONFIG_GRKERNSEC_PROC_USER
67442+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
67443+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67444+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
67445+#endif
67446 task_dumpable(task)) {
67447 cred = __task_cred(task);
67448 stat->uid = cred->euid;
67449+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67450+ stat->gid = grsec_proc_gid;
67451+#else
67452 stat->gid = cred->egid;
67453+#endif
67454 }
67455 }
67456 rcu_read_unlock();
67457@@ -1610,11 +1716,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
67458
67459 if (task) {
67460 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
67461+#ifdef CONFIG_GRKERNSEC_PROC_USER
67462+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
67463+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67464+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
67465+#endif
67466 task_dumpable(task)) {
67467 rcu_read_lock();
67468 cred = __task_cred(task);
67469 inode->i_uid = cred->euid;
67470+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67471+ inode->i_gid = grsec_proc_gid;
67472+#else
67473 inode->i_gid = cred->egid;
67474+#endif
67475 rcu_read_unlock();
67476 } else {
67477 inode->i_uid = GLOBAL_ROOT_UID;
67478@@ -2149,6 +2264,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
67479 if (!task)
67480 goto out_no_task;
67481
67482+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
67483+ goto out;
67484+
67485 /*
67486 * Yes, it does not scale. And it should not. Don't add
67487 * new entries into /proc/<tgid>/ without very good reasons.
67488@@ -2179,6 +2297,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
67489 if (!task)
67490 return -ENOENT;
67491
67492+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
67493+ goto out;
67494+
67495 if (!dir_emit_dots(file, ctx))
67496 goto out;
67497
67498@@ -2568,7 +2689,7 @@ static const struct pid_entry tgid_base_stuff[] = {
67499 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
67500 #endif
67501 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
67502-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
67503+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
67504 INF("syscall", S_IRUSR, proc_pid_syscall),
67505 #endif
67506 INF("cmdline", S_IRUGO, proc_pid_cmdline),
67507@@ -2593,10 +2714,10 @@ static const struct pid_entry tgid_base_stuff[] = {
67508 #ifdef CONFIG_SECURITY
67509 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
67510 #endif
67511-#ifdef CONFIG_KALLSYMS
67512+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67513 INF("wchan", S_IRUGO, proc_pid_wchan),
67514 #endif
67515-#ifdef CONFIG_STACKTRACE
67516+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67517 ONE("stack", S_IRUSR, proc_pid_stack),
67518 #endif
67519 #ifdef CONFIG_SCHEDSTATS
67520@@ -2630,6 +2751,9 @@ static const struct pid_entry tgid_base_stuff[] = {
67521 #ifdef CONFIG_HARDWALL
67522 INF("hardwall", S_IRUGO, proc_pid_hardwall),
67523 #endif
67524+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
67525+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
67526+#endif
67527 #ifdef CONFIG_USER_NS
67528 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
67529 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
67530@@ -2760,7 +2884,14 @@ static int proc_pid_instantiate(struct inode *dir,
67531 if (!inode)
67532 goto out;
67533
67534+#ifdef CONFIG_GRKERNSEC_PROC_USER
67535+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
67536+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67537+ inode->i_gid = grsec_proc_gid;
67538+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
67539+#else
67540 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
67541+#endif
67542 inode->i_op = &proc_tgid_base_inode_operations;
67543 inode->i_fop = &proc_tgid_base_operations;
67544 inode->i_flags|=S_IMMUTABLE;
67545@@ -2798,7 +2929,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
67546 if (!task)
67547 goto out;
67548
67549+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
67550+ goto out_put_task;
67551+
67552 result = proc_pid_instantiate(dir, dentry, task, NULL);
67553+out_put_task:
67554 put_task_struct(task);
67555 out:
67556 return ERR_PTR(result);
67557@@ -2904,7 +3039,7 @@ static const struct pid_entry tid_base_stuff[] = {
67558 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
67559 #endif
67560 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
67561-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
67562+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
67563 INF("syscall", S_IRUSR, proc_pid_syscall),
67564 #endif
67565 INF("cmdline", S_IRUGO, proc_pid_cmdline),
67566@@ -2931,10 +3066,10 @@ static const struct pid_entry tid_base_stuff[] = {
67567 #ifdef CONFIG_SECURITY
67568 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
67569 #endif
67570-#ifdef CONFIG_KALLSYMS
67571+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67572 INF("wchan", S_IRUGO, proc_pid_wchan),
67573 #endif
67574-#ifdef CONFIG_STACKTRACE
67575+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67576 ONE("stack", S_IRUSR, proc_pid_stack),
67577 #endif
67578 #ifdef CONFIG_SCHEDSTATS
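
Taken together, the base.c hunks move `/proc/<pid>` from "visible unless hidepid says otherwise" to "hidden unless the viewer qualifies": `has_pid_permissions()` first rejects chrooted or hidden tasks outright, then admits root, the owner, or members of `grsec_proc_gid`, and only then falls through to the stock hidepid/ptrace logic. The admission rule reduced to a standalone predicate (simplified sketch; uids and gids as plain integers):

#include <stdbool.h>
#include <stdio.h>

struct cred { unsigned uid, gid; };

static bool may_see_task(const struct cred *viewer,
                         const struct cred *task, unsigned proc_gid)
{
	return viewer->uid == 0 ||          /* root                      */
	       viewer->uid == task->uid ||  /* own processes             */
	       viewer->gid == proc_gid;     /* the configured proc group */
}

int main(void)
{
	struct cred alice = { 1000, 1000 }, bob = { 1001, 1001 };

	printf("%d\n", may_see_task(&alice, &alice, 50)); /* 1 */
	printf("%d\n", may_see_task(&alice, &bob,   50)); /* 0 */
	return 0;
}
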
67579diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
67580index cbd82df..c0407d2 100644
67581--- a/fs/proc/cmdline.c
67582+++ b/fs/proc/cmdline.c
67583@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
67584
67585 static int __init proc_cmdline_init(void)
67586 {
67587+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67588+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
67589+#else
67590 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
67591+#endif
67592 return 0;
67593 }
67594 fs_initcall(proc_cmdline_init);
67595diff --git a/fs/proc/devices.c b/fs/proc/devices.c
67596index 50493ed..248166b 100644
67597--- a/fs/proc/devices.c
67598+++ b/fs/proc/devices.c
67599@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
67600
67601 static int __init proc_devices_init(void)
67602 {
67603+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67604+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
67605+#else
67606 proc_create("devices", 0, NULL, &proc_devinfo_operations);
67607+#endif
67608 return 0;
67609 }
67610 fs_initcall(proc_devices_init);
67611diff --git a/fs/proc/fd.c b/fs/proc/fd.c
67612index 0788d09..9cc1385 100644
67613--- a/fs/proc/fd.c
67614+++ b/fs/proc/fd.c
67615@@ -26,7 +26,8 @@ static int seq_show(struct seq_file *m, void *v)
67616 if (!task)
67617 return -ENOENT;
67618
67619- files = get_files_struct(task);
67620+ if (!gr_acl_handle_procpidmem(task))
67621+ files = get_files_struct(task);
67622 put_task_struct(task);
67623
67624 if (files) {
67625@@ -285,11 +286,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
67626 */
67627 int proc_fd_permission(struct inode *inode, int mask)
67628 {
67629+ struct task_struct *task;
67630 int rv = generic_permission(inode, mask);
67631- if (rv == 0)
67632- return 0;
67633+
67634 if (task_tgid(current) == proc_pid(inode))
67635 rv = 0;
67636+
67637+ task = get_proc_task(inode);
67638+ if (task == NULL)
67639+ return rv;
67640+
67641+ if (gr_acl_handle_procpidmem(task))
67642+ rv = -EACCES;
67643+
67644+ put_task_struct(task);
67645+
67646 return rv;
67647 }
67648
67649diff --git a/fs/proc/generic.c b/fs/proc/generic.c
67650index b7f268e..3bea6b7 100644
67651--- a/fs/proc/generic.c
67652+++ b/fs/proc/generic.c
67653@@ -23,6 +23,7 @@
67654 #include <linux/bitops.h>
67655 #include <linux/spinlock.h>
67656 #include <linux/completion.h>
67657+#include <linux/grsecurity.h>
67658 #include <asm/uaccess.h>
67659
67660 #include "internal.h"
67661@@ -207,6 +208,15 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
67662 return proc_lookup_de(PDE(dir), dir, dentry);
67663 }
67664
67665+struct dentry *proc_lookup_restrict(struct inode *dir, struct dentry *dentry,
67666+ unsigned int flags)
67667+{
67668+ if (gr_proc_is_restricted())
67669+ return ERR_PTR(-EACCES);
67670+
67671+ return proc_lookup_de(PDE(dir), dir, dentry);
67672+}
67673+
67674 /*
67675 * This returns non-zero if at EOF, so that the /proc
67676 * root directory can use this and check if it should
67677@@ -264,6 +274,16 @@ int proc_readdir(struct file *file, struct dir_context *ctx)
67678 return proc_readdir_de(PDE(inode), file, ctx);
67679 }
67680
67681+int proc_readdir_restrict(struct file *file, struct dir_context *ctx)
67682+{
67683+ struct inode *inode = file_inode(file);
67684+
67685+ if (gr_proc_is_restricted())
67686+ return -EACCES;
67687+
67688+ return proc_readdir_de(PDE(inode), file, ctx);
67689+}
67690+
67691 /*
67692 * These are the generic /proc directory operations. They
67693 * use the in-memory "struct proc_dir_entry" tree to parse
67694@@ -275,6 +295,12 @@ static const struct file_operations proc_dir_operations = {
67695 .iterate = proc_readdir,
67696 };
67697
67698+static const struct file_operations proc_dir_restricted_operations = {
67699+ .llseek = generic_file_llseek,
67700+ .read = generic_read_dir,
67701+ .iterate = proc_readdir_restrict,
67702+};
67703+
67704 /*
67705 * proc directories can do almost nothing..
67706 */
67707@@ -284,6 +310,12 @@ static const struct inode_operations proc_dir_inode_operations = {
67708 .setattr = proc_notify_change,
67709 };
67710
67711+static const struct inode_operations proc_dir_restricted_inode_operations = {
67712+ .lookup = proc_lookup_restrict,
67713+ .getattr = proc_getattr,
67714+ .setattr = proc_notify_change,
67715+};
67716+
67717 static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
67718 {
67719 struct proc_dir_entry *tmp;
67720@@ -294,8 +326,13 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
67721 return ret;
67722
67723 if (S_ISDIR(dp->mode)) {
67724- dp->proc_fops = &proc_dir_operations;
67725- dp->proc_iops = &proc_dir_inode_operations;
67726+ if (dp->restricted) {
67727+ dp->proc_fops = &proc_dir_restricted_operations;
67728+ dp->proc_iops = &proc_dir_restricted_inode_operations;
67729+ } else {
67730+ dp->proc_fops = &proc_dir_operations;
67731+ dp->proc_iops = &proc_dir_inode_operations;
67732+ }
67733 dir->nlink++;
67734 } else if (S_ISLNK(dp->mode)) {
67735 dp->proc_iops = &proc_link_inode_operations;
67736@@ -407,6 +444,27 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
67737 }
67738 EXPORT_SYMBOL_GPL(proc_mkdir_data);
67739
67740+struct proc_dir_entry *proc_mkdir_data_restrict(const char *name, umode_t mode,
67741+ struct proc_dir_entry *parent, void *data)
67742+{
67743+ struct proc_dir_entry *ent;
67744+
67745+ if (mode == 0)
67746+ mode = S_IRUGO | S_IXUGO;
67747+
67748+ ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
67749+ if (ent) {
67750+ ent->data = data;
67751+ ent->restricted = 1;
67752+ if (proc_register(parent, ent) < 0) {
67753+ kfree(ent);
67754+ ent = NULL;
67755+ }
67756+ }
67757+ return ent;
67758+}
67759+EXPORT_SYMBOL_GPL(proc_mkdir_data_restrict);
67760+
67761 struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
67762 struct proc_dir_entry *parent)
67763 {
67764@@ -421,6 +479,13 @@ struct proc_dir_entry *proc_mkdir(const char *name,
67765 }
67766 EXPORT_SYMBOL(proc_mkdir);
67767
67768+struct proc_dir_entry *proc_mkdir_restrict(const char *name,
67769+ struct proc_dir_entry *parent)
67770+{
67771+ return proc_mkdir_data_restrict(name, 0, parent, NULL);
67772+}
67773+EXPORT_SYMBOL(proc_mkdir_restrict);
67774+
67775 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
67776 struct proc_dir_entry *parent,
67777 const struct file_operations *proc_fops,
67778diff --git a/fs/proc/inode.c b/fs/proc/inode.c
67779index 0adbc02..bee4d0b 100644
67780--- a/fs/proc/inode.c
67781+++ b/fs/proc/inode.c
67782@@ -23,11 +23,17 @@
67783 #include <linux/slab.h>
67784 #include <linux/mount.h>
67785 #include <linux/magic.h>
67786+#include <linux/grsecurity.h>
67787
67788 #include <asm/uaccess.h>
67789
67790 #include "internal.h"
67791
67792+#ifdef CONFIG_PROC_SYSCTL
67793+extern const struct inode_operations proc_sys_inode_operations;
67794+extern const struct inode_operations proc_sys_dir_operations;
67795+#endif
67796+
67797 static void proc_evict_inode(struct inode *inode)
67798 {
67799 struct proc_dir_entry *de;
67800@@ -55,6 +61,13 @@ static void proc_evict_inode(struct inode *inode)
67801 ns = PROC_I(inode)->ns.ns;
67802 if (ns_ops && ns)
67803 ns_ops->put(ns);
67804+
67805+#ifdef CONFIG_PROC_SYSCTL
67806+ if (inode->i_op == &proc_sys_inode_operations ||
67807+ inode->i_op == &proc_sys_dir_operations)
67808+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
67809+#endif
67810+
67811 }
67812
67813 static struct kmem_cache * proc_inode_cachep;
67814@@ -413,7 +426,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
67815 if (de->mode) {
67816 inode->i_mode = de->mode;
67817 inode->i_uid = de->uid;
67818+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67819+ inode->i_gid = grsec_proc_gid;
67820+#else
67821 inode->i_gid = de->gid;
67822+#endif
67823 }
67824 if (de->size)
67825 inode->i_size = de->size;
67826diff --git a/fs/proc/internal.h b/fs/proc/internal.h
67827index 3ab6d14..b26174e 100644
67828--- a/fs/proc/internal.h
67829+++ b/fs/proc/internal.h
67830@@ -46,9 +46,10 @@ struct proc_dir_entry {
67831 struct completion *pde_unload_completion;
67832 struct list_head pde_openers; /* who did ->open, but not ->release */
67833 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
67834+ u8 restricted; /* a directory in /proc/net that should be restricted via GRKERNSEC_PROC */
67835 u8 namelen;
67836 char name[];
67837-};
67838+} __randomize_layout;
67839
67840 union proc_op {
67841 int (*proc_get_link)(struct dentry *, struct path *);
67842@@ -67,7 +68,7 @@ struct proc_inode {
67843 struct ctl_table *sysctl_entry;
67844 struct proc_ns ns;
67845 struct inode vfs_inode;
67846-};
67847+} __randomize_layout;
67848
67849 /*
67850 * General functions
67851@@ -155,6 +156,9 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
67852 struct pid *, struct task_struct *);
67853 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
67854 struct pid *, struct task_struct *);
67855+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
67856+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
67857+#endif
67858
67859 /*
67860 * base.c
67861@@ -181,9 +185,11 @@ extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, i
67862 extern spinlock_t proc_subdir_lock;
67863
67864 extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
67865+extern struct dentry *proc_lookup_restrict(struct inode *, struct dentry *, unsigned int);
67866 extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *,
67867 struct dentry *);
67868 extern int proc_readdir(struct file *, struct dir_context *);
67869+extern int proc_readdir_restrict(struct file *, struct dir_context *);
67870 extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *);
67871
67872 static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
67873diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
67874index a352d57..cb94a5c 100644
67875--- a/fs/proc/interrupts.c
67876+++ b/fs/proc/interrupts.c
67877@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
67878
67879 static int __init proc_interrupts_init(void)
67880 {
67881+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67882+ proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
67883+#else
67884 proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
67885+#endif
67886 return 0;
67887 }
67888 fs_initcall(proc_interrupts_init);
67889diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
67890index 39e6ef3..2f9cb5e 100644
67891--- a/fs/proc/kcore.c
67892+++ b/fs/proc/kcore.c
67893@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
67894 * the addresses in the elf_phdr on our list.
67895 */
67896 start = kc_offset_to_vaddr(*fpos - elf_buflen);
67897- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
67898+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
67899+ if (tsz > buflen)
67900 tsz = buflen;
67901-
67902+
67903 while (buflen) {
67904 struct kcore_list *m;
67905
67906@@ -514,20 +515,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
67907 kfree(elf_buf);
67908 } else {
67909 if (kern_addr_valid(start)) {
67910- unsigned long n;
67911+ char *elf_buf;
67912+ mm_segment_t oldfs;
67913
67914- n = copy_to_user(buffer, (char *)start, tsz);
67915- /*
67916- * We cannot distinguish between fault on source
67917- * and fault on destination. When this happens
67918- * we clear too and hope it will trigger the
67919- * EFAULT again.
67920- */
67921- if (n) {
67922- if (clear_user(buffer + tsz - n,
67923- n))
67924+ elf_buf = kmalloc(tsz, GFP_KERNEL);
67925+ if (!elf_buf)
67926+ return -ENOMEM;
67927+ oldfs = get_fs();
67928+ set_fs(KERNEL_DS);
67929+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
67930+ set_fs(oldfs);
67931+ if (copy_to_user(buffer, elf_buf, tsz)) {
67932+ kfree(elf_buf);
67933 return -EFAULT;
67934+ }
67935 }
67936+ set_fs(oldfs);
67937+ kfree(elf_buf);
67938 } else {
67939 if (clear_user(buffer, tsz))
67940 return -EFAULT;
67941@@ -547,6 +551,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
67942
67943 static int open_kcore(struct inode *inode, struct file *filp)
67944 {
67945+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
67946+ return -EPERM;
67947+#endif
67948 if (!capable(CAP_SYS_RAWIO))
67949 return -EPERM;
67950 if (kcore_need_update)
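
With GRKERNSEC_PROC_ADD or GRKERNSEC_HIDESYM enabled, the patched open_kcore() refuses every open with -EPERM before the capability check even runs; the read path also switches from a direct copy_to_user() of kernel addresses to a kmalloc'd bounce buffer filled under KERNEL_DS. The refusal is easy to probe from user space:

    /* On a kernel with either option set, this open fails with EPERM even
     * for root; on a stock kernel it fails only without CAP_SYS_RAWIO. */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/proc/kcore", O_RDONLY);

        if (fd < 0) {
            perror("open /proc/kcore");
            return 1;
        }
        puts("/proc/kcore opened; kcore access is not restricted here");
        close(fd);
        return 0;
    }
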
67951diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
67952index 7445af0..7c5113c 100644
67953--- a/fs/proc/meminfo.c
67954+++ b/fs/proc/meminfo.c
67955@@ -187,7 +187,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
67956 vmi.used >> 10,
67957 vmi.largest_chunk >> 10
67958 #ifdef CONFIG_MEMORY_FAILURE
67959- ,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
67960+ ,atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
67961 #endif
67962 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
67963 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
67964diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
67965index d4a3574..b421ce9 100644
67966--- a/fs/proc/nommu.c
67967+++ b/fs/proc/nommu.c
67968@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
67969
67970 if (file) {
67971 seq_pad(m, ' ');
67972- seq_path(m, &file->f_path, "");
67973+ seq_path(m, &file->f_path, "\n\\");
67974 }
67975
67976 seq_putc(m, '\n');
67977diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
67978index 4677bb7..dad3045 100644
67979--- a/fs/proc/proc_net.c
67980+++ b/fs/proc/proc_net.c
67981@@ -23,9 +23,27 @@
67982 #include <linux/nsproxy.h>
67983 #include <net/net_namespace.h>
67984 #include <linux/seq_file.h>
67985+#include <linux/grsecurity.h>
67986
67987 #include "internal.h"
67988
67989+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
67990+static struct seq_operations *ipv6_seq_ops_addr;
67991+
67992+void register_ipv6_seq_ops_addr(struct seq_operations *addr)
67993+{
67994+ ipv6_seq_ops_addr = addr;
67995+}
67996+
67997+void unregister_ipv6_seq_ops_addr(void)
67998+{
67999+ ipv6_seq_ops_addr = NULL;
68000+}
68001+
68002+EXPORT_SYMBOL_GPL(register_ipv6_seq_ops_addr);
68003+EXPORT_SYMBOL_GPL(unregister_ipv6_seq_ops_addr);
68004+#endif
68005+
68006 static inline struct net *PDE_NET(struct proc_dir_entry *pde)
68007 {
68008 return pde->parent->data;
68009@@ -36,6 +54,8 @@ static struct net *get_proc_net(const struct inode *inode)
68010 return maybe_get_net(PDE_NET(PDE(inode)));
68011 }
68012
68013+extern const struct seq_operations dev_seq_ops;
68014+
68015 int seq_open_net(struct inode *ino, struct file *f,
68016 const struct seq_operations *ops, int size)
68017 {
68018@@ -44,6 +64,14 @@ int seq_open_net(struct inode *ino, struct file *f,
68019
68020 BUG_ON(size < sizeof(*p));
68021
68022+	/* when restricted, permit access only to /proc/net/dev (and the registered ipv6 address seq ops) */
68023+ if (
68024+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
68025+ ops != ipv6_seq_ops_addr &&
68026+#endif
68027+ ops != &dev_seq_ops && gr_proc_is_restricted())
68028+ return -EACCES;
68029+
68030 net = get_proc_net(ino);
68031 if (net == NULL)
68032 return -ENXIO;
68033@@ -66,6 +94,9 @@ int single_open_net(struct inode *inode, struct file *file,
68034 int err;
68035 struct net *net;
68036
68037+ if (gr_proc_is_restricted())
68038+ return -EACCES;
68039+
68040 err = -ENXIO;
68041 net = get_proc_net(inode);
68042 if (net == NULL)
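
The net effect of the proc_net.c hunks: when gr_proc_is_restricted() is true, only /proc/net/dev (plus the ipv6 address ops registered through register_ipv6_seq_ops_addr()) can be opened via seq_open_net()/single_open_net(). A probe to observe it (expect EACCES on the second file when restricted):

    #include <stdio.h>

    static void try_open(const char *path)
    {
        FILE *f = fopen(path, "r");

        if (!f)
            perror(path);
        else {
            printf("%s: readable\n", path);
            fclose(f);
        }
    }

    int main(void)
    {
        try_open("/proc/net/dev");   /* allowed even when restricted */
        try_open("/proc/net/tcp");   /* EACCES when restricted */
        return 0;
    }
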
68043diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
68044index 7129046..6914844 100644
68045--- a/fs/proc/proc_sysctl.c
68046+++ b/fs/proc/proc_sysctl.c
68047@@ -11,13 +11,21 @@
68048 #include <linux/namei.h>
68049 #include <linux/mm.h>
68050 #include <linux/module.h>
68051+#include <linux/nsproxy.h>
68052+#ifdef CONFIG_GRKERNSEC
68053+#include <net/net_namespace.h>
68054+#endif
68055 #include "internal.h"
68056
68057+extern int gr_handle_chroot_sysctl(const int op);
68058+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
68059+ const int op);
68060+
68061 static const struct dentry_operations proc_sys_dentry_operations;
68062 static const struct file_operations proc_sys_file_operations;
68063-static const struct inode_operations proc_sys_inode_operations;
68064+const struct inode_operations proc_sys_inode_operations;
68065 static const struct file_operations proc_sys_dir_file_operations;
68066-static const struct inode_operations proc_sys_dir_operations;
68067+const struct inode_operations proc_sys_dir_operations;
68068
68069 void proc_sys_poll_notify(struct ctl_table_poll *poll)
68070 {
68071@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
68072
68073 err = NULL;
68074 d_set_d_op(dentry, &proc_sys_dentry_operations);
68075+
68076+ gr_handle_proc_create(dentry, inode);
68077+
68078 d_add(dentry, inode);
68079
68080 out:
68081@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
68082 struct inode *inode = file_inode(filp);
68083 struct ctl_table_header *head = grab_header(inode);
68084 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
68085+ int op = write ? MAY_WRITE : MAY_READ;
68086 ssize_t error;
68087 size_t res;
68088
68089@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
68090 * and won't be until we finish.
68091 */
68092 error = -EPERM;
68093- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
68094+ if (sysctl_perm(head, table, op))
68095 goto out;
68096
68097 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
68098@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
68099 if (!table->proc_handler)
68100 goto out;
68101
68102+#ifdef CONFIG_GRKERNSEC
68103+ error = -EPERM;
68104+ if (gr_handle_chroot_sysctl(op))
68105+ goto out;
68106+ dget(filp->f_path.dentry);
68107+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
68108+ dput(filp->f_path.dentry);
68109+ goto out;
68110+ }
68111+ dput(filp->f_path.dentry);
68112+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
68113+ goto out;
68114+ if (write) {
68115+ if (current->nsproxy->net_ns != table->extra2) {
68116+ if (!capable(CAP_SYS_ADMIN))
68117+ goto out;
68118+ } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
68119+ goto out;
68120+ }
68121+#endif
68122+
68123 /* careful: calling conventions are nasty here */
68124 res = count;
68125 error = table->proc_handler(table, write, buf, &res, ppos);
68126@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
68127 return false;
68128 } else {
68129 d_set_d_op(child, &proc_sys_dentry_operations);
68130+
68131+ gr_handle_proc_create(child, inode);
68132+
68133 d_add(child, inode);
68134 }
68135 } else {
68136@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
68137 if ((*pos)++ < ctx->pos)
68138 return true;
68139
68140+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
68141+ return 0;
68142+
68143 if (unlikely(S_ISLNK(table->mode)))
68144 res = proc_sys_link_fill_cache(file, ctx, head, table);
68145 else
68146@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
68147 if (IS_ERR(head))
68148 return PTR_ERR(head);
68149
68150+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
68151+ return -ENOENT;
68152+
68153 generic_fillattr(inode, stat);
68154 if (table)
68155 stat->mode = (stat->mode & S_IFMT) | table->mode;
68156@@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
68157 .llseek = generic_file_llseek,
68158 };
68159
68160-static const struct inode_operations proc_sys_inode_operations = {
68161+const struct inode_operations proc_sys_inode_operations = {
68162 .permission = proc_sys_permission,
68163 .setattr = proc_sys_setattr,
68164 .getattr = proc_sys_getattr,
68165 };
68166
68167-static const struct inode_operations proc_sys_dir_operations = {
68168+const struct inode_operations proc_sys_dir_operations = {
68169 .lookup = proc_sys_lookup,
68170 .permission = proc_sys_permission,
68171 .setattr = proc_sys_setattr,
68172@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
68173 static struct ctl_dir *new_dir(struct ctl_table_set *set,
68174 const char *name, int namelen)
68175 {
68176- struct ctl_table *table;
68177+ ctl_table_no_const *table;
68178 struct ctl_dir *new;
68179 struct ctl_node *node;
68180 char *new_name;
68181@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
68182 return NULL;
68183
68184 node = (struct ctl_node *)(new + 1);
68185- table = (struct ctl_table *)(node + 1);
68186+ table = (ctl_table_no_const *)(node + 1);
68187 new_name = (char *)(table + 2);
68188 memcpy(new_name, name, namelen);
68189 new_name[namelen] = '\0';
68190@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
68191 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
68192 struct ctl_table_root *link_root)
68193 {
68194- struct ctl_table *link_table, *entry, *link;
68195+ ctl_table_no_const *link_table, *link;
68196+ struct ctl_table *entry;
68197 struct ctl_table_header *links;
68198 struct ctl_node *node;
68199 char *link_name;
68200@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
68201 return NULL;
68202
68203 node = (struct ctl_node *)(links + 1);
68204- link_table = (struct ctl_table *)(node + nr_entries);
68205+ link_table = (ctl_table_no_const *)(node + nr_entries);
68206 link_name = (char *)&link_table[nr_entries + 1];
68207
68208 for (link = link_table, entry = table; entry->procname; link++, entry++) {
68209@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
68210 struct ctl_table_header ***subheader, struct ctl_table_set *set,
68211 struct ctl_table *table)
68212 {
68213- struct ctl_table *ctl_table_arg = NULL;
68214- struct ctl_table *entry, *files;
68215+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
68216+ struct ctl_table *entry;
68217 int nr_files = 0;
68218 int nr_dirs = 0;
68219 int err = -ENOMEM;
68220@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
68221 nr_files++;
68222 }
68223
68224- files = table;
68225 /* If there are mixed files and directories we need a new table */
68226 if (nr_dirs && nr_files) {
68227- struct ctl_table *new;
68228+ ctl_table_no_const *new;
68229 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
68230 GFP_KERNEL);
68231 if (!files)
68232@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
68233 /* Register everything except a directory full of subdirectories */
68234 if (nr_files || !nr_dirs) {
68235 struct ctl_table_header *header;
68236- header = __register_sysctl_table(set, path, files);
68237+ header = __register_sysctl_table(set, path, files ? files : table);
68238 if (!header) {
68239 kfree(ctl_table_arg);
68240 goto out;
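
The proc_sys_call_handler hunk adds chroot and RBAC hooks, and on writes requires CAP_SYS_ADMIN when the caller's net namespace differs from the table's owning namespace (table->extra2), or CAP_NET_ADMIN in the owning namespace otherwise, surfacing as EPERM to the writer. A minimal sysctl write through /proc/sys to exercise that path (ip_forward is just an example net sysctl; run it somewhere scratch, since a successful write changes the setting):

    #include <stdio.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        const char *val = "1\n";
        int fd = open("/proc/sys/net/ipv4/ip_forward", O_WRONLY);

        if (fd < 0) {
            perror("open ip_forward");
            return 1;
        }
        if (write(fd, val, strlen(val)) < 0)
            perror("write ip_forward");   /* EPERM when the checks bite */
        close(fd);
        return 0;
    }
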
68241diff --git a/fs/proc/root.c b/fs/proc/root.c
68242index 5dbadec..473af2f 100644
68243--- a/fs/proc/root.c
68244+++ b/fs/proc/root.c
68245@@ -185,7 +185,15 @@ void __init proc_root_init(void)
68246 proc_mkdir("openprom", NULL);
68247 #endif
68248 proc_tty_init();
68249+#ifdef CONFIG_GRKERNSEC_PROC_ADD
68250+#ifdef CONFIG_GRKERNSEC_PROC_USER
68251+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
68252+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68253+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
68254+#endif
68255+#else
68256 proc_mkdir("bus", NULL);
68257+#endif
68258 proc_sys_init();
68259 }
68260
68261diff --git a/fs/proc/stat.c b/fs/proc/stat.c
68262index bf2d03f..f058f9c 100644
68263--- a/fs/proc/stat.c
68264+++ b/fs/proc/stat.c
68265@@ -11,6 +11,7 @@
68266 #include <linux/irqnr.h>
68267 #include <linux/cputime.h>
68268 #include <linux/tick.h>
68269+#include <linux/grsecurity.h>
68270
68271 #ifndef arch_irq_stat_cpu
68272 #define arch_irq_stat_cpu(cpu) 0
68273@@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v)
68274 u64 sum_softirq = 0;
68275 unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
68276 struct timespec boottime;
68277+ int unrestricted = 1;
68278+
68279+#ifdef CONFIG_GRKERNSEC_PROC_ADD
68280+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68281+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
68282+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
68283+ && !in_group_p(grsec_proc_gid)
68284+#endif
68285+ )
68286+ unrestricted = 0;
68287+#endif
68288+#endif
68289
68290 user = nice = system = idle = iowait =
68291 irq = softirq = steal = 0;
68292@@ -99,23 +112,25 @@ static int show_stat(struct seq_file *p, void *v)
68293 nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
68294 system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
68295 idle += get_idle_time(i);
68296- iowait += get_iowait_time(i);
68297- irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
68298- softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
68299- steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
68300- guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
68301- guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
68302- sum += kstat_cpu_irqs_sum(i);
68303- sum += arch_irq_stat_cpu(i);
68304+ if (unrestricted) {
68305+ iowait += get_iowait_time(i);
68306+ irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
68307+ softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
68308+ steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
68309+ guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
68310+ guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
68311+ sum += kstat_cpu_irqs_sum(i);
68312+ sum += arch_irq_stat_cpu(i);
68313+ for (j = 0; j < NR_SOFTIRQS; j++) {
68314+ unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
68315
68316- for (j = 0; j < NR_SOFTIRQS; j++) {
68317- unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
68318-
68319- per_softirq_sums[j] += softirq_stat;
68320- sum_softirq += softirq_stat;
68321+ per_softirq_sums[j] += softirq_stat;
68322+ sum_softirq += softirq_stat;
68323+ }
68324 }
68325 }
68326- sum += arch_irq_stat();
68327+ if (unrestricted)
68328+ sum += arch_irq_stat();
68329
68330 seq_puts(p, "cpu ");
68331 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
68332@@ -136,12 +151,14 @@ static int show_stat(struct seq_file *p, void *v)
68333 nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
68334 system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
68335 idle = get_idle_time(i);
68336- iowait = get_iowait_time(i);
68337- irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
68338- softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
68339- steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
68340- guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
68341- guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
68342+ if (unrestricted) {
68343+ iowait = get_iowait_time(i);
68344+ irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
68345+ softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
68346+ steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
68347+ guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
68348+ guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
68349+ }
68350 seq_printf(p, "cpu%d", i);
68351 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
68352 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
68353@@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v)
68354
68355 /* sum again ? it could be updated? */
68356 for_each_irq_nr(j)
68357- seq_put_decimal_ull(p, ' ', kstat_irqs(j));
68358+ seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs(j) : 0ULL);
68359
68360 seq_printf(p,
68361 "\nctxt %llu\n"
68362@@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
68363 "processes %lu\n"
68364 "procs_running %lu\n"
68365 "procs_blocked %lu\n",
68366- nr_context_switches(),
68367+ unrestricted ? nr_context_switches() : 0ULL,
68368 (unsigned long)jif,
68369- total_forks,
68370- nr_running(),
68371- nr_iowait());
68372+ unrestricted ? total_forks : 0UL,
68373+ unrestricted ? nr_running() : 0UL,
68374+ unrestricted ? nr_iowait() : 0UL);
68375
68376 seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
68377
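
After the show_stat() changes, a reader that is not "unrestricted" (root, or the GRKERNSEC_PROC group) sees zeros in the gated fields: iowait, irq, softirq, steal, guest counters, per-IRQ counts, ctxt, processes, procs_running and procs_blocked. A small parser to check a few of them:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long user, nice, sys, idle, iowait, irq, softirq;
        FILE *f = fopen("/proc/stat", "r");

        if (!f) {
            perror("fopen /proc/stat");
            return 1;
        }
        if (fscanf(f, "cpu %llu %llu %llu %llu %llu %llu %llu",
                   &user, &nice, &sys, &idle, &iowait, &irq, &softirq) == 7)
            printf("iowait=%llu irq=%llu softirq=%llu\n",
                   iowait, irq, softirq);  /* all zero when restricted */
        fclose(f);
        return 0;
    }
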
68378diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
68379index cfa63ee..fce112e 100644
68380--- a/fs/proc/task_mmu.c
68381+++ b/fs/proc/task_mmu.c
68382@@ -13,12 +13,19 @@
68383 #include <linux/swap.h>
68384 #include <linux/swapops.h>
68385 #include <linux/mmu_notifier.h>
68386+#include <linux/grsecurity.h>
68387
68388 #include <asm/elf.h>
68389 #include <asm/uaccess.h>
68390 #include <asm/tlbflush.h>
68391 #include "internal.h"
68392
68393+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68394+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
68395+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
68396+ _mm->pax_flags & MF_PAX_SEGMEXEC))
68397+#endif
68398+
68399 void task_mem(struct seq_file *m, struct mm_struct *mm)
68400 {
68401 unsigned long data, text, lib, swap;
68402@@ -54,8 +61,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
68403 "VmExe:\t%8lu kB\n"
68404 "VmLib:\t%8lu kB\n"
68405 "VmPTE:\t%8lu kB\n"
68406- "VmSwap:\t%8lu kB\n",
68407- hiwater_vm << (PAGE_SHIFT-10),
68408+ "VmSwap:\t%8lu kB\n"
68409+
68410+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
68411+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
68412+#endif
68413+
68414+ ,hiwater_vm << (PAGE_SHIFT-10),
68415 total_vm << (PAGE_SHIFT-10),
68416 mm->locked_vm << (PAGE_SHIFT-10),
68417 mm->pinned_vm << (PAGE_SHIFT-10),
68418@@ -65,7 +77,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
68419 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
68420 (PTRS_PER_PTE * sizeof(pte_t) *
68421 atomic_long_read(&mm->nr_ptes)) >> 10,
68422- swap << (PAGE_SHIFT-10));
68423+ swap << (PAGE_SHIFT-10)
68424+
68425+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
68426+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68427+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
68428+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
68429+#else
68430+ , mm->context.user_cs_base
68431+ , mm->context.user_cs_limit
68432+#endif
68433+#endif
68434+
68435+ );
68436 }
68437
68438 unsigned long task_vsize(struct mm_struct *mm)
68439@@ -271,13 +295,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
68440 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
68441 }
68442
68443- /* We don't show the stack guard page in /proc/maps */
68444+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68445+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
68446+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
68447+#else
68448 start = vma->vm_start;
68449- if (stack_guard_page_start(vma, start))
68450- start += PAGE_SIZE;
68451 end = vma->vm_end;
68452- if (stack_guard_page_end(vma, end))
68453- end -= PAGE_SIZE;
68454+#endif
68455
68456 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
68457 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
68458@@ -287,7 +311,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
68459 flags & VM_WRITE ? 'w' : '-',
68460 flags & VM_EXEC ? 'x' : '-',
68461 flags & VM_MAYSHARE ? 's' : 'p',
68462+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68463+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
68464+#else
68465 pgoff,
68466+#endif
68467 MAJOR(dev), MINOR(dev), ino);
68468
68469 /*
68470@@ -296,7 +324,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
68471 */
68472 if (file) {
68473 seq_pad(m, ' ');
68474- seq_path(m, &file->f_path, "\n");
68475+ seq_path(m, &file->f_path, "\n\\");
68476 goto done;
68477 }
68478
68479@@ -328,8 +356,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
68480 * Thread stack in /proc/PID/task/TID/maps or
68481 * the main process stack.
68482 */
68483- if (!is_pid || (vma->vm_start <= mm->start_stack &&
68484- vma->vm_end >= mm->start_stack)) {
68485+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
68486+ (vma->vm_start <= mm->start_stack &&
68487+ vma->vm_end >= mm->start_stack)) {
68488 name = "[stack]";
68489 } else {
68490 /* Thread stack in /proc/PID/maps */
68491@@ -353,6 +382,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
68492 struct proc_maps_private *priv = m->private;
68493 struct task_struct *task = priv->task;
68494
68495+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68496+ if (current->exec_id != m->exec_id) {
68497+ gr_log_badprocpid("maps");
68498+ return 0;
68499+ }
68500+#endif
68501+
68502 show_map_vma(m, vma, is_pid);
68503
68504 if (m->count < m->size) /* vma is copied successfully */
68505@@ -593,12 +629,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
68506 .private = &mss,
68507 };
68508
68509+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68510+ if (current->exec_id != m->exec_id) {
68511+ gr_log_badprocpid("smaps");
68512+ return 0;
68513+ }
68514+#endif
68515 memset(&mss, 0, sizeof mss);
68516- mss.vma = vma;
68517- /* mmap_sem is held in m_start */
68518- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
68519- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
68520-
68521+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68522+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
68523+#endif
68524+ mss.vma = vma;
68525+ /* mmap_sem is held in m_start */
68526+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
68527+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
68528+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68529+ }
68530+#endif
68531 show_map_vma(m, vma, is_pid);
68532
68533 seq_printf(m,
68534@@ -616,7 +663,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
68535 "KernelPageSize: %8lu kB\n"
68536 "MMUPageSize: %8lu kB\n"
68537 "Locked: %8lu kB\n",
68538+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68539+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
68540+#else
68541 (vma->vm_end - vma->vm_start) >> 10,
68542+#endif
68543 mss.resident >> 10,
68544 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
68545 mss.shared_clean >> 10,
68546@@ -1398,6 +1449,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
68547 char buffer[64];
68548 int nid;
68549
68550+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68551+ if (current->exec_id != m->exec_id) {
68552+ gr_log_badprocpid("numa_maps");
68553+ return 0;
68554+ }
68555+#endif
68556+
68557 if (!mm)
68558 return 0;
68559
68560@@ -1415,11 +1473,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
68561 mpol_to_str(buffer, sizeof(buffer), pol);
68562 mpol_cond_put(pol);
68563
68564+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68565+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
68566+#else
68567 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
68568+#endif
68569
68570 if (file) {
68571 seq_puts(m, " file=");
68572- seq_path(m, &file->f_path, "\n\t= ");
68573+ seq_path(m, &file->f_path, "\n\t\\= ");
68574 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
68575 seq_puts(m, " heap");
68576 } else {
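
Under GRKERNSEC_PROC_MEMMAP with PaX randomization active on the target mm, the task_mmu.c hunks zero the start/end/offset columns in maps, smaps and numa_maps, and show_map()/show_smap() bail out entirely on an exec_id mismatch. A trivial reader to compare your own maps against another pid's:

    /* Usage: ./maps [pid]   (defaults to "self", which stays unmasked) */
    #include <stdio.h>

    int main(int argc, char **argv)
    {
        char path[64], line[512];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/%s/maps",
                 argc > 1 ? argv[1] : "self");
        f = fopen(path, "r");
        if (!f) {
            perror(path);
            return 1;
        }
        while (fgets(line, sizeof(line), f))
            fputs(line, stdout);   /* masked rows read 00000000-00000000 */
        fclose(f);
        return 0;
    }
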
68577diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
68578index 678455d..ebd3245 100644
68579--- a/fs/proc/task_nommu.c
68580+++ b/fs/proc/task_nommu.c
68581@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
68582 else
68583 bytes += kobjsize(mm);
68584
68585- if (current->fs && current->fs->users > 1)
68586+ if (current->fs && atomic_read(&current->fs->users) > 1)
68587 sbytes += kobjsize(current->fs);
68588 else
68589 bytes += kobjsize(current->fs);
68590@@ -161,7 +161,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
68591
68592 if (file) {
68593 seq_pad(m, ' ');
68594- seq_path(m, &file->f_path, "");
68595+ seq_path(m, &file->f_path, "\n\\");
68596 } else if (mm) {
68597 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
68598
68599diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
68600index 382aa89..6b03974 100644
68601--- a/fs/proc/vmcore.c
68602+++ b/fs/proc/vmcore.c
68603@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
68604 nr_bytes = count;
68605
68606 /* If pfn is not ram, return zeros for sparse dump files */
68607- if (pfn_is_ram(pfn) == 0)
68608- memset(buf, 0, nr_bytes);
68609- else {
68610+ if (pfn_is_ram(pfn) == 0) {
68611+ if (userbuf) {
68612+ if (clear_user((char __force_user *)buf, nr_bytes))
68613+ return -EFAULT;
68614+ } else
68615+ memset(buf, 0, nr_bytes);
68616+ } else {
68617 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
68618 offset, userbuf);
68619 if (tmp < 0)
68620@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
68621 static int copy_to(void *target, void *src, size_t size, int userbuf)
68622 {
68623 if (userbuf) {
68624- if (copy_to_user((char __user *) target, src, size))
68625+ if (copy_to_user((char __force_user *) target, src, size))
68626 return -EFAULT;
68627 } else {
68628 memcpy(target, src, size);
68629@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
68630 if (*fpos < m->offset + m->size) {
68631 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
68632 start = m->paddr + *fpos - m->offset;
68633- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
68634+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
68635 if (tmp < 0)
68636 return tmp;
68637 buflen -= tsz;
68638@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
68639 static ssize_t read_vmcore(struct file *file, char __user *buffer,
68640 size_t buflen, loff_t *fpos)
68641 {
68642- return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
68643+ return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
68644 }
68645
68646 /*
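
The vmcore hunk splits the zero-fill by destination: clear_user() for user buffers, since it survives faults on unmapped pages, and memset() only for kernel buffers. A compilable user-space mock of that split (clear_user is stubbed here; the fault-tolerant behavior described in the comment is what the real kernel primitive provides):

    #include <string.h>
    #include <stdio.h>

    #define EFAULT 14

    /* stub: the real clear_user() returns nonzero on an unhandled fault */
    static unsigned long clear_user(void *to, unsigned long n)
    {
        memset(to, 0, n);
        return 0;
    }

    static int zero_target(void *target, unsigned long size, int userbuf)
    {
        if (userbuf) {
            if (clear_user(target, size))
                return -EFAULT;     /* surface the fault, don't crash */
        } else {
            memset(target, 0, size);
        }
        return 0;
    }

    int main(void)
    {
        char buf[8] = "garbage";
        int rc = zero_target(buf, sizeof(buf), 1);

        printf("rc=%d first byte=%d\n", rc, buf[0]);
        return rc;
    }
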
68647diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
68648index b00fcc9..e0c6381 100644
68649--- a/fs/qnx6/qnx6.h
68650+++ b/fs/qnx6/qnx6.h
68651@@ -74,7 +74,7 @@ enum {
68652 BYTESEX_BE,
68653 };
68654
68655-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
68656+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
68657 {
68658 if (sbi->s_bytesex == BYTESEX_LE)
68659 return le64_to_cpu((__force __le64)n);
68660@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
68661 return (__force __fs64)cpu_to_be64(n);
68662 }
68663
68664-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
68665+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
68666 {
68667 if (sbi->s_bytesex == BYTESEX_LE)
68668 return le32_to_cpu((__force __le32)n);
68669diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
68670index 72d2917..c917c12 100644
68671--- a/fs/quota/netlink.c
68672+++ b/fs/quota/netlink.c
68673@@ -45,7 +45,7 @@ static struct genl_family quota_genl_family = {
68674 void quota_send_warning(struct kqid qid, dev_t dev,
68675 const char warntype)
68676 {
68677- static atomic_t seq;
68678+ static atomic_unchecked_t seq;
68679 struct sk_buff *skb;
68680 void *msg_head;
68681 int ret;
68682@@ -61,7 +61,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
68683 "VFS: Not enough memory to send quota warning.\n");
68684 return;
68685 }
68686- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
68687+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
68688 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
68689 if (!msg_head) {
68690 printk(KERN_ERR
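
atomic_unchecked_t and the *_unchecked operations used here opt a counter out of PaX REFCOUNT overflow trapping, for counters (like this netlink sequence number) where wrapping is harmless. When the feature is off they collapse onto the ordinary atomics; a sketch of that fallback, reproduced from memory of the rest of this patch, so treat it as an assumption:

    /* Sketch: no-REFCOUNT fallback for the unchecked atomic family. */
    #ifndef CONFIG_PAX_REFCOUNT
    typedef atomic_t atomic_unchecked_t;
    #define atomic_read_unchecked(v)            atomic_read(v)
    #define atomic_inc_unchecked(v)             atomic_inc(v)
    #define atomic_add_return_unchecked(i, v)   atomic_add_return((i), (v))
    #endif
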
68691diff --git a/fs/read_write.c b/fs/read_write.c
68692index 009d854..16ce214 100644
68693--- a/fs/read_write.c
68694+++ b/fs/read_write.c
68695@@ -495,7 +495,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
68696
68697 old_fs = get_fs();
68698 set_fs(get_ds());
68699- p = (__force const char __user *)buf;
68700+ p = (const char __force_user *)buf;
68701 if (count > MAX_RW_COUNT)
68702 count = MAX_RW_COUNT;
68703 if (file->f_op->write)
68704diff --git a/fs/readdir.c b/fs/readdir.c
68705index 33fd922..e0d6094 100644
68706--- a/fs/readdir.c
68707+++ b/fs/readdir.c
68708@@ -18,6 +18,7 @@
68709 #include <linux/security.h>
68710 #include <linux/syscalls.h>
68711 #include <linux/unistd.h>
68712+#include <linux/namei.h>
68713
68714 #include <asm/uaccess.h>
68715
68716@@ -71,6 +72,7 @@ struct old_linux_dirent {
68717 struct readdir_callback {
68718 struct dir_context ctx;
68719 struct old_linux_dirent __user * dirent;
68720+ struct file * file;
68721 int result;
68722 };
68723
68724@@ -88,6 +90,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
68725 buf->result = -EOVERFLOW;
68726 return -EOVERFLOW;
68727 }
68728+
68729+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68730+ return 0;
68731+
68732 buf->result++;
68733 dirent = buf->dirent;
68734 if (!access_ok(VERIFY_WRITE, dirent,
68735@@ -119,6 +125,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
68736 if (!f.file)
68737 return -EBADF;
68738
68739+ buf.file = f.file;
68740 error = iterate_dir(f.file, &buf.ctx);
68741 if (buf.result)
68742 error = buf.result;
68743@@ -144,6 +151,7 @@ struct getdents_callback {
68744 struct dir_context ctx;
68745 struct linux_dirent __user * current_dir;
68746 struct linux_dirent __user * previous;
68747+ struct file * file;
68748 int count;
68749 int error;
68750 };
68751@@ -165,6 +173,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
68752 buf->error = -EOVERFLOW;
68753 return -EOVERFLOW;
68754 }
68755+
68756+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68757+ return 0;
68758+
68759 dirent = buf->previous;
68760 if (dirent) {
68761 if (__put_user(offset, &dirent->d_off))
68762@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
68763 if (!f.file)
68764 return -EBADF;
68765
68766+ buf.file = f.file;
68767 error = iterate_dir(f.file, &buf.ctx);
68768 if (error >= 0)
68769 error = buf.error;
68770@@ -228,6 +241,7 @@ struct getdents_callback64 {
68771 struct dir_context ctx;
68772 struct linux_dirent64 __user * current_dir;
68773 struct linux_dirent64 __user * previous;
68774+ struct file *file;
68775 int count;
68776 int error;
68777 };
68778@@ -243,6 +257,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
68779 buf->error = -EINVAL; /* only used if we fail.. */
68780 if (reclen > buf->count)
68781 return -EINVAL;
68782+
68783+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68784+ return 0;
68785+
68786 dirent = buf->previous;
68787 if (dirent) {
68788 if (__put_user(offset, &dirent->d_off))
68789@@ -290,6 +308,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
68790 if (!f.file)
68791 return -EBADF;
68792
68793+ buf.file = f.file;
68794 error = iterate_dir(f.file, &buf.ctx);
68795 if (error >= 0)
68796 error = buf.error;
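
The readdir.c hunks thread the struct file into each fill callback so gr_acl_handle_filldir() can consult it. The pattern: embed the extra state next to the dir_context in one struct, then recover it in the callback by casting, which is valid because the context is the first member. A runnable user-space reduction of that pattern (all names here are toy stand-ins):

    #include <stdio.h>

    struct dir_ctx {                   /* stand-in for struct dir_context */
        int (*actor)(struct dir_ctx *, const char *);
    };

    struct readdir_cb {
        struct dir_ctx ctx;            /* must stay first, as in the patch */
        const char *file;              /* the extra state the patch adds */
        int result;
    };

    static int fillonedir(struct dir_ctx *ctx, const char *name)
    {
        struct readdir_cb *buf = (struct readdir_cb *)ctx;

        printf("%s: entry %s\n", buf->file, name);
        buf->result++;
        return 0;
    }

    int main(void)
    {
        struct readdir_cb buf = { { fillonedir }, "demo-file", 0 };

        buf.ctx.actor(&buf.ctx, "example");
        return buf.result == 1 ? 0 : 1;
    }
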
68797diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
68798index 54fdf19..987862b 100644
68799--- a/fs/reiserfs/do_balan.c
68800+++ b/fs/reiserfs/do_balan.c
68801@@ -1872,7 +1872,7 @@ void do_balance(struct tree_balance *tb, struct item_head *ih,
68802 return;
68803 }
68804
68805- atomic_inc(&fs_generation(tb->tb_sb));
68806+ atomic_inc_unchecked(&fs_generation(tb->tb_sb));
68807 do_balance_starts(tb);
68808
68809 /*
68810diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
68811index cfaee91..b9d0d60 100644
68812--- a/fs/reiserfs/item_ops.c
68813+++ b/fs/reiserfs/item_ops.c
68814@@ -724,18 +724,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
68815 }
68816
68817 static struct item_operations errcatch_ops = {
68818- errcatch_bytes_number,
68819- errcatch_decrement_key,
68820- errcatch_is_left_mergeable,
68821- errcatch_print_item,
68822- errcatch_check_item,
68823+ .bytes_number = errcatch_bytes_number,
68824+ .decrement_key = errcatch_decrement_key,
68825+ .is_left_mergeable = errcatch_is_left_mergeable,
68826+ .print_item = errcatch_print_item,
68827+ .check_item = errcatch_check_item,
68828
68829- errcatch_create_vi,
68830- errcatch_check_left,
68831- errcatch_check_right,
68832- errcatch_part_size,
68833- errcatch_unit_num,
68834- errcatch_print_vi
68835+ .create_vi = errcatch_create_vi,
68836+ .check_left = errcatch_check_left,
68837+ .check_right = errcatch_check_right,
68838+ .part_size = errcatch_part_size,
68839+ .unit_num = errcatch_unit_num,
68840+ .print_vi = errcatch_print_vi
68841 };
68842
68843 #if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
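
The item_ops conversion above swaps positional initializers for designated ones, so a reordered struct definition can no longer silently bind a handler to the wrong slot. A runnable reduction showing the idiom:

    #include <stdio.h>

    struct item_ops_demo {
        int  (*bytes_number)(int);
        void (*print_item)(void);
    };

    static int  demo_bytes(int n) { return n * 2; }
    static void demo_print(void)  { puts("print_item called"); }

    static const struct item_ops_demo errcatch_demo = {
        .bytes_number = demo_bytes,   /* order-independent binding */
        .print_item   = demo_print,
    };

    int main(void)
    {
        printf("bytes_number(21) = %d\n", errcatch_demo.bytes_number(21));
        errcatch_demo.print_item();
        return 0;
    }
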
68844diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
68845index 02b0b7d..c85018b 100644
68846--- a/fs/reiserfs/procfs.c
68847+++ b/fs/reiserfs/procfs.c
68848@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
68849 "SMALL_TAILS " : "NO_TAILS ",
68850 replay_only(sb) ? "REPLAY_ONLY " : "",
68851 convert_reiserfs(sb) ? "CONV " : "",
68852- atomic_read(&r->s_generation_counter),
68853+ atomic_read_unchecked(&r->s_generation_counter),
68854 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
68855 SF(s_do_balance), SF(s_unneeded_left_neighbor),
68856 SF(s_good_search_by_key_reada), SF(s_bmaps),
68857diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
68858index bf53888..227f5ae 100644
68859--- a/fs/reiserfs/reiserfs.h
68860+++ b/fs/reiserfs/reiserfs.h
68861@@ -573,7 +573,7 @@ struct reiserfs_sb_info {
68862 /* Comment? -Hans */
68863 wait_queue_head_t s_wait;
68864 /* increased by one every time the tree gets re-balanced */
68865- atomic_t s_generation_counter;
68866+ atomic_unchecked_t s_generation_counter;
68867
68868 /* File system properties. Currently holds on-disk FS format */
68869 unsigned long s_properties;
68870@@ -2294,7 +2294,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
68871 #define REISERFS_USER_MEM 1 /* user memory mode */
68872
68873 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
68874-#define get_generation(s) atomic_read (&fs_generation(s))
68875+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
68876 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
68877 #define __fs_changed(gen,s) (gen != get_generation (s))
68878 #define fs_changed(gen,s) \
68879diff --git a/fs/select.c b/fs/select.c
68880index 467bb1c..cf9d65a 100644
68881--- a/fs/select.c
68882+++ b/fs/select.c
68883@@ -20,6 +20,7 @@
68884 #include <linux/export.h>
68885 #include <linux/slab.h>
68886 #include <linux/poll.h>
68887+#include <linux/security.h>
68888 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
68889 #include <linux/file.h>
68890 #include <linux/fdtable.h>
68891@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
68892 struct poll_list *walk = head;
68893 unsigned long todo = nfds;
68894
68895+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
68896 if (nfds > rlimit(RLIMIT_NOFILE))
68897 return -EINVAL;
68898
68899diff --git a/fs/seq_file.c b/fs/seq_file.c
68900index 3857b72..0b7281e 100644
68901--- a/fs/seq_file.c
68902+++ b/fs/seq_file.c
68903@@ -12,6 +12,8 @@
68904 #include <linux/slab.h>
68905 #include <linux/cred.h>
68906 #include <linux/mm.h>
68907+#include <linux/sched.h>
68908+#include <linux/grsecurity.h>
68909
68910 #include <asm/uaccess.h>
68911 #include <asm/page.h>
68912@@ -34,12 +36,7 @@ static void seq_set_overflow(struct seq_file *m)
68913
68914 static void *seq_buf_alloc(unsigned long size)
68915 {
68916- void *buf;
68917-
68918- buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
68919- if (!buf && size > PAGE_SIZE)
68920- buf = vmalloc(size);
68921- return buf;
68922+ return kmalloc(size, GFP_KERNEL | GFP_USERCOPY);
68923 }
68924
68925 /**
68926@@ -72,6 +69,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
68927 #ifdef CONFIG_USER_NS
68928 p->user_ns = file->f_cred->user_ns;
68929 #endif
68930+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68931+ p->exec_id = current->exec_id;
68932+#endif
68933
68934 /*
68935 * Wrappers around seq_open(e.g. swaps_open) need to be
68936@@ -94,6 +94,16 @@ int seq_open(struct file *file, const struct seq_operations *op)
68937 }
68938 EXPORT_SYMBOL(seq_open);
68939
68940+
68941+int seq_open_restrict(struct file *file, const struct seq_operations *op)
68942+{
68943+ if (gr_proc_is_restricted())
68944+ return -EACCES;
68945+
68946+ return seq_open(file, op);
68947+}
68948+EXPORT_SYMBOL(seq_open_restrict);
68949+
68950 static int traverse(struct seq_file *m, loff_t offset)
68951 {
68952 loff_t pos = 0, index;
68953@@ -165,7 +175,7 @@ Eoverflow:
68954 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
68955 {
68956 struct seq_file *m = file->private_data;
68957- size_t copied = 0;
68958+ ssize_t copied = 0;
68959 loff_t pos;
68960 size_t n;
68961 void *p;
68962@@ -596,7 +606,7 @@ static void single_stop(struct seq_file *p, void *v)
68963 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
68964 void *data)
68965 {
68966- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
68967+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
68968 int res = -ENOMEM;
68969
68970 if (op) {
68971@@ -632,6 +642,17 @@ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
68972 }
68973 EXPORT_SYMBOL(single_open_size);
68974
68975+int single_open_restrict(struct file *file, int (*show)(struct seq_file *, void *),
68976+ void *data)
68977+{
68978+ if (gr_proc_is_restricted())
68979+ return -EACCES;
68980+
68981+ return single_open(file, show, data);
68982+}
68983+EXPORT_SYMBOL(single_open_restrict);
68984+
68985+
68986 int single_release(struct inode *inode, struct file *file)
68987 {
68988 const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
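
Among the seq_file.c changes, the switch of `copied` from size_t to ssize_t in seq_read() matters: the function stores negative error codes into that variable, and an unsigned type turns them into enormous byte counts on return. A runnable illustration:

    #include <stdio.h>

    static long read_like(unsigned long *ucopied, long *scopied)
    {
        long err = -14;                  /* -EFAULT */
        *ucopied = (unsigned long)err;   /* what "size_t copied = err" does */
        *scopied = err;                  /* what "ssize_t copied = err" does */
        return err;
    }

    int main(void)
    {
        unsigned long u;
        long s;

        read_like(&u, &s);
        printf("size_t copied  -> %lu (bogus byte count)\n", u);
        printf("ssize_t copied -> %ld (proper error)\n", s);
        return 0;
    }
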
68989diff --git a/fs/splice.c b/fs/splice.c
68990index f5cb9ba..8ddb1e9 100644
68991--- a/fs/splice.c
68992+++ b/fs/splice.c
68993@@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
68994 pipe_lock(pipe);
68995
68996 for (;;) {
68997- if (!pipe->readers) {
68998+ if (!atomic_read(&pipe->readers)) {
68999 send_sig(SIGPIPE, current, 0);
69000 if (!ret)
69001 ret = -EPIPE;
69002@@ -216,7 +216,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
69003 page_nr++;
69004 ret += buf->len;
69005
69006- if (pipe->files)
69007+ if (atomic_read(&pipe->files))
69008 do_wakeup = 1;
69009
69010 if (!--spd->nr_pages)
69011@@ -247,9 +247,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
69012 do_wakeup = 0;
69013 }
69014
69015- pipe->waiting_writers++;
69016+ atomic_inc(&pipe->waiting_writers);
69017 pipe_wait(pipe);
69018- pipe->waiting_writers--;
69019+ atomic_dec(&pipe->waiting_writers);
69020 }
69021
69022 pipe_unlock(pipe);
69023@@ -576,7 +576,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
69024 old_fs = get_fs();
69025 set_fs(get_ds());
69026 /* The cast to a user pointer is valid due to the set_fs() */
69027- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
69028+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
69029 set_fs(old_fs);
69030
69031 return res;
69032@@ -591,7 +591,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
69033 old_fs = get_fs();
69034 set_fs(get_ds());
69035 /* The cast to a user pointer is valid due to the set_fs() */
69036- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
69037+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
69038 set_fs(old_fs);
69039
69040 return res;
69041@@ -644,7 +644,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
69042 goto err;
69043
69044 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
69045- vec[i].iov_base = (void __user *) page_address(page);
69046+ vec[i].iov_base = (void __force_user *) page_address(page);
69047 vec[i].iov_len = this_len;
69048 spd.pages[i] = page;
69049 spd.nr_pages++;
69050@@ -783,7 +783,7 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
69051 ops->release(pipe, buf);
69052 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
69053 pipe->nrbufs--;
69054- if (pipe->files)
69055+ if (atomic_read(&pipe->files))
69056 sd->need_wakeup = true;
69057 }
69058
69059@@ -807,10 +807,10 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
69060 static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
69061 {
69062 while (!pipe->nrbufs) {
69063- if (!pipe->writers)
69064+ if (!atomic_read(&pipe->writers))
69065 return 0;
69066
69067- if (!pipe->waiting_writers && sd->num_spliced)
69068+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
69069 return 0;
69070
69071 if (sd->flags & SPLICE_F_NONBLOCK)
69072@@ -1040,7 +1040,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
69073 ops->release(pipe, buf);
69074 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
69075 pipe->nrbufs--;
69076- if (pipe->files)
69077+ if (atomic_read(&pipe->files))
69078 sd.need_wakeup = true;
69079 } else {
69080 buf->offset += ret;
69081@@ -1200,7 +1200,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
69082 * out of the pipe right after the splice_to_pipe(). So set
69083 * PIPE_READERS appropriately.
69084 */
69085- pipe->readers = 1;
69086+ atomic_set(&pipe->readers, 1);
69087
69088 current->splice_pipe = pipe;
69089 }
69090@@ -1496,6 +1496,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
69091
69092 partial[buffers].offset = off;
69093 partial[buffers].len = plen;
69094+ partial[buffers].private = 0;
69095
69096 off = 0;
69097 len -= plen;
69098@@ -1732,9 +1733,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
69099 ret = -ERESTARTSYS;
69100 break;
69101 }
69102- if (!pipe->writers)
69103+ if (!atomic_read(&pipe->writers))
69104 break;
69105- if (!pipe->waiting_writers) {
69106+ if (!atomic_read(&pipe->waiting_writers)) {
69107 if (flags & SPLICE_F_NONBLOCK) {
69108 ret = -EAGAIN;
69109 break;
69110@@ -1766,7 +1767,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
69111 pipe_lock(pipe);
69112
69113 while (pipe->nrbufs >= pipe->buffers) {
69114- if (!pipe->readers) {
69115+ if (!atomic_read(&pipe->readers)) {
69116 send_sig(SIGPIPE, current, 0);
69117 ret = -EPIPE;
69118 break;
69119@@ -1779,9 +1780,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
69120 ret = -ERESTARTSYS;
69121 break;
69122 }
69123- pipe->waiting_writers++;
69124+ atomic_inc(&pipe->waiting_writers);
69125 pipe_wait(pipe);
69126- pipe->waiting_writers--;
69127+ atomic_dec(&pipe->waiting_writers);
69128 }
69129
69130 pipe_unlock(pipe);
69131@@ -1817,14 +1818,14 @@ retry:
69132 pipe_double_lock(ipipe, opipe);
69133
69134 do {
69135- if (!opipe->readers) {
69136+ if (!atomic_read(&opipe->readers)) {
69137 send_sig(SIGPIPE, current, 0);
69138 if (!ret)
69139 ret = -EPIPE;
69140 break;
69141 }
69142
69143- if (!ipipe->nrbufs && !ipipe->writers)
69144+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
69145 break;
69146
69147 /*
69148@@ -1921,7 +1922,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
69149 pipe_double_lock(ipipe, opipe);
69150
69151 do {
69152- if (!opipe->readers) {
69153+ if (!atomic_read(&opipe->readers)) {
69154 send_sig(SIGPIPE, current, 0);
69155 if (!ret)
69156 ret = -EPIPE;
69157@@ -1966,7 +1967,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
69158 * return EAGAIN if we have the potential of some data in the
69159 * future, otherwise just return 0
69160 */
69161- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
69162+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
69163 ret = -EAGAIN;
69164
69165 pipe_unlock(ipipe);
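
The splice.c hunks convert pipe->readers, writers, waiting_writers and files to atomic_t as part of the REFCOUNT hardening; in mainline these counters are serialized by the pipe lock, so the demo below is not claiming a mainline race, only showing what atomic read-modify-write buys when a counter is updated concurrently without one. Build with cc -pthread:

    #include <stdio.h>
    #include <pthread.h>
    #include <stdatomic.h>

    #define N 1000000
    static int plain;
    static atomic_int atomic_ctr;

    static void *bump(void *arg)
    {
        (void)arg;
        for (int i = 0; i < N; i++) {
            plain++;                          /* racy read-modify-write */
            atomic_fetch_add(&atomic_ctr, 1); /* race-free */
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;

        pthread_create(&a, NULL, bump, NULL);
        pthread_create(&b, NULL, bump, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        /* plain typically falls short; atomic always hits 2*N */
        printf("plain=%d atomic=%d (expected %d)\n",
               plain, atomic_load(&atomic_ctr), 2 * N);
        return 0;
    }
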
69166diff --git a/fs/stat.c b/fs/stat.c
69167index ae0c3ce..9ee641c 100644
69168--- a/fs/stat.c
69169+++ b/fs/stat.c
69170@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
69171 stat->gid = inode->i_gid;
69172 stat->rdev = inode->i_rdev;
69173 stat->size = i_size_read(inode);
69174- stat->atime = inode->i_atime;
69175- stat->mtime = inode->i_mtime;
69176+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
69177+ stat->atime = inode->i_ctime;
69178+ stat->mtime = inode->i_ctime;
69179+ } else {
69180+ stat->atime = inode->i_atime;
69181+ stat->mtime = inode->i_mtime;
69182+ }
69183 stat->ctime = inode->i_ctime;
69184 stat->blksize = (1 << inode->i_blkbits);
69185 stat->blocks = inode->i_blocks;
69186@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
69187 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
69188 {
69189 struct inode *inode = path->dentry->d_inode;
69190+ int retval;
69191
69192- if (inode->i_op->getattr)
69193- return inode->i_op->getattr(path->mnt, path->dentry, stat);
69194+ if (inode->i_op->getattr) {
69195+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
69196+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
69197+ stat->atime = stat->ctime;
69198+ stat->mtime = stat->ctime;
69199+ }
69200+ return retval;
69201+ }
69202
69203 generic_fillattr(inode, stat);
69204 return 0;
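
The stat.c hunks mask atime and mtime with ctime on "sidechannel" devices for callers lacking CAP_MKNOD, hiding usage patterns such as tty activity. A probe (assuming ttys fall under grsecurity's is_sidechannel_device(), which this excerpt does not define; on a stock kernel the times are simply real):

    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
        struct stat st;

        if (stat("/dev/tty", &st) != 0) {
            perror("stat /dev/tty");
            return 1;
        }
        printf("atime=%ld mtime=%ld ctime=%ld%s\n",
               (long)st.st_atime, (long)st.st_mtime, (long)st.st_ctime,
               (st.st_atime == st.st_ctime && st.st_mtime == st.st_ctime)
                   ? " (masked, or genuinely untouched)" : "");
        return 0;
    }
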
69205diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
69206index 0b45ff4..847de5b 100644
69207--- a/fs/sysfs/dir.c
69208+++ b/fs/sysfs/dir.c
69209@@ -41,9 +41,16 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
69210 int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
69211 {
69212 struct kernfs_node *parent, *kn;
69213+ const char *name;
69214+ umode_t mode = S_IRWXU | S_IRUGO | S_IXUGO;
69215+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
69216+ const char *parent_name;
69217+#endif
69218
69219 BUG_ON(!kobj);
69220
69221+ name = kobject_name(kobj);
69222+
69223 if (kobj->parent)
69224 parent = kobj->parent->sd;
69225 else
69226@@ -52,11 +59,22 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
69227 if (!parent)
69228 return -ENOENT;
69229
69230- kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
69231- S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns);
69232+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
69233+ parent_name = parent->name;
69234+ mode = S_IRWXU;
69235+
69236+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
69237+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
69238+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
69239+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
69240+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
69241+#endif
69242+
69243+ kn = kernfs_create_dir_ns(parent, name,
69244+ mode, kobj, ns);
69245 if (IS_ERR(kn)) {
69246 if (PTR_ERR(kn) == -EEXIST)
69247- sysfs_warn_dup(parent, kobject_name(kobj));
69248+ sysfs_warn_dup(parent, name);
69249 return PTR_ERR(kn);
69250 }
69251
69252diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
69253index 69d4889..a810bd4 100644
69254--- a/fs/sysv/sysv.h
69255+++ b/fs/sysv/sysv.h
69256@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
69257 #endif
69258 }
69259
69260-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
69261+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
69262 {
69263 if (sbi->s_bytesex == BYTESEX_PDP)
69264 return PDP_swab((__force __u32)n);
69265diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
69266index 2290d58..7791371 100644
69267--- a/fs/ubifs/io.c
69268+++ b/fs/ubifs/io.c
69269@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
69270 return err;
69271 }
69272
69273-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
69274+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
69275 {
69276 int err;
69277
69278diff --git a/fs/udf/inode.c b/fs/udf/inode.c
69279index 236cd48..a6a4053 100644
69280--- a/fs/udf/inode.c
69281+++ b/fs/udf/inode.c
69282@@ -51,7 +51,6 @@ MODULE_LICENSE("GPL");
69283
69284 static umode_t udf_convert_permissions(struct fileEntry *);
69285 static int udf_update_inode(struct inode *, int);
69286-static void udf_fill_inode(struct inode *, struct buffer_head *);
69287 static int udf_sync_inode(struct inode *inode);
69288 static int udf_alloc_i_data(struct inode *inode, size_t size);
69289 static sector_t inode_getblk(struct inode *, sector_t, int *, int *);
69290@@ -1271,13 +1270,25 @@ update_time:
69291 return 0;
69292 }
69293
69294+/*
69295+ * Maximum length of linked list formed by ICB hierarchy. The chosen number is
69296+ * arbitrary - just that we hopefully don't limit any real use of rewritten
69297+ * inode on write-once media but avoid looping for too long on corrupted media.
69298+ */
69299+#define UDF_MAX_ICB_NESTING 1024
69300+
69301 static void __udf_read_inode(struct inode *inode)
69302 {
69303 struct buffer_head *bh = NULL;
69304 struct fileEntry *fe;
69305+ struct extendedFileEntry *efe;
69306 uint16_t ident;
69307 struct udf_inode_info *iinfo = UDF_I(inode);
69308+ struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
69309+ unsigned int link_count;
69310+ unsigned int indirections = 0;
69311
69312+reread:
69313 /*
69314 * Set defaults, but the inode is still incomplete!
69315 * Note: get_new_inode() sets the following on a new inode:
69316@@ -1307,6 +1318,7 @@ static void __udf_read_inode(struct inode *inode)
69317 }
69318
69319 fe = (struct fileEntry *)bh->b_data;
69320+ efe = (struct extendedFileEntry *)bh->b_data;
69321
69322 if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
69323 struct buffer_head *ibh;
69324@@ -1314,28 +1326,26 @@ static void __udf_read_inode(struct inode *inode)
69325 ibh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 1,
69326 &ident);
69327 if (ident == TAG_IDENT_IE && ibh) {
69328- struct buffer_head *nbh = NULL;
69329 struct kernel_lb_addr loc;
69330 struct indirectEntry *ie;
69331
69332 ie = (struct indirectEntry *)ibh->b_data;
69333 loc = lelb_to_cpu(ie->indirectICB.extLocation);
69334
69335- if (ie->indirectICB.extLength &&
69336- (nbh = udf_read_ptagged(inode->i_sb, &loc, 0,
69337- &ident))) {
69338- if (ident == TAG_IDENT_FE ||
69339- ident == TAG_IDENT_EFE) {
69340- memcpy(&iinfo->i_location,
69341- &loc,
69342- sizeof(struct kernel_lb_addr));
69343- brelse(bh);
69344- brelse(ibh);
69345- brelse(nbh);
69346- __udf_read_inode(inode);
69347+ if (ie->indirectICB.extLength) {
69348+ brelse(bh);
69349+ brelse(ibh);
69350+ memcpy(&iinfo->i_location, &loc,
69351+ sizeof(struct kernel_lb_addr));
69352+ if (++indirections > UDF_MAX_ICB_NESTING) {
69353+ udf_err(inode->i_sb,
69354+ "too many ICBs in ICB hierarchy"
69355+ " (max %d supported)\n",
69356+ UDF_MAX_ICB_NESTING);
69357+ make_bad_inode(inode);
69358 return;
69359 }
69360- brelse(nbh);
69361+ goto reread;
69362 }
69363 }
69364 brelse(ibh);
69365@@ -1346,22 +1356,6 @@ static void __udf_read_inode(struct inode *inode)
69366 make_bad_inode(inode);
69367 return;
69368 }
69369- udf_fill_inode(inode, bh);
69370-
69371- brelse(bh);
69372-}
69373-
69374-static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
69375-{
69376- struct fileEntry *fe;
69377- struct extendedFileEntry *efe;
69378- struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
69379- struct udf_inode_info *iinfo = UDF_I(inode);
69380- unsigned int link_count;
69381-
69382- fe = (struct fileEntry *)bh->b_data;
69383- efe = (struct extendedFileEntry *)bh->b_data;
69384-
69385 if (fe->icbTag.strategyType == cpu_to_le16(4))
69386 iinfo->i_strat4096 = 0;
69387 else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
69388@@ -1551,6 +1545,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
69389 } else
69390 make_bad_inode(inode);
69391 }
69392+ brelse(bh);
69393 }
69394
69395 static int udf_alloc_i_data(struct inode *inode, size_t size)
69396@@ -1664,7 +1659,7 @@ static int udf_update_inode(struct inode *inode, int do_sync)
69397 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
69398 fe->permissions = cpu_to_le32(udfperms);
69399
69400- if (S_ISDIR(inode->i_mode))
69401+ if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0)
69402 fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
69403 else
69404 fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
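
The __udf_read_inode rework replaces unbounded recursion through indirect ICBs with a `goto reread` loop capped at UDF_MAX_ICB_NESTING, so corrupted or malicious media can neither loop forever nor blow the kernel stack. A runnable reduction of the bounded-follow pattern (toy data; node table and cap are made up for the demo):

    #include <stdio.h>

    #define MAX_NESTING 1024

    /* toy "medium": node i points at next_node[i]; -1 terminates */
    static const int next_node[] = { 1, 2, 0 };   /* deliberate 0->1->2->0 cycle */

    static int resolve(int start)
    {
        unsigned int indirections = 0;
        int cur = start;

        while (next_node[cur] != -1) {
            if (++indirections > MAX_NESTING) {
                fprintf(stderr, "too many indirections (max %d)\n",
                        MAX_NESTING);
                return -1;             /* kernel would make_bad_inode() */
            }
            cur = next_node[cur];      /* the patch's "goto reread" */
        }
        return cur;
    }

    int main(void)
    {
        return resolve(0) < 0 ? 0 : 1; /* the cycle must be rejected */
    }
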
69405diff --git a/fs/udf/misc.c b/fs/udf/misc.c
69406index c175b4d..8f36a16 100644
69407--- a/fs/udf/misc.c
69408+++ b/fs/udf/misc.c
69409@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
69410
69411 u8 udf_tag_checksum(const struct tag *t)
69412 {
69413- u8 *data = (u8 *)t;
69414+ const u8 *data = (const u8 *)t;
69415 u8 checksum = 0;
69416 int i;
69417 for (i = 0; i < sizeof(struct tag); ++i)
69418diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
69419index 8d974c4..b82f6ec 100644
69420--- a/fs/ufs/swab.h
69421+++ b/fs/ufs/swab.h
69422@@ -22,7 +22,7 @@ enum {
69423 BYTESEX_BE
69424 };
69425
69426-static inline u64
69427+static inline u64 __intentional_overflow(-1)
69428 fs64_to_cpu(struct super_block *sbp, __fs64 n)
69429 {
69430 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
69431@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
69432 return (__force __fs64)cpu_to_be64(n);
69433 }
69434
69435-static inline u32
69436+static inline u32 __intentional_overflow(-1)
69437 fs32_to_cpu(struct super_block *sbp, __fs32 n)
69438 {
69439 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
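
__intentional_overflow(-1), sprinkled on these byte-swap helpers, marks functions whose arithmetic may legitimately wrap so the size_overflow GCC plugin does not instrument them. Without the plugin the annotation has to vanish; a plausible fallback, stated as an assumption since the defining header is not in this excerpt:

    /* Sketch: no-plugin fallback so annotated code still compiles. */
    #ifndef __intentional_overflow
    #define __intentional_overflow(...)
    #endif
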
69440diff --git a/fs/utimes.c b/fs/utimes.c
69441index aa138d6..5f3a811 100644
69442--- a/fs/utimes.c
69443+++ b/fs/utimes.c
69444@@ -1,6 +1,7 @@
69445 #include <linux/compiler.h>
69446 #include <linux/file.h>
69447 #include <linux/fs.h>
69448+#include <linux/security.h>
69449 #include <linux/linkage.h>
69450 #include <linux/mount.h>
69451 #include <linux/namei.h>
69452@@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times)
69453 }
69454 }
69455 retry_deleg:
69456+
69457+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
69458+ error = -EACCES;
69459+ goto mnt_drop_write_and_out;
69460+ }
69461+
69462 mutex_lock(&inode->i_mutex);
69463 error = notify_change(path->dentry, &newattrs, &delegated_inode);
69464 mutex_unlock(&inode->i_mutex);
69465diff --git a/fs/xattr.c b/fs/xattr.c
69466index c69e6d4..cc56af5 100644
69467--- a/fs/xattr.c
69468+++ b/fs/xattr.c
69469@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
69470 return rc;
69471 }
69472
69473+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
69474+ssize_t
69475+pax_getxattr(struct dentry *dentry, void *value, size_t size)
69476+{
69477+ struct inode *inode = dentry->d_inode;
69478+ ssize_t error;
69479+
69480+ error = inode_permission(inode, MAY_EXEC);
69481+ if (error)
69482+ return error;
69483+
69484+ if (inode->i_op->getxattr)
69485+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
69486+ else
69487+ error = -EOPNOTSUPP;
69488+
69489+ return error;
69490+}
69491+EXPORT_SYMBOL(pax_getxattr);
69492+#endif
69493+
69494 ssize_t
69495 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
69496 {
69497@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
69498 * Extended attribute SET operations
69499 */
69500 static long
69501-setxattr(struct dentry *d, const char __user *name, const void __user *value,
69502+setxattr(struct path *path, const char __user *name, const void __user *value,
69503 size_t size, int flags)
69504 {
69505 int error;
69506@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
69507 posix_acl_fix_xattr_from_user(kvalue, size);
69508 }
69509
69510- error = vfs_setxattr(d, kname, kvalue, size, flags);
69511+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
69512+ error = -EACCES;
69513+ goto out;
69514+ }
69515+
69516+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
69517 out:
69518 if (vvalue)
69519 vfree(vvalue);
69520@@ -377,7 +403,7 @@ retry:
69521 return error;
69522 error = mnt_want_write(path.mnt);
69523 if (!error) {
69524- error = setxattr(path.dentry, name, value, size, flags);
69525+ error = setxattr(&path, name, value, size, flags);
69526 mnt_drop_write(path.mnt);
69527 }
69528 path_put(&path);
69529@@ -401,7 +427,7 @@ retry:
69530 return error;
69531 error = mnt_want_write(path.mnt);
69532 if (!error) {
69533- error = setxattr(path.dentry, name, value, size, flags);
69534+ error = setxattr(&path, name, value, size, flags);
69535 mnt_drop_write(path.mnt);
69536 }
69537 path_put(&path);
69538@@ -416,16 +442,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
69539 const void __user *,value, size_t, size, int, flags)
69540 {
69541 struct fd f = fdget(fd);
69542- struct dentry *dentry;
69543 int error = -EBADF;
69544
69545 if (!f.file)
69546 return error;
69547- dentry = f.file->f_path.dentry;
69548- audit_inode(NULL, dentry, 0);
69549+ audit_inode(NULL, f.file->f_path.dentry, 0);
69550 error = mnt_want_write_file(f.file);
69551 if (!error) {
69552- error = setxattr(dentry, name, value, size, flags);
69553+ error = setxattr(&f.file->f_path, name, value, size, flags);
69554 mnt_drop_write_file(f.file);
69555 }
69556 fdput(f);
69557@@ -626,7 +650,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
69558 * Extended attribute REMOVE operations
69559 */
69560 static long
69561-removexattr(struct dentry *d, const char __user *name)
69562+removexattr(struct path *path, const char __user *name)
69563 {
69564 int error;
69565 char kname[XATTR_NAME_MAX + 1];
69566@@ -637,7 +661,10 @@ removexattr(struct dentry *d, const char __user *name)
69567 if (error < 0)
69568 return error;
69569
69570- return vfs_removexattr(d, kname);
69571+ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
69572+ return -EACCES;
69573+
69574+ return vfs_removexattr(path->dentry, kname);
69575 }
69576
69577 SYSCALL_DEFINE2(removexattr, const char __user *, pathname,
69578@@ -652,7 +679,7 @@ retry:
69579 return error;
69580 error = mnt_want_write(path.mnt);
69581 if (!error) {
69582- error = removexattr(path.dentry, name);
69583+ error = removexattr(&path, name);
69584 mnt_drop_write(path.mnt);
69585 }
69586 path_put(&path);
69587@@ -675,7 +702,7 @@ retry:
69588 return error;
69589 error = mnt_want_write(path.mnt);
69590 if (!error) {
69591- error = removexattr(path.dentry, name);
69592+ error = removexattr(&path, name);
69593 mnt_drop_write(path.mnt);
69594 }
69595 path_put(&path);
69596@@ -689,16 +716,16 @@ retry:
69597 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
69598 {
69599 struct fd f = fdget(fd);
69600- struct dentry *dentry;
69601+ struct path *path;
69602 int error = -EBADF;
69603
69604 if (!f.file)
69605 return error;
69606- dentry = f.file->f_path.dentry;
69607- audit_inode(NULL, dentry, 0);
69608+ path = &f.file->f_path;
69609+ audit_inode(NULL, path->dentry, 0);
69610 error = mnt_want_write_file(f.file);
69611 if (!error) {
69612- error = removexattr(dentry, name);
69613+ error = removexattr(path, name);
69614 mnt_drop_write_file(f.file);
69615 }
69616 fdput(f);
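
The pax_getxattr() helper added in the fs/xattr.c hunk above exports the PaX flags attribute through the inode's getxattr operation. A minimal userspace sketch of fetching the same attribute with getxattr(2) follows; the attribute name "user.pax.flags" is an assumption about what XATTR_NAME_PAX_FLAGS expands to, not something stated in the hunk itself.

/*
 * Sketch only, not part of the patch: read a file's PaX flags xattr
 * from userspace. Assumes XATTR_NAME_PAX_FLAGS is "user.pax.flags".
 */
#include <sys/types.h>
#include <sys/xattr.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	char buf[16];
	ssize_t len;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	len = getxattr(argv[1], "user.pax.flags", buf, sizeof(buf) - 1);
	if (len < 0) {
		perror("getxattr");	/* e.g. ENODATA when no flags are set */
		return 1;
	}
	buf[len] = '\0';
	printf("PaX flags: %s\n", buf);
	return 0;
}
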
69617diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
69618index 75c3fe5..b0f6bbe 100644
69619--- a/fs/xfs/xfs_bmap.c
69620+++ b/fs/xfs/xfs_bmap.c
69621@@ -583,7 +583,7 @@ xfs_bmap_validate_ret(
69622
69623 #else
69624 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
69625-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
69626+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
69627 #endif /* DEBUG */
69628
69629 /*
69630diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
69631index 48e99af..54ebae3 100644
69632--- a/fs/xfs/xfs_dir2_readdir.c
69633+++ b/fs/xfs/xfs_dir2_readdir.c
69634@@ -159,7 +159,12 @@ xfs_dir2_sf_getdents(
69635 ino = dp->d_ops->sf_get_ino(sfp, sfep);
69636 filetype = dp->d_ops->sf_get_ftype(sfep);
69637 ctx->pos = off & 0x7fffffff;
69638- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
69639+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
69640+ char name[sfep->namelen];
69641+ memcpy(name, sfep->name, sfep->namelen);
69642+ if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(dp->i_mount, filetype)))
69643+ return 0;
69644+ } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
69645 xfs_dir3_get_dtype(dp->i_mount, filetype)))
69646 return 0;
69647 sfep = dp->d_ops->sf_nextentry(sfp, sfep);
69648diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
69649index 8bc1bbc..0d6911b 100644
69650--- a/fs/xfs/xfs_ioctl.c
69651+++ b/fs/xfs/xfs_ioctl.c
69652@@ -122,7 +122,7 @@ xfs_find_handle(
69653 }
69654
69655 error = -EFAULT;
69656- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
69657+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
69658 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
69659 goto out_put;
69660
69661diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
69662new file mode 100644
69663index 0000000..27cec32
69664--- /dev/null
69665+++ b/grsecurity/Kconfig
69666@@ -0,0 +1,1166 @@
69667+#
69668+# grsecurity configuration
69669+#
69670+menu "Memory Protections"
69671+depends on GRKERNSEC
69672+
69673+config GRKERNSEC_KMEM
69674+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
69675+ default y if GRKERNSEC_CONFIG_AUTO
69676+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
69677+ help
69678+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
69679+ be written to or read from to modify or leak the contents of the running
69680+ kernel. /dev/port will also not be allowed to be opened, writing to
69681+ /dev/cpu/*/msr will be prevented, and support for kexec will be removed.
69682+ If you have module support disabled, enabling this will close up several
69683+ ways that are currently used to insert malicious code into the running
69684+ kernel.
69685+
69686+ Even with this feature enabled, we still highly recommend that
69687+ you use the RBAC system, as it is still possible for an attacker to
69688+ modify the running kernel through other more obscure methods.
69689+
69690+ It is highly recommended that you say Y here if you meet all the
69691+ conditions above.
69692+
69693+config GRKERNSEC_VM86
69694+ bool "Restrict VM86 mode"
69695+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
69696+ depends on X86_32
69697+
69698+ help
69699+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
69700+ make use of a special execution mode on 32bit x86 processors called
69701+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
69702+ video cards and will still work with this option enabled. The purpose
69703+ of the option is to prevent exploitation of emulation errors in
69704+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
69705+ Nearly all users should be able to enable this option.
69706+
69707+config GRKERNSEC_IO
69708+ bool "Disable privileged I/O"
69709+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
69710+ depends on X86
69711+ select RTC_CLASS
69712+ select RTC_INTF_DEV
69713+ select RTC_DRV_CMOS
69714+
69715+ help
69716+ If you say Y here, all ioperm and iopl calls will return an error.
69717+ Ioperm and iopl can be used to modify the running kernel.
69718+ Unfortunately, some programs need this access to operate properly,
69719+ the most notable of which are XFree86 and hwclock. hwclock can be
69720+ remedied by having RTC support in the kernel, so real-time
69721+ clock support is enabled if this option is enabled, to ensure
69722+ that hwclock operates correctly. If hwclock still does not work,
69723+ either update udev or symlink /dev/rtc to /dev/rtc0.
69724+
69725+ If you're using XFree86 or a version of Xorg from 2012 or earlier,
69726+ you may not be able to boot into a graphical environment with this
69727+ option enabled. In this case, you should use the RBAC system instead.
69728+
69729+config GRKERNSEC_JIT_HARDEN
69730+ bool "Harden BPF JIT against spray attacks"
69731+ default y if GRKERNSEC_CONFIG_AUTO
69732+ depends on BPF_JIT && X86
69733+ help
69734+ If you say Y here, the native code generated by the kernel's Berkeley
69735+ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
69736+ attacks that attempt to fit attacker-beneficial instructions in
69737+ 32bit immediate fields of JIT-generated native instructions. The
69738+ attacker will generally aim to cause an unintended instruction sequence
69739+ of JIT-generated native code to execute by jumping into the middle of
69740+ a generated instruction. This feature effectively randomizes the 32bit
69741+ immediate constants present in the generated code to thwart such attacks.
69742+
69743+ If you're using KERNEXEC, it's recommended that you enable this option
69744+ to supplement the hardening of the kernel.
69745+
69746+config GRKERNSEC_PERF_HARDEN
69747+ bool "Disable unprivileged PERF_EVENTS usage by default"
69748+ default y if GRKERNSEC_CONFIG_AUTO
69749+ depends on PERF_EVENTS
69750+ help
69751+ If you say Y here, the range of acceptable values for the
69752+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
69753+ default to a new value: 3. When the sysctl is set to this value, no
69754+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
69755+
69756+ Though PERF_EVENTS can be used legitimately for performance monitoring
69757+ and low-level application profiling, it is forced on regardless of
69758+ configuration, has been at fault for several vulnerabilities, and
69759+ creates new opportunities for side channels and other information leaks.
69760+
69761+ This feature puts PERF_EVENTS into a secure default state and permits
69762+ the administrator to change out of it temporarily if unprivileged
69763+ application profiling is needed.
69764+
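
To make the new default concrete, here is a minimal sketch of an unprivileged perf_event_open() call, which fails with EACCES once perf_event_paranoid is 3 (the raw syscall is used, since glibc provides no wrapper):

/*
 * Sketch: unprivileged perf_event_open() returns -1/EACCES when
 * /proc/sys/kernel/perf_event_paranoid has been set to 3.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* measure this process on any cpu, no group leader, no flags */
	if (syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0) < 0)
		perror("perf_event_open");	/* expect EACCES under paranoid=3 */
	return 0;
}
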
69765+config GRKERNSEC_RAND_THREADSTACK
69766+ bool "Insert random gaps between thread stacks"
69767+ default y if GRKERNSEC_CONFIG_AUTO
69768+ depends on PAX_RANDMMAP && !PPC
69769+ help
69770+ If you say Y here, a random-sized gap will be enforced between allocated
69771+ thread stacks. Glibc's NPTL and other threading libraries that
69772+ pass MAP_STACK to the kernel for thread stack allocation are supported.
69773+ The implementation currently provides 8 bits of entropy for the gap.
69774+
69775+ Many distributions do not compile threaded remote services with the
69776+ -fstack-check argument to GCC, causing the variable-sized stack-based
69777+ allocator, alloca(), to not probe the stack on allocation. This
69778+ permits an unbounded alloca() to skip over any guard page and potentially
69779+ modify another thread's stack reliably. An enforced random gap
69780+ reduces the reliability of such an attack and increases the chance
69781+ that such a read/write to another thread's stack instead lands in
69782+ an unmapped area, causing a crash and triggering grsecurity's
69783+ anti-bruteforcing logic.
69784+
69785+config GRKERNSEC_PROC_MEMMAP
69786+ bool "Harden ASLR against information leaks and entropy reduction"
69787+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
69788+ depends on PAX_NOEXEC || PAX_ASLR
69789+ help
69790+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
69791+ give no information about the addresses of its mappings if
69792+ PaX features that rely on random addresses are enabled on the task.
69793+ In addition to sanitizing this information and disabling other
69794+	  dangerous sources of information, this option causes reads of sensitive
69795+	  /proc/<pid> entries where the file descriptor was opened in a different
69796+	  task than the one performing the read to return 0. Such attempts are logged.
69797+ This option also limits argv/env strings for suid/sgid binaries
69798+ to 512KB to prevent a complete exhaustion of the stack entropy provided
69799+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
69800+ binaries to prevent alternative mmap layouts from being abused.
69801+
69802+ If you use PaX it is essential that you say Y here as it closes up
69803+ several holes that make full ASLR useless locally.
69804+
69805+
69806+config GRKERNSEC_KSTACKOVERFLOW
69807+ bool "Prevent kernel stack overflows"
69808+ default y if GRKERNSEC_CONFIG_AUTO
69809+ depends on !IA64 && 64BIT
69810+ help
69811+ If you say Y here, the kernel's process stacks will be allocated
69812+ with vmalloc instead of the kernel's default allocator. This
69813+ introduces guard pages that in combination with the alloca checking
69814+ of the STACKLEAK feature prevents all forms of kernel process stack
69815+ overflow abuse. Note that this is different from kernel stack
69816+ buffer overflows.
69817+
69818+config GRKERNSEC_BRUTE
69819+ bool "Deter exploit bruteforcing"
69820+ default y if GRKERNSEC_CONFIG_AUTO
69821+ help
69822+ If you say Y here, attempts to bruteforce exploits against forking
69823+ daemons such as apache or sshd, as well as against suid/sgid binaries
69824+ will be deterred. When a child of a forking daemon is killed by PaX
69825+ or crashes due to an illegal instruction or other suspicious signal,
69826+ the parent process will be delayed 30 seconds upon every subsequent
69827+ fork until the administrator is able to assess the situation and
69828+ restart the daemon.
69829+ In the suid/sgid case, the attempt is logged, the user has all their
69830+ existing instances of the suid/sgid binary terminated and will
69831+ be unable to execute any suid/sgid binaries for 15 minutes.
69832+
69833+ It is recommended that you also enable signal logging in the auditing
69834+ section so that logs are generated when a process triggers a suspicious
69835+ signal.
69836+ If the sysctl option is enabled, a sysctl option with name
69837+ "deter_bruteforce" is created.
69838+
69839+config GRKERNSEC_MODHARDEN
69840+ bool "Harden module auto-loading"
69841+ default y if GRKERNSEC_CONFIG_AUTO
69842+ depends on MODULES
69843+ help
69844+ If you say Y here, module auto-loading in response to use of some
69845+ feature implemented by an unloaded module will be restricted to
69846+ root users. Enabling this option helps defend against attacks
69847+ by unprivileged users who abuse the auto-loading behavior to
69848+ cause a vulnerable module to load that is then exploited.
69849+
69850+ If this option prevents a legitimate use of auto-loading for a
69851+ non-root user, the administrator can execute modprobe manually
69852+ with the exact name of the module mentioned in the alert log.
69853+ Alternatively, the administrator can add the module to the list
69854+ of modules loaded at boot by modifying init scripts.
69855+
69856+ Modification of init scripts will most likely be needed on
69857+ Ubuntu servers with encrypted home directory support enabled,
69858+ as the first non-root user logging in will cause the ecb(aes),
69859+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
69860+
69861+config GRKERNSEC_HIDESYM
69862+ bool "Hide kernel symbols"
69863+ default y if GRKERNSEC_CONFIG_AUTO
69864+ select PAX_USERCOPY_SLABS
69865+ help
69866+ If you say Y here, getting information on loaded modules, and
69867+ displaying all kernel symbols through a syscall will be restricted
69868+ to users with CAP_SYS_MODULE. For software compatibility reasons,
69869+ /proc/kallsyms will be restricted to the root user. The RBAC
69870+ system can hide that entry even from root.
69871+
69872+ This option also prevents leaking of kernel addresses through
69873+ several /proc entries.
69874+
69875+ Note that this option is only effective provided the following
69876+ conditions are met:
69877+ 1) The kernel using grsecurity is not precompiled by some distribution
69878+ 2) You have also enabled GRKERNSEC_DMESG
69879+ 3) You are using the RBAC system and hiding other files such as your
69880+ kernel image and System.map. Alternatively, enabling this option
69881+ causes the permissions on /boot, /lib/modules, and the kernel
69882+ source directory to change at compile time to prevent
69883+ reading by non-root users.
69884+ If the above conditions are met, this option will aid in providing a
69885+ useful protection against local kernel exploitation of overflows
69886+ and arbitrary read/write vulnerabilities.
69887+
69888+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
69889+ in addition to this feature.
69890+
69891+config GRKERNSEC_RANDSTRUCT
69892+ bool "Randomize layout of sensitive kernel structures"
69893+ default y if GRKERNSEC_CONFIG_AUTO
69894+ select GRKERNSEC_HIDESYM
69895+ select MODVERSIONS if MODULES
69896+ help
69897+ If you say Y here, the layouts of a number of sensitive kernel
69898+ structures (task, fs, cred, etc) and all structures composed entirely
69899+ of function pointers (aka "ops" structs) will be randomized at compile-time.
69900+ This can introduce the requirement of an additional infoleak
69901+ vulnerability for exploits targeting these structure types.
69902+
69903+ Enabling this feature will introduce some performance impact, slightly
69904+ increase memory usage, and prevent the use of forensic tools like
69905+	  Volatility against the system (unless the kernel source tree is left
69906+	  uncleaned after kernel installation).
69907+
69908+ The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
69909+ It remains after a make clean to allow for external modules to be compiled
69910+ with the existing seed and will be removed by a make mrproper or
69911+ make distclean.
69912+
69913+	  Note that the implementation requires gcc 4.6.4 or newer. You may need
69914+ to install the supporting headers explicitly in addition to the normal
69915+ gcc package.
69916+
69917+config GRKERNSEC_RANDSTRUCT_PERFORMANCE
69918+ bool "Use cacheline-aware structure randomization"
69919+ depends on GRKERNSEC_RANDSTRUCT
69920+ default y if GRKERNSEC_CONFIG_PRIORITY_PERF
69921+ help
69922+ If you say Y here, the RANDSTRUCT randomization will make a best effort
69923+ at restricting randomization to cacheline-sized groups of elements. It
69924+ will further not randomize bitfields in structures. This reduces the
69925+ performance hit of RANDSTRUCT at the cost of weakened randomization.
69926+
69927+config GRKERNSEC_KERN_LOCKOUT
69928+ bool "Active kernel exploit response"
69929+ default y if GRKERNSEC_CONFIG_AUTO
69930+ depends on X86 || ARM || PPC || SPARC
69931+ help
69932+ If you say Y here, when a PaX alert is triggered due to suspicious
69933+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
69934+ or an OOPS occurs due to bad memory accesses, instead of just
69935+ terminating the offending process (and potentially allowing
69936+ a subsequent exploit from the same user), we will take one of two
69937+ actions:
69938+ If the user was root, we will panic the system
69939+ If the user was non-root, we will log the attempt, terminate
69940+ all processes owned by the user, then prevent them from creating
69941+ any new processes until the system is restarted
69942+ This deters repeated kernel exploitation/bruteforcing attempts
69943+ and is useful for later forensics.
69944+
69945+config GRKERNSEC_OLD_ARM_USERLAND
69946+ bool "Old ARM userland compatibility"
69947+ depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
69948+ help
69949+ If you say Y here, stubs of executable code to perform such operations
69950+ as "compare-exchange" will be placed at fixed locations in the ARM vector
69951+ table. This is unfortunately needed for old ARM userland meant to run
69952+ across a wide range of processors. Without this option enabled,
69953+ the get_tls and data memory barrier stubs will be emulated by the kernel,
69954+ which is enough for Linaro userlands or other userlands designed for v6
69955+ and newer ARM CPUs. It's recommended that you try without this option enabled
69956+ first, and only enable it if your userland does not boot (it will likely fail
69957+ at init time).
69958+
69959+endmenu
69960+menu "Role Based Access Control Options"
69961+depends on GRKERNSEC
69962+
69963+config GRKERNSEC_RBAC_DEBUG
69964+ bool
69965+
69966+config GRKERNSEC_NO_RBAC
69967+ bool "Disable RBAC system"
69968+ help
69969+ If you say Y here, the /dev/grsec device will be removed from the kernel,
69970+ preventing the RBAC system from being enabled. You should only say Y
69971+ here if you have no intention of using the RBAC system, so as to prevent
69972+ an attacker with root access from misusing the RBAC system to hide files
69973+ and processes when loadable module support and /dev/[k]mem have been
69974+ locked down.
69975+
69976+config GRKERNSEC_ACL_HIDEKERN
69977+ bool "Hide kernel processes"
69978+ help
69979+ If you say Y here, all kernel threads will be hidden to all
69980+ processes but those whose subject has the "view hidden processes"
69981+ flag.
69982+
69983+config GRKERNSEC_ACL_MAXTRIES
69984+ int "Maximum tries before password lockout"
69985+ default 3
69986+ help
69987+ This option enforces the maximum number of times a user can attempt
69988+ to authorize themselves with the grsecurity RBAC system before being
69989+ denied the ability to attempt authorization again for a specified time.
69990+ The lower the number, the harder it will be to brute-force a password.
69991+
69992+config GRKERNSEC_ACL_TIMEOUT
69993+ int "Time to wait after max password tries, in seconds"
69994+ default 30
69995+ help
69996+ This option specifies the time the user must wait after attempting to
69997+ authorize to the RBAC system with the maximum number of invalid
69998+ passwords. The higher the number, the harder it will be to brute-force
69999+ a password.
70000+
70001+endmenu
70002+menu "Filesystem Protections"
70003+depends on GRKERNSEC
70004+
70005+config GRKERNSEC_PROC
70006+ bool "Proc restrictions"
70007+ default y if GRKERNSEC_CONFIG_AUTO
70008+ help
70009+ If you say Y here, the permissions of the /proc filesystem
70010+ will be altered to enhance system security and privacy. You MUST
70011+ choose either a user only restriction or a user and group restriction.
70012+ Depending upon the option you choose, you can either restrict users to
70013+ see only the processes they themselves run, or choose a group that can
70014+	  Depending upon the option you choose, you can either restrict users to
70015+	  see only the processes they themselves run (the "restrict to user only"
70016+	  option), or choose a group that can view all processes and files
70017+	  normally restricted to root. NOTE: If you're running identd or
70018+
70019+config GRKERNSEC_PROC_USER
70020+ bool "Restrict /proc to user only"
70021+ depends on GRKERNSEC_PROC
70022+ help
70023+	  If you say Y here, non-root users will only be able to view their own
70024+	  processes, and will be restricted from viewing network-related
70025+	  information and kernel symbol and module information.
70026+
70027+config GRKERNSEC_PROC_USERGROUP
70028+ bool "Allow special group"
70029+ default y if GRKERNSEC_CONFIG_AUTO
70030+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
70031+ help
70032+ If you say Y here, you will be able to select a group that will be
70033+ able to view all processes and network-related information. If you've
70034+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
70035+ remain hidden. This option is useful if you want to run identd as
70036+ a non-root user. The group you select may also be chosen at boot time
70037+ via "grsec_proc_gid=" on the kernel commandline.
70038+
70039+config GRKERNSEC_PROC_GID
70040+ int "GID for special group"
70041+ depends on GRKERNSEC_PROC_USERGROUP
70042+ default 1001
70043+
70044+config GRKERNSEC_PROC_ADD
70045+ bool "Additional restrictions"
70046+ default y if GRKERNSEC_CONFIG_AUTO
70047+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
70048+ help
70049+ If you say Y here, additional restrictions will be placed on
70050+ /proc that keep normal users from viewing device information and
70051+ slabinfo information that could be useful for exploits.
70052+
70053+config GRKERNSEC_LINK
70054+ bool "Linking restrictions"
70055+ default y if GRKERNSEC_CONFIG_AUTO
70056+ help
70057+ If you say Y here, /tmp race exploits will be prevented, since users
70058+ will no longer be able to follow symlinks owned by other users in
70059+ world-writable +t directories (e.g. /tmp), unless the owner of the
70060+	  symlink is the owner of the directory. Users will also not be
70061+ able to hardlink to files they do not own. If the sysctl option is
70062+ enabled, a sysctl option with name "linking_restrictions" is created.
70063+
70064+config GRKERNSEC_SYMLINKOWN
70065+ bool "Kernel-enforced SymlinksIfOwnerMatch"
70066+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
70067+ help
70068+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
70069+ that prevents it from being used as a security feature. As Apache
70070+ verifies the symlink by performing a stat() against the target of
70071+ the symlink before it is followed, an attacker can setup a symlink
70072+ to point to a same-owned file, then replace the symlink with one
70073+ that targets another user's file just after Apache "validates" the
70074+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
70075+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
70076+ will be in place for the group you specify. If the sysctl option
70077+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
70078+ created.
70079+
70080+config GRKERNSEC_SYMLINKOWN_GID
70081+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
70082+ depends on GRKERNSEC_SYMLINKOWN
70083+ default 1006
70084+ help
70085+ Setting this GID determines what group kernel-enforced
70086+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
70087+ is enabled, a sysctl option with name "symlinkown_gid" is created.
70088+
70089+config GRKERNSEC_FIFO
70090+ bool "FIFO restrictions"
70091+ default y if GRKERNSEC_CONFIG_AUTO
70092+ help
70093+ If you say Y here, users will not be able to write to FIFOs they don't
70094+ own in world-writable +t directories (e.g. /tmp), unless the owner of
70095+	  the FIFO is the same as the owner of the directory it's held in. If the sysctl
70096+ option is enabled, a sysctl option with name "fifo_restrictions" is
70097+ created.
70098+
70099+config GRKERNSEC_SYSFS_RESTRICT
70100+ bool "Sysfs/debugfs restriction"
70101+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
70102+ depends on SYSFS
70103+ help
70104+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
70105+ any filesystem normally mounted under it (e.g. debugfs) will be
70106+ mostly accessible only by root. These filesystems generally provide access
70107+ to hardware and debug information that isn't appropriate for unprivileged
70108+ users of the system. Sysfs and debugfs have also become a large source
70109+ of new vulnerabilities, ranging from infoleaks to local compromise.
70110+ There has been very little oversight with an eye toward security involved
70111+ in adding new exporters of information to these filesystems, so their
70112+ use is discouraged.
70113+ For reasons of compatibility, a few directories have been whitelisted
70114+ for access by non-root users:
70115+ /sys/fs/selinux
70116+ /sys/fs/fuse
70117+ /sys/devices/system/cpu
70118+
70119+config GRKERNSEC_ROFS
70120+ bool "Runtime read-only mount protection"
70121+ depends on SYSCTL
70122+ help
70123+ If you say Y here, a sysctl option with name "romount_protect" will
70124+ be created. By setting this option to 1 at runtime, filesystems
70125+ will be protected in the following ways:
70126+ * No new writable mounts will be allowed
70127+ * Existing read-only mounts won't be able to be remounted read/write
70128+ * Write operations will be denied on all block devices
70129+ This option acts independently of grsec_lock: once it is set to 1,
70130+ it cannot be turned off. Therefore, please be mindful of the resulting
70131+ behavior if this option is enabled in an init script on a read-only
70132+ filesystem.
70133+ Also be aware that as with other root-focused features, GRKERNSEC_KMEM
70134+ and GRKERNSEC_IO should be enabled and module loading disabled via
70135+ config or at runtime.
70136+ This feature is mainly intended for secure embedded systems.
70137+
70138+
70139+config GRKERNSEC_DEVICE_SIDECHANNEL
70140+ bool "Eliminate stat/notify-based device sidechannels"
70141+ default y if GRKERNSEC_CONFIG_AUTO
70142+ help
70143+ If you say Y here, timing analyses on block or character
70144+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
70145+ will be thwarted for unprivileged users. If a process without
70146+ CAP_MKNOD stats such a device, the last access and last modify times
70147+ will match the device's create time. No access or modify events
70148+ will be triggered through inotify/dnotify/fanotify for such devices.
70149+ This feature will prevent attacks that may at a minimum
70150+ allow an attacker to determine the administrator's password length.
70151+
70152+config GRKERNSEC_CHROOT
70153+ bool "Chroot jail restrictions"
70154+ default y if GRKERNSEC_CONFIG_AUTO
70155+ help
70156+ If you say Y here, you will be able to choose several options that will
70157+ make breaking out of a chrooted jail much more difficult. If you
70158+ encounter no software incompatibilities with the following options, it
70159+ is recommended that you enable each one.
70160+
70161+ Note that the chroot restrictions are not intended to apply to "chroots"
70162+ to directories that are simple bind mounts of the global root filesystem.
70163+ For several other reasons, a user shouldn't expect any significant
70164+ security by performing such a chroot.
70165+
70166+config GRKERNSEC_CHROOT_MOUNT
70167+ bool "Deny mounts"
70168+ default y if GRKERNSEC_CONFIG_AUTO
70169+ depends on GRKERNSEC_CHROOT
70170+ help
70171+ If you say Y here, processes inside a chroot will not be able to
70172+ mount or remount filesystems. If the sysctl option is enabled, a
70173+ sysctl option with name "chroot_deny_mount" is created.
70174+
70175+config GRKERNSEC_CHROOT_DOUBLE
70176+ bool "Deny double-chroots"
70177+ default y if GRKERNSEC_CONFIG_AUTO
70178+ depends on GRKERNSEC_CHROOT
70179+ help
70180+ If you say Y here, processes inside a chroot will not be able to chroot
70181+ again outside the chroot. This is a widely used method of breaking
70182+ out of a chroot jail and should not be allowed. If the sysctl
70183+ option is enabled, a sysctl option with name
70184+ "chroot_deny_chroot" is created.
70185+
70186+config GRKERNSEC_CHROOT_PIVOT
70187+ bool "Deny pivot_root in chroot"
70188+ default y if GRKERNSEC_CONFIG_AUTO
70189+ depends on GRKERNSEC_CHROOT
70190+ help
70191+ If you say Y here, processes inside a chroot will not be able to use
70192+ a function called pivot_root() that was introduced in Linux 2.3.41. It
70193+	  works similarly to chroot in that it changes the root filesystem. This
70194+ function could be misused in a chrooted process to attempt to break out
70195+ of the chroot, and therefore should not be allowed. If the sysctl
70196+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
70197+ created.
70198+
70199+config GRKERNSEC_CHROOT_CHDIR
70200+ bool "Enforce chdir(\"/\") on all chroots"
70201+ default y if GRKERNSEC_CONFIG_AUTO
70202+ depends on GRKERNSEC_CHROOT
70203+ help
70204+ If you say Y here, the current working directory of all newly-chrooted
70205+	  applications will be set to the root directory of the chroot.
70206+ The man page on chroot(2) states:
70207+ Note that this call does not change the current working
70208+ directory, so that `.' can be outside the tree rooted at
70209+ `/'. In particular, the super-user can escape from a
70210+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
70211+
70212+ It is recommended that you say Y here, since it's not known to break
70213+ any software. If the sysctl option is enabled, a sysctl option with
70214+ name "chroot_enforce_chdir" is created.
70215+
70216+config GRKERNSEC_CHROOT_CHMOD
70217+ bool "Deny (f)chmod +s"
70218+ default y if GRKERNSEC_CONFIG_AUTO
70219+ depends on GRKERNSEC_CHROOT
70220+ help
70221+ If you say Y here, processes inside a chroot will not be able to chmod
70222+ or fchmod files to make them have suid or sgid bits. This protects
70223+ against another published method of breaking a chroot. If the sysctl
70224+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
70225+ created.
70226+
70227+config GRKERNSEC_CHROOT_FCHDIR
70228+ bool "Deny fchdir and fhandle out of chroot"
70229+ default y if GRKERNSEC_CONFIG_AUTO
70230+ depends on GRKERNSEC_CHROOT
70231+ help
70232+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
70233+ to a file descriptor of the chrooting process that points to a directory
70234+ outside the filesystem will be stopped. Additionally, this option prevents
70235+ use of the recently-created syscall for opening files by a guessable "file
70236+ handle" inside a chroot. If the sysctl option is enabled, a sysctl option
70237+ with name "chroot_deny_fchdir" is created.
70238+
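
For illustration, the well-known escape that "chroot_deny_fchdir" stops, sketched in userspace C under the assumption that a root process inside the jail kept a directory fd opened before the chroot:

/*
 * Classic fchdir() chroot escape, shown only to illustrate what this
 * option blocks. fd was opened on a directory before the chroot.
 */
#include <unistd.h>

int escape_sketch(int fd)
{
	int i;

	if (fchdir(fd))			/* denied under GRKERNSEC_CHROOT_FCHDIR */
		return -1;
	for (i = 0; i < 256; i++)	/* walk up past the jail's root */
		if (chdir(".."))
			break;
	return chroot(".");		/* re-anchor at the real root */
}
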
70239+config GRKERNSEC_CHROOT_MKNOD
70240+ bool "Deny mknod"
70241+ default y if GRKERNSEC_CONFIG_AUTO
70242+ depends on GRKERNSEC_CHROOT
70243+ help
70244+ If you say Y here, processes inside a chroot will not be allowed to
70245+ mknod. The problem with using mknod inside a chroot is that it
70246+ would allow an attacker to create a device entry that is the same
70247+ as one on the physical root of your system, which could range from
70248+ anything from the console device to a device for your harddrive (which
70249+ they could then use to wipe the drive or steal data). It is recommended
70250+ that you say Y here, unless you run into software incompatibilities.
70251+ If the sysctl option is enabled, a sysctl option with name
70252+ "chroot_deny_mknod" is created.
70253+
70254+config GRKERNSEC_CHROOT_SHMAT
70255+ bool "Deny shmat() out of chroot"
70256+ default y if GRKERNSEC_CONFIG_AUTO
70257+ depends on GRKERNSEC_CHROOT
70258+ help
70259+ If you say Y here, processes inside a chroot will not be able to attach
70260+ to shared memory segments that were created outside of the chroot jail.
70261+ It is recommended that you say Y here. If the sysctl option is enabled,
70262+ a sysctl option with name "chroot_deny_shmat" is created.
70263+
70264+config GRKERNSEC_CHROOT_UNIX
70265+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
70266+ default y if GRKERNSEC_CONFIG_AUTO
70267+ depends on GRKERNSEC_CHROOT
70268+ help
70269+ If you say Y here, processes inside a chroot will not be able to
70270+ connect to abstract (meaning not belonging to a filesystem) Unix
70271+ domain sockets that were bound outside of a chroot. It is recommended
70272+ that you say Y here. If the sysctl option is enabled, a sysctl option
70273+ with name "chroot_deny_unix" is created.
70274+
70275+config GRKERNSEC_CHROOT_FINDTASK
70276+ bool "Protect outside processes"
70277+ default y if GRKERNSEC_CONFIG_AUTO
70278+ depends on GRKERNSEC_CHROOT
70279+ help
70280+ If you say Y here, processes inside a chroot will not be able to
70281+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
70282+ getsid, or view any process outside of the chroot. If the sysctl
70283+ option is enabled, a sysctl option with name "chroot_findtask" is
70284+ created.
70285+
70286+config GRKERNSEC_CHROOT_NICE
70287+ bool "Restrict priority changes"
70288+ default y if GRKERNSEC_CONFIG_AUTO
70289+ depends on GRKERNSEC_CHROOT
70290+ help
70291+ If you say Y here, processes inside a chroot will not be able to raise
70292+ the priority of processes in the chroot, or alter the priority of
70293+ processes outside the chroot. This provides more security than simply
70294+ removing CAP_SYS_NICE from the process' capability set. If the
70295+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
70296+ is created.
70297+
70298+config GRKERNSEC_CHROOT_SYSCTL
70299+ bool "Deny sysctl writes"
70300+ default y if GRKERNSEC_CONFIG_AUTO
70301+ depends on GRKERNSEC_CHROOT
70302+ help
70303+ If you say Y here, an attacker in a chroot will not be able to
70304+ write to sysctl entries, either by sysctl(2) or through a /proc
70305+ interface. It is strongly recommended that you say Y here. If the
70306+ sysctl option is enabled, a sysctl option with name
70307+ "chroot_deny_sysctl" is created.
70308+
70309+config GRKERNSEC_CHROOT_CAPS
70310+ bool "Capability restrictions"
70311+ default y if GRKERNSEC_CONFIG_AUTO
70312+ depends on GRKERNSEC_CHROOT
70313+ help
70314+ If you say Y here, the capabilities on all processes within a
70315+ chroot jail will be lowered to stop module insertion, raw i/o,
70316+ system and net admin tasks, rebooting the system, modifying immutable
70317+ files, modifying IPC owned by another, and changing the system time.
70318+ This is left an option because it can break some apps. Disable this
70319+ if your chrooted apps are having problems performing those kinds of
70320+ tasks. If the sysctl option is enabled, a sysctl option with
70321+ name "chroot_caps" is created.
70322+
70323+config GRKERNSEC_CHROOT_INITRD
70324+ bool "Exempt initrd tasks from restrictions"
70325+ default y if GRKERNSEC_CONFIG_AUTO
70326+ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
70327+ help
70328+ If you say Y here, tasks started prior to init will be exempted from
70329+ grsecurity's chroot restrictions. This option is mainly meant to
70330+ resolve Plymouth's performing privileged operations unnecessarily
70331+ in a chroot.
70332+
70333+endmenu
70334+menu "Kernel Auditing"
70335+depends on GRKERNSEC
70336+
70337+config GRKERNSEC_AUDIT_GROUP
70338+ bool "Single group for auditing"
70339+ help
70340+ If you say Y here, the exec and chdir logging features will only operate
70341+ on a group you specify. This option is recommended if you only want to
70342+ watch certain users instead of having a large amount of logs from the
70343+ entire system. If the sysctl option is enabled, a sysctl option with
70344+ name "audit_group" is created.
70345+
70346+config GRKERNSEC_AUDIT_GID
70347+ int "GID for auditing"
70348+ depends on GRKERNSEC_AUDIT_GROUP
70349+ default 1007
70350+
70351+config GRKERNSEC_EXECLOG
70352+ bool "Exec logging"
70353+ help
70354+ If you say Y here, all execve() calls will be logged (since the
70355+ other exec*() calls are frontends to execve(), all execution
70356+ will be logged). Useful for shell-servers that like to keep track
70357+ of their users. If the sysctl option is enabled, a sysctl option with
70358+ name "exec_logging" is created.
70359+ WARNING: This option when enabled will produce a LOT of logs, especially
70360+ on an active system.
70361+
70362+config GRKERNSEC_RESLOG
70363+ bool "Resource logging"
70364+ default y if GRKERNSEC_CONFIG_AUTO
70365+ help
70366+ If you say Y here, all attempts to overstep resource limits will
70367+ be logged with the resource name, the requested size, and the current
70368+ limit. It is highly recommended that you say Y here. If the sysctl
70369+ option is enabled, a sysctl option with name "resource_logging" is
70370+ created. If the RBAC system is enabled, the sysctl value is ignored.
70371+
70372+config GRKERNSEC_CHROOT_EXECLOG
70373+ bool "Log execs within chroot"
70374+ help
70375+ If you say Y here, all executions inside a chroot jail will be logged
70376+ to syslog. This can cause a large amount of logs if certain
70377+ applications (eg. djb's daemontools) are installed on the system, and
70378+ is therefore left as an option. If the sysctl option is enabled, a
70379+ sysctl option with name "chroot_execlog" is created.
70380+
70381+config GRKERNSEC_AUDIT_PTRACE
70382+ bool "Ptrace logging"
70383+ help
70384+ If you say Y here, all attempts to attach to a process via ptrace
70385+ will be logged. If the sysctl option is enabled, a sysctl option
70386+ with name "audit_ptrace" is created.
70387+
70388+config GRKERNSEC_AUDIT_CHDIR
70389+ bool "Chdir logging"
70390+ help
70391+ If you say Y here, all chdir() calls will be logged. If the sysctl
70392+ option is enabled, a sysctl option with name "audit_chdir" is created.
70393+
70394+config GRKERNSEC_AUDIT_MOUNT
70395+ bool "(Un)Mount logging"
70396+ help
70397+ If you say Y here, all mounts and unmounts will be logged. If the
70398+ sysctl option is enabled, a sysctl option with name "audit_mount" is
70399+ created.
70400+
70401+config GRKERNSEC_SIGNAL
70402+ bool "Signal logging"
70403+ default y if GRKERNSEC_CONFIG_AUTO
70404+ help
70405+ If you say Y here, certain important signals will be logged, such as
70406+	  SIGSEGV, which will as a result inform you when an error in a program
70407+ occurred, which in some cases could mean a possible exploit attempt.
70408+ If the sysctl option is enabled, a sysctl option with name
70409+ "signal_logging" is created.
70410+
70411+config GRKERNSEC_FORKFAIL
70412+ bool "Fork failure logging"
70413+ help
70414+ If you say Y here, all failed fork() attempts will be logged.
70415+ This could suggest a fork bomb, or someone attempting to overstep
70416+ their process limit. If the sysctl option is enabled, a sysctl option
70417+ with name "forkfail_logging" is created.
70418+
70419+config GRKERNSEC_TIME
70420+ bool "Time change logging"
70421+ default y if GRKERNSEC_CONFIG_AUTO
70422+ help
70423+ If you say Y here, any changes of the system clock will be logged.
70424+ If the sysctl option is enabled, a sysctl option with name
70425+ "timechange_logging" is created.
70426+
70427+config GRKERNSEC_PROC_IPADDR
70428+ bool "/proc/<pid>/ipaddr support"
70429+ default y if GRKERNSEC_CONFIG_AUTO
70430+ help
70431+ If you say Y here, a new entry will be added to each /proc/<pid>
70432+ directory that contains the IP address of the person using the task.
70433+ The IP is carried across local TCP and AF_UNIX stream sockets.
70434+ This information can be useful for IDS/IPSes to perform remote response
70435+ to a local attack. The entry is readable by only the owner of the
70436+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
70437+ the RBAC system), and thus does not create privacy concerns.
70438+
70439+config GRKERNSEC_RWXMAP_LOG
70440+ bool 'Denied RWX mmap/mprotect logging'
70441+ default y if GRKERNSEC_CONFIG_AUTO
70442+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
70443+ help
70444+ If you say Y here, calls to mmap() and mprotect() with explicit
70445+ usage of PROT_WRITE and PROT_EXEC together will be logged when
70446+ denied by the PAX_MPROTECT feature. This feature will also
70447+ log other problematic scenarios that can occur when PAX_MPROTECT
70448+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
70449+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
70450+ is created.
70451+
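
A minimal sketch of the denied scenario: an anonymous mapping requested writable and executable at once, which PAX_MPROTECT refuses and this option logs:

/*
 * Sketch: request a writable+executable anonymous mapping. Under
 * PAX_MPROTECT this mmap() fails, and with rwxmap_logging enabled
 * the denial is logged.
 */
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		perror("mmap");		/* expected when MPROTECT is active */
	else
		munmap(p, 4096);
	return 0;
}
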
70452+endmenu
70453+
70454+menu "Executable Protections"
70455+depends on GRKERNSEC
70456+
70457+config GRKERNSEC_DMESG
70458+ bool "Dmesg(8) restriction"
70459+ default y if GRKERNSEC_CONFIG_AUTO
70460+ help
70461+ If you say Y here, non-root users will not be able to use dmesg(8)
70462+ to view the contents of the kernel's circular log buffer.
70463+ The kernel's log buffer often contains kernel addresses and other
70464+ identifying information useful to an attacker in fingerprinting a
70465+ system for a targeted exploit.
70466+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
70467+ created.
70468+
70469+config GRKERNSEC_HARDEN_PTRACE
70470+ bool "Deter ptrace-based process snooping"
70471+ default y if GRKERNSEC_CONFIG_AUTO
70472+ help
70473+ If you say Y here, TTY sniffers and other malicious monitoring
70474+ programs implemented through ptrace will be defeated. If you
70475+ have been using the RBAC system, this option has already been
70476+ enabled for several years for all users, with the ability to make
70477+ fine-grained exceptions.
70478+
70479+ This option only affects the ability of non-root users to ptrace
70480+ processes that are not a descendent of the ptracing process.
70481+ This means that strace ./binary and gdb ./binary will still work,
70482+ but attaching to arbitrary processes will not. If the sysctl
70483+ option is enabled, a sysctl option with name "harden_ptrace" is
70484+ created.
70485+
70486+config GRKERNSEC_PTRACE_READEXEC
70487+ bool "Require read access to ptrace sensitive binaries"
70488+ default y if GRKERNSEC_CONFIG_AUTO
70489+ help
70490+ If you say Y here, unprivileged users will not be able to ptrace unreadable
70491+ binaries. This option is useful in environments that
70492+ remove the read bits (e.g. file mode 4711) from suid binaries to
70493+ prevent infoleaking of their contents. This option adds
70494+ consistency to the use of that file mode, as the binary could normally
70495+ be read out when run without privileges while ptracing.
70496+
70497+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
70498+ is created.
70499+
70500+config GRKERNSEC_SETXID
70501+ bool "Enforce consistent multithreaded privileges"
70502+ default y if GRKERNSEC_CONFIG_AUTO
70503+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
70504+ help
70505+ If you say Y here, a change from a root uid to a non-root uid
70506+ in a multithreaded application will cause the resulting uids,
70507+ gids, supplementary groups, and capabilities in that thread
70508+ to be propagated to the other threads of the process. In most
70509+ cases this is unnecessary, as glibc will emulate this behavior
70510+ on behalf of the application. Other libcs do not act in the
70511+ same way, allowing the other threads of the process to continue
70512+ running with root privileges. If the sysctl option is enabled,
70513+ a sysctl option with name "consistent_setxid" is created.
70514+
70515+config GRKERNSEC_HARDEN_IPC
70516+ bool "Disallow access to overly-permissive IPC objects"
70517+ default y if GRKERNSEC_CONFIG_AUTO
70518+ depends on SYSVIPC
70519+ help
70520+ If you say Y here, access to overly-permissive IPC objects (shared
70521+ memory, message queues, and semaphores) will be denied for processes
70522+	  given the following criteria beyond normal permission checks (see the sketch after this entry):
70523+ 1) If the IPC object is world-accessible and the euid doesn't match
70524+ that of the creator or current uid for the IPC object
70525+ 2) If the IPC object is group-accessible and the egid doesn't
70526+ match that of the creator or current gid for the IPC object
70527+ It's a common error to grant too much permission to these objects,
70528+ with impact ranging from denial of service and information leaking to
70529+ privilege escalation. This feature was developed in response to
70530+ research by Tim Brown:
70531+ http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
70532+ who found hundreds of such insecure usages. Processes with
70533+ CAP_IPC_OWNER are still permitted to access these IPC objects.
70534+ If the sysctl option is enabled, a sysctl option with name
70535+ "harden_ipc" is created.
70536+
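
A condensed sketch of the two criteria above, for illustration only; the parameter names and exact mode bits are assumptions, not the patch's implementation:

/*
 * Deny when the IPC object is world- or group-accessible and the
 * caller's euid/egid matches neither the creator nor the current owner.
 */
#include <sys/stat.h>
#include <sys/types.h>

static int ipc_overly_permissive(mode_t mode,
				 uid_t euid, uid_t cuid, uid_t uid,
				 gid_t egid, gid_t cgid, gid_t gid)
{
	if ((mode & (S_IROTH | S_IWOTH)) && euid != cuid && euid != uid)
		return 1;	/* criterion 1: world-accessible */
	if ((mode & (S_IRGRP | S_IWGRP)) && egid != cgid && egid != gid)
		return 1;	/* criterion 2: group-accessible */
	return 0;		/* fall through to normal permission checks */
}
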
70537+config GRKERNSEC_TPE
70538+ bool "Trusted Path Execution (TPE)"
70539+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
70540+ help
70541+ If you say Y here, you will be able to choose a gid to add to the
70542+ supplementary groups of users you want to mark as "untrusted."
70543+ These users will not be able to execute any files that are not in
70544+ root-owned directories writable only by root. If the sysctl option
70545+ is enabled, a sysctl option with name "tpe" is created.
70546+
70547+config GRKERNSEC_TPE_ALL
70548+ bool "Partially restrict all non-root users"
70549+ depends on GRKERNSEC_TPE
70550+ help
70551+ If you say Y here, all non-root users will be covered under
70552+ a weaker TPE restriction. This is separate from, and in addition to,
70553+ the main TPE options that you have selected elsewhere. Thus, if a
70554+ "trusted" GID is chosen, this restriction applies to even that GID.
70555+ Under this restriction, all non-root users will only be allowed to
70556+ execute files in directories they own that are not group or
70557+ world-writable, or in directories owned by root and writable only by
70558+ root. If the sysctl option is enabled, a sysctl option with name
70559+ "tpe_restrict_all" is created.
70560+
70561+config GRKERNSEC_TPE_INVERT
70562+ bool "Invert GID option"
70563+ depends on GRKERNSEC_TPE
70564+ help
70565+ If you say Y here, the group you specify in the TPE configuration will
70566+ decide what group TPE restrictions will be *disabled* for. This
70567+ option is useful if you want TPE restrictions to be applied to most
70568+ users on the system. If the sysctl option is enabled, a sysctl option
70569+ with name "tpe_invert" is created. Unlike other sysctl options, this
70570+ entry will default to on for backward-compatibility.
70571+
70572+config GRKERNSEC_TPE_GID
70573+ int
70574+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
70575+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
70576+
70577+config GRKERNSEC_TPE_UNTRUSTED_GID
70578+ int "GID for TPE-untrusted users"
70579+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
70580+ default 1005
70581+ help
70582+ Setting this GID determines what group TPE restrictions will be
70583+ *enabled* for. If the sysctl option is enabled, a sysctl option
70584+ with name "tpe_gid" is created.
70585+
70586+config GRKERNSEC_TPE_TRUSTED_GID
70587+ int "GID for TPE-trusted users"
70588+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
70589+ default 1005
70590+ help
70591+ Setting this GID determines what group TPE restrictions will be
70592+ *disabled* for. If the sysctl option is enabled, a sysctl option
70593+ with name "tpe_gid" is created.
70594+
70595+endmenu
70596+menu "Network Protections"
70597+depends on GRKERNSEC
70598+
70599+config GRKERNSEC_BLACKHOLE
70600+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
70601+ default y if GRKERNSEC_CONFIG_AUTO
70602+ depends on NET
70603+ help
70604+ If you say Y here, neither TCP resets nor ICMP
70605+ destination-unreachable packets will be sent in response to packets
70606+ sent to ports for which no associated listening process exists.
70607+ This feature supports both IPV4 and IPV6 and exempts the
70608+ loopback interface from blackholing. Enabling this feature
70609+ makes a host more resilient to DoS attacks and reduces network
70610+ visibility against scanners.
70611+
70612+ The blackhole feature as-implemented is equivalent to the FreeBSD
70613+ blackhole feature, as it prevents RST responses to all packets, not
70614+ just SYNs. Under most application behavior this causes no
70615+ problems, but applications (like haproxy) may not close certain
70616+ connections in a way that cleanly terminates them on the remote
70617+ end, leaving the remote host in LAST_ACK state. Because of this
70618+ side-effect and to prevent intentional LAST_ACK DoSes, this
70619+ feature also adds automatic mitigation against such attacks.
70620+ The mitigation drastically reduces the amount of time a socket
70621+ can spend in LAST_ACK state. If you're using haproxy and not
70622+ all servers it connects to have this option enabled, consider
70623+ disabling this feature on the haproxy host.
70624+
70625+ If the sysctl option is enabled, two sysctl options with names
70626+ "ip_blackhole" and "lastack_retries" will be created.
70627+ While "ip_blackhole" takes the standard zero/non-zero on/off
70628+ toggle, "lastack_retries" uses the same kinds of values as
70629+ "tcp_retries1" and "tcp_retries2". The default value of 4
70630+ prevents a socket from lasting more than 45 seconds in LAST_ACK
70631+ state.
70632+
70633+config GRKERNSEC_NO_SIMULT_CONNECT
70634+ bool "Disable TCP Simultaneous Connect"
70635+ default y if GRKERNSEC_CONFIG_AUTO
70636+ depends on NET
70637+ help
70638+ If you say Y here, a feature by Willy Tarreau will be enabled that
70639+ removes a weakness in Linux's strict implementation of TCP that
70640+ allows two clients to connect to each other without either entering
70641+ a listening state. The weakness allows an attacker to easily prevent
70642+ a client from connecting to a known server provided the source port
70643+ for the connection is guessed correctly.
70644+
70645+ As the weakness could be used to prevent an antivirus or IPS from
70646+ fetching updates, or prevent an SSL gateway from fetching a CRL,
70647+ it should be eliminated by enabling this option. Though Linux is
70648+ one of few operating systems supporting simultaneous connect, it
70649+ has no legitimate use in practice and is rarely supported by firewalls.
70650+
70651+config GRKERNSEC_SOCKET
70652+ bool "Socket restrictions"
70653+ depends on NET
70654+ help
70655+ If you say Y here, you will be able to choose from several options.
70656+ If you assign a GID on your system and add it to the supplementary
70657+ groups of users you want to restrict socket access to, this patch
70658+ will perform up to three things, based on the option(s) you choose.
70659+
70660+config GRKERNSEC_SOCKET_ALL
70661+ bool "Deny any sockets to group"
70662+ depends on GRKERNSEC_SOCKET
70663+ help
70664+	  If you say Y here, you will be able to choose a GID whose users will
70665+ be unable to connect to other hosts from your machine or run server
70666+ applications from your machine. If the sysctl option is enabled, a
70667+ sysctl option with name "socket_all" is created.
70668+
70669+config GRKERNSEC_SOCKET_ALL_GID
70670+ int "GID to deny all sockets for"
70671+ depends on GRKERNSEC_SOCKET_ALL
70672+ default 1004
70673+ help
70674+ Here you can choose the GID to disable socket access for. Remember to
70675+ add the users you want socket access disabled for to the GID
70676+ specified here. If the sysctl option is enabled, a sysctl option
70677+ with name "socket_all_gid" is created.
70678+
70679+config GRKERNSEC_SOCKET_CLIENT
70680+ bool "Deny client sockets to group"
70681+ depends on GRKERNSEC_SOCKET
70682+ help
70683+	  If you say Y here, you will be able to choose a GID whose users will
70684+ be unable to connect to other hosts from your machine, but will be
70685+ able to run servers. If this option is enabled, all users in the group
70686+ you specify will have to use passive mode when initiating ftp transfers
70687+ from the shell on your machine. If the sysctl option is enabled, a
70688+ sysctl option with name "socket_client" is created.
70689+
70690+config GRKERNSEC_SOCKET_CLIENT_GID
70691+ int "GID to deny client sockets for"
70692+ depends on GRKERNSEC_SOCKET_CLIENT
70693+ default 1003
70694+ help
70695+ Here you can choose the GID to disable client socket access for.
70696+ Remember to add the users you want client socket access disabled for to
70697+ the GID specified here. If the sysctl option is enabled, a sysctl
70698+ option with name "socket_client_gid" is created.
70699+
70700+config GRKERNSEC_SOCKET_SERVER
70701+ bool "Deny server sockets to group"
70702+ depends on GRKERNSEC_SOCKET
70703+ help
70704+	  If you say Y here, you will be able to choose a GID whose users will
70705+ be unable to run server applications from your machine. If the sysctl
70706+ option is enabled, a sysctl option with name "socket_server" is created.
70707+
70708+config GRKERNSEC_SOCKET_SERVER_GID
70709+ int "GID to deny server sockets for"
70710+ depends on GRKERNSEC_SOCKET_SERVER
70711+ default 1002
70712+ help
70713+ Here you can choose the GID to disable server socket access for.
70714+ Remember to add the users you want server socket access disabled for to
70715+ the GID specified here. If the sysctl option is enabled, a sysctl
70716+ option with name "socket_server_gid" is created.
70717+
70718+endmenu
70719+
70720+menu "Physical Protections"
70721+depends on GRKERNSEC
70722+
70723+config GRKERNSEC_DENYUSB
70724+ bool "Deny new USB connections after toggle"
70725+ default y if GRKERNSEC_CONFIG_AUTO
70726+ depends on SYSCTL && USB_SUPPORT
70727+ help
70728+ If you say Y here, a new sysctl option with name "deny_new_usb"
70729+ will be created. Setting its value to 1 will prevent any new
70730+ USB devices from being recognized by the OS. Any attempted USB
70731+ device insertion will be logged. This option is intended to be
70732+ used against custom USB devices designed to exploit vulnerabilities
70733+ in various USB device drivers.
70734+
70735+ For greatest effectiveness, this sysctl should be set after any
70736+ relevant init scripts. This option is safe to enable in distros
70737+ as each user can choose whether or not to toggle the sysctl.
70738+
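
 A sketch of the ordering the help text suggests: an init script run late
 in boot can flip the sysctl once all expected devices are present (path
 per the Sysctl Support section below):

	# run after all boot-time USB devices have been initialized
	echo 1 > /proc/sys/kernel/grsecurity/deny_new_usb
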
70739+config GRKERNSEC_DENYUSB_FORCE
70740+ bool "Reject all USB devices not connected at boot"
70741+ select USB
70742+ depends on GRKERNSEC_DENYUSB
70743+ help
70744+ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
70745+ that doesn't involve a sysctl entry. This option should only be
70746+ enabled if you're sure you want to deny all new USB connections
70747+ at runtime and don't want to modify init scripts. This should not
70748+ be enabled by distros. It forces the core USB code to be built
70749+ into the kernel image so that all devices connected at boot time
70750+ can be recognized and new USB device connections can be prevented
70751+ prior to init running.
70752+
70753+endmenu
70754+
70755+menu "Sysctl Support"
70756+depends on GRKERNSEC && SYSCTL
70757+
70758+config GRKERNSEC_SYSCTL
70759+ bool "Sysctl support"
70760+ default y if GRKERNSEC_CONFIG_AUTO
70761+ help
70762+ If you say Y here, you will be able to change the options that
70763+ grsecurity runs with at bootup, without having to recompile your
70764+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
70765+ to enable (1) or disable (0) various features. All the sysctl entries
70766+ are mutable until the "grsec_lock" entry is set to a non-zero value.
70767+ All features enabled in the kernel configuration are disabled at boot
70768+ if you do not say Y to the "Turn on features by default" option.
70769+ All options should be set at startup, and the grsec_lock entry should
70770+ be set to a non-zero value after all the options are set.
70771+ *THIS IS EXTREMELY IMPORTANT*
70772+
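
 A minimal boot-script sketch of the ordering the help text insists on
 (the option names are examples taken from this file):

	# set every desired grsecurity sysctl first ...
	echo 1 > /proc/sys/kernel/grsecurity/socket_all
	# ... and only then lock the tree; no grsecurity sysctl can be
	# changed afterwards
	echo 1 > /proc/sys/kernel/grsecurity/grsec_lock
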
70773+config GRKERNSEC_SYSCTL_DISTRO
70774+ bool "Extra sysctl support for distro makers (READ HELP)"
70775+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
70776+ help
70777+ If you say Y here, additional sysctl options will be created
70778+ for features that affect processes running as root. Therefore,
70779+ it is critical when using this option that the grsec_lock entry be
70780+	  enabled after boot. Only distros that ship prebuilt kernel packages
70781+	  with this option enabled, and that can ensure grsec_lock is set
70782+	  after boot, should use this option.
70783+ *Failure to set grsec_lock after boot makes all grsec features
70784+ this option covers useless*
70785+
70786+ Currently this option creates the following sysctl entries:
70787+ "Disable Privileged I/O": "disable_priv_io"
70788+
70789+config GRKERNSEC_SYSCTL_ON
70790+ bool "Turn on features by default"
70791+ default y if GRKERNSEC_CONFIG_AUTO
70792+ depends on GRKERNSEC_SYSCTL
70793+ help
70794+	  If you say Y here, all features enabled in the kernel
70795+	  configuration will also be enabled at boot time, rather than
70796+	  starting out disabled. It is recommended you say Y here unless
70797+ there is some reason you would want all sysctl-tunable features to
70798+ be disabled by default. As mentioned elsewhere, it is important
70799+ to enable the grsec_lock entry once you have finished modifying
70800+ the sysctl entries.
70801+
70802+endmenu
70803+menu "Logging Options"
70804+depends on GRKERNSEC
70805+
70806+config GRKERNSEC_FLOODTIME
70807+ int "Seconds in between log messages (minimum)"
70808+ default 10
70809+ help
70810+	  This option allows you to enforce a minimum number of seconds between
70811+	  grsecurity log messages. The default should be suitable for most
70812+	  people; if you do change it, pick a value small enough
70813+ to allow informative logs to be produced, but large enough to
70814+ prevent flooding.
70815+
70816+ Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
70817+ any rate limiting on grsecurity log messages.
70818+
70819+config GRKERNSEC_FLOODBURST
70820+ int "Number of messages in a burst (maximum)"
70821+ default 6
70822+ help
70823+ This option allows you to choose the maximum number of messages allowed
70824+ within the flood time interval you chose in a separate option. The
70825+ default should be suitable for most people, however if you find that
70826+ many of your logs are being interpreted as flooding, you may want to
70827+ raise this value.
70828+
70829+ Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
70830+ any rate limiting on grsecurity log messages.
70831+
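
 Together the two options parameterize a windowed rate limiter: at most
 GRKERNSEC_FLOODBURST messages per GRKERNSEC_FLOODTIME seconds. A minimal
 sketch of the equivalent check (illustrative only, not the actual
 grsec_log.c code; assumes <linux/jiffies.h>):

	static unsigned long flood_begin;	/* jiffies when the window opened */
	static unsigned int flood_count;	/* messages logged in this window */

	static int grsec_should_log(unsigned int floodtime, unsigned int floodburst)
	{
		if (floodtime == 0 && floodburst == 0)
			return 1;			/* rate limiting disabled */
		if (time_after(jiffies, flood_begin + floodtime * HZ)) {
			flood_begin = jiffies;		/* open a new window */
			flood_count = 0;
		}
		if (flood_count < floodburst) {
			flood_count++;
			return 1;			/* within the burst limit */
		}
		return 0;				/* suppressed as flooding */
	}
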
70832+endmenu
70833diff --git a/grsecurity/Makefile b/grsecurity/Makefile
70834new file mode 100644
70835index 0000000..30ababb
70836--- /dev/null
70837+++ b/grsecurity/Makefile
70838@@ -0,0 +1,54 @@
70839+# grsecurity - access control and security hardening for Linux
70840+# All code in this directory and various hooks located throughout the Linux kernel are
70841+# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
70842+# http://www.grsecurity.net spender@grsecurity.net
70843+#
70844+# This program is free software; you can redistribute it and/or
70845+# modify it under the terms of the GNU General Public License version 2
70846+# as published by the Free Software Foundation.
70847+#
70848+# This program is distributed in the hope that it will be useful,
70849+# but WITHOUT ANY WARRANTY; without even the implied warranty of
70850+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
70851+# GNU General Public License for more details.
70852+#
70853+# You should have received a copy of the GNU General Public License
70854+# along with this program; if not, write to the Free Software
70855+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
70856+
70857+KBUILD_CFLAGS += -Werror
70858+
70859+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
70860+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
70861+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
70862+ grsec_usb.o grsec_ipc.o grsec_proc.o
70863+
70864+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
70865+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
70866+ gracl_learn.o grsec_log.o gracl_policy.o
70867+ifdef CONFIG_COMPAT
70868+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
70869+endif
70870+
70871+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
70872+
70873+ifdef CONFIG_NET
70874+obj-y += grsec_sock.o
70875+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
70876+endif
70877+
70878+ifndef CONFIG_GRKERNSEC
70879+obj-y += grsec_disabled.o
70880+endif
70881+
70882+ifdef CONFIG_GRKERNSEC_HIDESYM
70883+extra-y := grsec_hidesym.o
70884+$(obj)/grsec_hidesym.o:
70885+ @-chmod -f 500 /boot
70886+ @-chmod -f 500 /lib/modules
70887+ @-chmod -f 500 /lib64/modules
70888+ @-chmod -f 500 /lib32/modules
70889+ @-chmod -f 700 .
70890+ @-chmod -f 700 $(objtree)
70891+ @echo ' grsec: protected kernel image paths'
70892+endif
70893diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
70894new file mode 100644
70895index 0000000..58223f6
70896--- /dev/null
70897+++ b/grsecurity/gracl.c
70898@@ -0,0 +1,2702 @@
70899+#include <linux/kernel.h>
70900+#include <linux/module.h>
70901+#include <linux/sched.h>
70902+#include <linux/mm.h>
70903+#include <linux/file.h>
70904+#include <linux/fs.h>
70905+#include <linux/namei.h>
70906+#include <linux/mount.h>
70907+#include <linux/tty.h>
70908+#include <linux/proc_fs.h>
70909+#include <linux/lglock.h>
70910+#include <linux/slab.h>
70911+#include <linux/vmalloc.h>
70912+#include <linux/types.h>
70913+#include <linux/sysctl.h>
70914+#include <linux/netdevice.h>
70915+#include <linux/ptrace.h>
70916+#include <linux/gracl.h>
70917+#include <linux/gralloc.h>
70918+#include <linux/security.h>
70919+#include <linux/grinternal.h>
70920+#include <linux/pid_namespace.h>
70921+#include <linux/stop_machine.h>
70922+#include <linux/fdtable.h>
70923+#include <linux/percpu.h>
70925+#include <linux/hugetlb.h>
70926+#include <linux/posix-timers.h>
70927+#include <linux/prefetch.h>
70928+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
70929+#include <linux/magic.h>
70930+#include <linux/pagemap.h>
70931+#include "../fs/btrfs/async-thread.h"
70932+#include "../fs/btrfs/ctree.h"
70933+#include "../fs/btrfs/btrfs_inode.h"
70934+#endif
70935+#include "../fs/mount.h"
70936+
70937+#include <asm/uaccess.h>
70938+#include <asm/errno.h>
70939+#include <asm/mman.h>
70940+
70941+#define FOR_EACH_ROLE_START(role) \
70942+ role = running_polstate.role_list; \
70943+ while (role) {
70944+
70945+#define FOR_EACH_ROLE_END(role) \
70946+ role = role->prev; \
70947+ }
70948+
70949+extern struct path gr_real_root;
70950+
70951+static struct gr_policy_state running_polstate;
70952+struct gr_policy_state *polstate = &running_polstate;
70953+extern struct gr_alloc_state *current_alloc_state;
70954+
70955+extern char *gr_shared_page[4];
70956+DEFINE_RWLOCK(gr_inode_lock);
70957+
70958+static unsigned int gr_status __read_only = GR_STATUS_INIT;
70959+
70960+#ifdef CONFIG_NET
70961+extern struct vfsmount *sock_mnt;
70962+#endif
70963+
70964+extern struct vfsmount *pipe_mnt;
70965+extern struct vfsmount *shm_mnt;
70966+
70967+#ifdef CONFIG_HUGETLBFS
70968+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
70969+#endif
70970+
70971+extern u16 acl_sp_role_value;
70972+extern struct acl_object_label *fakefs_obj_rw;
70973+extern struct acl_object_label *fakefs_obj_rwx;
70974+
70975+int gr_acl_is_enabled(void)
70976+{
70977+ return (gr_status & GR_READY);
70978+}
70979+
70980+void gr_enable_rbac_system(void)
70981+{
70982+ pax_open_kernel();
70983+ gr_status |= GR_READY;
70984+ pax_close_kernel();
70985+}
70986+
70987+int gr_rbac_disable(void *unused)
70988+{
70989+ pax_open_kernel();
70990+ gr_status &= ~GR_READY;
70991+ pax_close_kernel();
70992+
70993+ return 0;
70994+}
70995+
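/* btrfs subvolumes share their superblock's s_dev; the per-subvolume
   anon_dev is used below so that (inode, device) pairs stay unique */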
70996+static inline dev_t __get_dev(const struct dentry *dentry)
70997+{
70998+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
70999+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
71000+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
71001+ else
71002+#endif
71003+ return dentry->d_sb->s_dev;
71004+}
71005+
71006+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
71007+{
71008+ return __get_dev(dentry);
71009+}
71010+
71011+static char gr_task_roletype_to_char(struct task_struct *task)
71012+{
71013+ switch (task->role->roletype &
71014+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
71015+ GR_ROLE_SPECIAL)) {
71016+ case GR_ROLE_DEFAULT:
71017+ return 'D';
71018+ case GR_ROLE_USER:
71019+ return 'U';
71020+ case GR_ROLE_GROUP:
71021+ return 'G';
71022+ case GR_ROLE_SPECIAL:
71023+ return 'S';
71024+ }
71025+
71026+ return 'X';
71027+}
71028+
71029+char gr_roletype_to_char(void)
71030+{
71031+ return gr_task_roletype_to_char(current);
71032+}
71033+
71034+__inline__ int
71035+gr_acl_tpe_check(void)
71036+{
71037+ if (unlikely(!(gr_status & GR_READY)))
71038+ return 0;
71039+ if (current->role->roletype & GR_ROLE_TPE)
71040+ return 1;
71041+ else
71042+ return 0;
71043+}
71044+
71045+int
71046+gr_handle_rawio(const struct inode *inode)
71047+{
71048+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
71049+ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
71050+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
71051+ !capable(CAP_SYS_RAWIO))
71052+ return 1;
71053+#endif
71054+ return 0;
71055+}
71056+
71057+int
71058+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
71059+{
71060+ if (likely(lena != lenb))
71061+ return 0;
71062+
71063+ return !memcmp(a, b, lena);
71064+}
71065+
71066+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
71067+{
71068+ *buflen -= namelen;
71069+ if (*buflen < 0)
71070+ return -ENAMETOOLONG;
71071+ *buffer -= namelen;
71072+ memcpy(*buffer, str, namelen);
71073+ return 0;
71074+}
71075+
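/* prepend() builds paths right to left: for /usr/bin the callers prepend
   "bin", then "/", then "usr", then "/", so the buffer pointer ends up at
   the leading '/' of the finished string */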
71076+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
71077+{
71078+ return prepend(buffer, buflen, name->name, name->len);
71079+}
71080+
71081+static int prepend_path(const struct path *path, struct path *root,
71082+ char **buffer, int *buflen)
71083+{
71084+ struct dentry *dentry = path->dentry;
71085+ struct vfsmount *vfsmnt = path->mnt;
71086+ struct mount *mnt = real_mount(vfsmnt);
71087+ bool slash = false;
71088+ int error = 0;
71089+
71090+ while (dentry != root->dentry || vfsmnt != root->mnt) {
71091+ struct dentry * parent;
71092+
71093+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
71094+ /* Global root? */
71095+ if (!mnt_has_parent(mnt)) {
71096+ goto out;
71097+ }
71098+ dentry = mnt->mnt_mountpoint;
71099+ mnt = mnt->mnt_parent;
71100+ vfsmnt = &mnt->mnt;
71101+ continue;
71102+ }
71103+ parent = dentry->d_parent;
71104+ prefetch(parent);
71105+ spin_lock(&dentry->d_lock);
71106+ error = prepend_name(buffer, buflen, &dentry->d_name);
71107+ spin_unlock(&dentry->d_lock);
71108+ if (!error)
71109+ error = prepend(buffer, buflen, "/", 1);
71110+ if (error)
71111+ break;
71112+
71113+ slash = true;
71114+ dentry = parent;
71115+ }
71116+
71117+out:
71118+ if (!error && !slash)
71119+ error = prepend(buffer, buflen, "/", 1);
71120+
71121+ return error;
71122+}
71123+
71124+/* this must be called with mount_lock and rename_lock held */
71125+
71126+static char *__our_d_path(const struct path *path, struct path *root,
71127+ char *buf, int buflen)
71128+{
71129+ char *res = buf + buflen;
71130+ int error;
71131+
71132+ prepend(&res, &buflen, "\0", 1);
71133+ error = prepend_path(path, root, &res, &buflen);
71134+ if (error)
71135+ return ERR_PTR(error);
71136+
71137+ return res;
71138+}
71139+
71140+static char *
71141+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
71142+{
71143+ char *retval;
71144+
71145+ retval = __our_d_path(path, root, buf, buflen);
71146+ if (unlikely(IS_ERR(retval)))
71147+ retval = strcpy(buf, "<path too long>");
71148+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
71149+ retval[1] = '\0';
71150+
71151+ return retval;
71152+}
71153+
71154+static char *
71155+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
71156+ char *buf, int buflen)
71157+{
71158+ struct path path;
71159+ char *res;
71160+
71161+ path.dentry = (struct dentry *)dentry;
71162+ path.mnt = (struct vfsmount *)vfsmnt;
71163+
71164+ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
71165+ by the RBAC system */
71166+ res = gen_full_path(&path, &gr_real_root, buf, buflen);
71167+
71168+ return res;
71169+}
71170+
71171+static char *
71172+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
71173+ char *buf, int buflen)
71174+{
71175+ char *res;
71176+ struct path path;
71177+ struct path root;
71178+ struct task_struct *reaper = init_pid_ns.child_reaper;
71179+
71180+ path.dentry = (struct dentry *)dentry;
71181+ path.mnt = (struct vfsmount *)vfsmnt;
71182+
71183+ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
71184+ get_fs_root(reaper->fs, &root);
71185+
71186+ read_seqlock_excl(&mount_lock);
71187+ write_seqlock(&rename_lock);
71188+ res = gen_full_path(&path, &root, buf, buflen);
71189+ write_sequnlock(&rename_lock);
71190+ read_sequnlock_excl(&mount_lock);
71191+
71192+ path_put(&root);
71193+ return res;
71194+}
71195+
71196+char *
71197+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
71198+{
71199+ char *ret;
71200+ read_seqlock_excl(&mount_lock);
71201+ write_seqlock(&rename_lock);
71202+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
71203+ PAGE_SIZE);
71204+ write_sequnlock(&rename_lock);
71205+ read_sequnlock_excl(&mount_lock);
71206+ return ret;
71207+}
71208+
71209+static char *
71210+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
71211+{
71212+ char *ret;
71213+ char *buf;
71214+ int buflen;
71215+
71216+ read_seqlock_excl(&mount_lock);
71217+ write_seqlock(&rename_lock);
71218+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
71219+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
71220+ buflen = (int)(ret - buf);
71221+ if (buflen >= 5)
71222+ prepend(&ret, &buflen, "/proc", 5);
71223+ else
71224+ ret = strcpy(buf, "<path too long>");
71225+ write_sequnlock(&rename_lock);
71226+ read_sequnlock_excl(&mount_lock);
71227+ return ret;
71228+}
71229+
71230+char *
71231+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
71232+{
71233+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
71234+ PAGE_SIZE);
71235+}
71236+
71237+char *
71238+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
71239+{
71240+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
71241+ PAGE_SIZE);
71242+}
71243+
71244+char *
71245+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
71246+{
71247+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
71248+ PAGE_SIZE);
71249+}
71250+
71251+char *
71252+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
71253+{
71254+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
71255+ PAGE_SIZE);
71256+}
71257+
71258+char *
71259+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
71260+{
71261+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
71262+ PAGE_SIZE);
71263+}
71264+
71265+__inline__ __u32
71266+to_gr_audit(const __u32 reqmode)
71267+{
71268+ /* masks off auditable permission flags, then shifts them to create
71269+ auditing flags, and adds the special case of append auditing if
71270+ we're requesting write */
71271+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
71272+}
71273+
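/* layout assumed from the shift above (not stated in this hunk): each
   audit flag sits exactly ten bits above the permission it audits, e.g.
   to_gr_audit(GR_READ) would yield GR_READ << 10 */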
71274+struct acl_role_label *
71275+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
71276+ const gid_t gid)
71277+{
71278+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
71279+ struct acl_role_label *match;
71280+ struct role_allowed_ip *ipp;
71281+ unsigned int x;
71282+ u32 curr_ip = task->signal->saved_ip;
71283+
71284+ match = state->acl_role_set.r_hash[index];
71285+
71286+ while (match) {
71287+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
71288+ for (x = 0; x < match->domain_child_num; x++) {
71289+ if (match->domain_children[x] == uid)
71290+ goto found;
71291+ }
71292+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
71293+ break;
71294+ match = match->next;
71295+ }
71296+found:
71297+ if (match == NULL) {
71298+ try_group:
71299+ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
71300+ match = state->acl_role_set.r_hash[index];
71301+
71302+ while (match) {
71303+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
71304+ for (x = 0; x < match->domain_child_num; x++) {
71305+ if (match->domain_children[x] == gid)
71306+ goto found2;
71307+ }
71308+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
71309+ break;
71310+ match = match->next;
71311+ }
71312+found2:
71313+ if (match == NULL)
71314+ match = state->default_role;
71315+ if (match->allowed_ips == NULL)
71316+ return match;
71317+ else {
71318+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
71319+ if (likely
71320+ ((ntohl(curr_ip) & ipp->netmask) ==
71321+ (ntohl(ipp->addr) & ipp->netmask)))
71322+ return match;
71323+ }
71324+ match = state->default_role;
71325+ }
71326+ } else if (match->allowed_ips == NULL) {
71327+ return match;
71328+ } else {
71329+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
71330+ if (likely
71331+ ((ntohl(curr_ip) & ipp->netmask) ==
71332+ (ntohl(ipp->addr) & ipp->netmask)))
71333+ return match;
71334+ }
71335+ goto try_group;
71336+ }
71337+
71338+ return match;
71339+}
71340+
71341+static struct acl_role_label *
71342+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
71343+ const gid_t gid)
71344+{
71345+ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
71346+}
71347+
71348+struct acl_subject_label *
71349+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
71350+ const struct acl_role_label *role)
71351+{
71352+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
71353+ struct acl_subject_label *match;
71354+
71355+ match = role->subj_hash[index];
71356+
71357+ while (match && (match->inode != ino || match->device != dev ||
71358+ (match->mode & GR_DELETED))) {
71359+ match = match->next;
71360+ }
71361+
71362+ if (match && !(match->mode & GR_DELETED))
71363+ return match;
71364+ else
71365+ return NULL;
71366+}
71367+
71368+struct acl_subject_label *
71369+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
71370+ const struct acl_role_label *role)
71371+{
71372+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
71373+ struct acl_subject_label *match;
71374+
71375+ match = role->subj_hash[index];
71376+
71377+ while (match && (match->inode != ino || match->device != dev ||
71378+ !(match->mode & GR_DELETED))) {
71379+ match = match->next;
71380+ }
71381+
71382+ if (match && (match->mode & GR_DELETED))
71383+ return match;
71384+ else
71385+ return NULL;
71386+}
71387+
71388+static struct acl_object_label *
71389+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
71390+ const struct acl_subject_label *subj)
71391+{
71392+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
71393+ struct acl_object_label *match;
71394+
71395+ match = subj->obj_hash[index];
71396+
71397+ while (match && (match->inode != ino || match->device != dev ||
71398+ (match->mode & GR_DELETED))) {
71399+ match = match->next;
71400+ }
71401+
71402+ if (match && !(match->mode & GR_DELETED))
71403+ return match;
71404+ else
71405+ return NULL;
71406+}
71407+
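/* create-time variant: prefer an object recorded for a since-deleted file
   at this inode/dev (the name is being re-created), then fall back to a
   live, non-deleted object */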
71408+static struct acl_object_label *
71409+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
71410+ const struct acl_subject_label *subj)
71411+{
71412+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
71413+ struct acl_object_label *match;
71414+
71415+ match = subj->obj_hash[index];
71416+
71417+ while (match && (match->inode != ino || match->device != dev ||
71418+ !(match->mode & GR_DELETED))) {
71419+ match = match->next;
71420+ }
71421+
71422+ if (match && (match->mode & GR_DELETED))
71423+ return match;
71424+
71425+ match = subj->obj_hash[index];
71426+
71427+ while (match && (match->inode != ino || match->device != dev ||
71428+ (match->mode & GR_DELETED))) {
71429+ match = match->next;
71430+ }
71431+
71432+ if (match && !(match->mode & GR_DELETED))
71433+ return match;
71434+ else
71435+ return NULL;
71436+}
71437+
71438+struct name_entry *
71439+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
71440+{
71441+ unsigned int len = strlen(name);
71442+ unsigned int key = full_name_hash(name, len);
71443+ unsigned int index = key % state->name_set.n_size;
71444+ struct name_entry *match;
71445+
71446+ match = state->name_set.n_hash[index];
71447+
71448+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
71449+ match = match->next;
71450+
71451+ return match;
71452+}
71453+
71454+static struct name_entry *
71455+lookup_name_entry(const char *name)
71456+{
71457+ return __lookup_name_entry(&running_polstate, name);
71458+}
71459+
71460+static struct name_entry *
71461+lookup_name_entry_create(const char *name)
71462+{
71463+ unsigned int len = strlen(name);
71464+ unsigned int key = full_name_hash(name, len);
71465+ unsigned int index = key % running_polstate.name_set.n_size;
71466+ struct name_entry *match;
71467+
71468+ match = running_polstate.name_set.n_hash[index];
71469+
71470+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
71471+ !match->deleted))
71472+ match = match->next;
71473+
71474+ if (match && match->deleted)
71475+ return match;
71476+
71477+ match = running_polstate.name_set.n_hash[index];
71478+
71479+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
71480+ match->deleted))
71481+ match = match->next;
71482+
71483+ if (match && !match->deleted)
71484+ return match;
71485+ else
71486+ return NULL;
71487+}
71488+
71489+static struct inodev_entry *
71490+lookup_inodev_entry(const ino_t ino, const dev_t dev)
71491+{
71492+ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
71493+ struct inodev_entry *match;
71494+
71495+ match = running_polstate.inodev_set.i_hash[index];
71496+
71497+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
71498+ match = match->next;
71499+
71500+ return match;
71501+}
71502+
71503+void
71504+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
71505+{
71506+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
71507+ state->inodev_set.i_size);
71508+ struct inodev_entry **curr;
71509+
71510+ entry->prev = NULL;
71511+
71512+ curr = &state->inodev_set.i_hash[index];
71513+ if (*curr != NULL)
71514+ (*curr)->prev = entry;
71515+
71516+ entry->next = *curr;
71517+ *curr = entry;
71518+
71519+ return;
71520+}
71521+
71522+static void
71523+insert_inodev_entry(struct inodev_entry *entry)
71524+{
71525+ __insert_inodev_entry(&running_polstate, entry);
71526+}
71527+
71528+void
71529+insert_acl_obj_label(struct acl_object_label *obj,
71530+ struct acl_subject_label *subj)
71531+{
71532+ unsigned int index =
71533+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
71534+ struct acl_object_label **curr;
71535+
71536+ obj->prev = NULL;
71537+
71538+ curr = &subj->obj_hash[index];
71539+ if (*curr != NULL)
71540+ (*curr)->prev = obj;
71541+
71542+ obj->next = *curr;
71543+ *curr = obj;
71544+
71545+ return;
71546+}
71547+
71548+void
71549+insert_acl_subj_label(struct acl_subject_label *obj,
71550+ struct acl_role_label *role)
71551+{
71552+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
71553+ struct acl_subject_label **curr;
71554+
71555+ obj->prev = NULL;
71556+
71557+ curr = &role->subj_hash[index];
71558+ if (*curr != NULL)
71559+ (*curr)->prev = obj;
71560+
71561+ obj->next = *curr;
71562+ *curr = obj;
71563+
71564+ return;
71565+}
71566+
71567+/* derived from glibc fnmatch(); returns 0: match, 1: no match */
71568+
71569+static int
71570+glob_match(const char *p, const char *n)
71571+{
71572+ char c;
71573+
71574+ while ((c = *p++) != '\0') {
71575+ switch (c) {
71576+ case '?':
71577+ if (*n == '\0')
71578+ return 1;
71579+ else if (*n == '/')
71580+ return 1;
71581+ break;
71582+ case '\\':
71583+ if (*n != c)
71584+ return 1;
71585+ break;
71586+ case '*':
71587+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
71588+ if (*n == '/')
71589+ return 1;
71590+ else if (c == '?') {
71591+ if (*n == '\0')
71592+ return 1;
71593+ else
71594+ ++n;
71595+ }
71596+ }
71597+ if (c == '\0') {
71598+ return 0;
71599+ } else {
71600+ const char *endp;
71601+
71602+ if ((endp = strchr(n, '/')) == NULL)
71603+ endp = n + strlen(n);
71604+
71605+ if (c == '[') {
71606+ for (--p; n < endp; ++n)
71607+ if (!glob_match(p, n))
71608+ return 0;
71609+ } else if (c == '/') {
71610+ while (*n != '\0' && *n != '/')
71611+ ++n;
71612+ if (*n == '/' && !glob_match(p, n + 1))
71613+ return 0;
71614+ } else {
71615+ for (--p; n < endp; ++n)
71616+ if (*n == c && !glob_match(p, n))
71617+ return 0;
71618+ }
71619+
71620+ return 1;
71621+ }
71622+ case '[':
71623+ {
71624+ int not;
71625+ char cold;
71626+
71627+ if (*n == '\0' || *n == '/')
71628+ return 1;
71629+
71630+ not = (*p == '!' || *p == '^');
71631+ if (not)
71632+ ++p;
71633+
71634+ c = *p++;
71635+ for (;;) {
71636+ unsigned char fn = (unsigned char)*n;
71637+
71638+ if (c == '\0')
71639+ return 1;
71640+ else {
71641+ if (c == fn)
71642+ goto matched;
71643+ cold = c;
71644+ c = *p++;
71645+
71646+ if (c == '-' && *p != ']') {
71647+ unsigned char cend = *p++;
71648+
71649+ if (cend == '\0')
71650+ return 1;
71651+
71652+ if (cold <= fn && fn <= cend)
71653+ goto matched;
71654+
71655+ c = *p++;
71656+ }
71657+ }
71658+
71659+ if (c == ']')
71660+ break;
71661+ }
71662+ if (!not)
71663+ return 1;
71664+ break;
71665+ matched:
71666+ while (c != ']') {
71667+ if (c == '\0')
71668+ return 1;
71669+
71670+ c = *p++;
71671+ }
71672+ if (not)
71673+ return 1;
71674+ }
71675+ break;
71676+ default:
71677+ if (c != *n)
71678+ return 1;
71679+ }
71680+
71681+ ++n;
71682+ }
71683+
71684+ if (*n == '\0')
71685+ return 0;
71686+
71687+ if (*n == '/')
71688+ return 0;
71689+
71690+ return 1;
71691+}
71692+
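/* illustrative calls (hypothetical inputs; 0 == match, as noted above):
 *   glob_match("*.txt", "notes.txt")  -> 0  ('*' plus literal suffix)
 *   glob_match("/home/?", "/home/a")  -> 0  ('?' matches one character)
 *   glob_match("/home/?", "/home/ab") -> 1  (one trailing char too many)
 *   glob_match("/home/?", "/home//")  -> 1  ('?' never matches '/')
 */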
71693+static struct acl_object_label *
71694+chk_glob_label(struct acl_object_label *globbed,
71695+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
71696+{
71697+ struct acl_object_label *tmp;
71698+
71699+ if (*path == NULL)
71700+ *path = gr_to_filename_nolock(dentry, mnt);
71701+
71702+ tmp = globbed;
71703+
71704+ while (tmp) {
71705+ if (!glob_match(tmp->filename, *path))
71706+ return tmp;
71707+ tmp = tmp->next;
71708+ }
71709+
71710+ return NULL;
71711+}
71712+
71713+static struct acl_object_label *
71714+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
71715+ const ino_t curr_ino, const dev_t curr_dev,
71716+ const struct acl_subject_label *subj, char **path, const int checkglob)
71717+{
71718+ struct acl_subject_label *tmpsubj;
71719+ struct acl_object_label *retval;
71720+ struct acl_object_label *retval2;
71721+
71722+ tmpsubj = (struct acl_subject_label *) subj;
71723+ read_lock(&gr_inode_lock);
71724+ do {
71725+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
71726+ if (retval) {
71727+ if (checkglob && retval->globbed) {
71728+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
71729+ if (retval2)
71730+ retval = retval2;
71731+ }
71732+ break;
71733+ }
71734+ } while ((tmpsubj = tmpsubj->parent_subject));
71735+ read_unlock(&gr_inode_lock);
71736+
71737+ return retval;
71738+}
71739+
71740+static __inline__ struct acl_object_label *
71741+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
71742+ struct dentry *curr_dentry,
71743+ const struct acl_subject_label *subj, char **path, const int checkglob)
71744+{
71745+ int newglob = checkglob;
71746+ ino_t inode;
71747+ dev_t device;
71748+
71749+	/* if we aren't checking a subdirectory of the original path yet, don't do glob checking,
71750+	   as we don't want a / * rule to match instead of the / object.
71751+	   Don't do this for create lookups that call this function, though, since they're looking up
71752+	   on the parent and thus need globbing checks on all paths.
71753+	*/
71754+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
71755+ newglob = GR_NO_GLOB;
71756+
71757+ spin_lock(&curr_dentry->d_lock);
71758+ inode = curr_dentry->d_inode->i_ino;
71759+ device = __get_dev(curr_dentry);
71760+ spin_unlock(&curr_dentry->d_lock);
71761+
71762+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
71763+}
71764+
71765+#ifdef CONFIG_HUGETLBFS
71766+static inline bool
71767+is_hugetlbfs_mnt(const struct vfsmount *mnt)
71768+{
71769+ int i;
71770+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
71771+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
71772+ return true;
71773+ }
71774+
71775+ return false;
71776+}
71777+#endif
71778+
71779+static struct acl_object_label *
71780+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71781+ const struct acl_subject_label *subj, char *path, const int checkglob)
71782+{
71783+ struct dentry *dentry = (struct dentry *) l_dentry;
71784+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
71785+ struct mount *real_mnt = real_mount(mnt);
71786+ struct acl_object_label *retval;
71787+ struct dentry *parent;
71788+
71789+ read_seqlock_excl(&mount_lock);
71790+ write_seqlock(&rename_lock);
71791+
71792+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
71793+#ifdef CONFIG_NET
71794+ mnt == sock_mnt ||
71795+#endif
71796+#ifdef CONFIG_HUGETLBFS
71797+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
71798+#endif
71799+ /* ignore Eric Biederman */
71800+ IS_PRIVATE(l_dentry->d_inode))) {
71801+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
71802+ goto out;
71803+ }
71804+
71805+ for (;;) {
71806+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
71807+ break;
71808+
71809+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
71810+ if (!mnt_has_parent(real_mnt))
71811+ break;
71812+
71813+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
71814+ if (retval != NULL)
71815+ goto out;
71816+
71817+ dentry = real_mnt->mnt_mountpoint;
71818+ real_mnt = real_mnt->mnt_parent;
71819+ mnt = &real_mnt->mnt;
71820+ continue;
71821+ }
71822+
71823+ parent = dentry->d_parent;
71824+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
71825+ if (retval != NULL)
71826+ goto out;
71827+
71828+ dentry = parent;
71829+ }
71830+
71831+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
71832+
71833+ /* gr_real_root is pinned so we don't have to hold a reference */
71834+ if (retval == NULL)
71835+ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
71836+out:
71837+ write_sequnlock(&rename_lock);
71838+ read_sequnlock_excl(&mount_lock);
71839+
71840+ BUG_ON(retval == NULL);
71841+
71842+ return retval;
71843+}
71844+
71845+static __inline__ struct acl_object_label *
71846+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71847+ const struct acl_subject_label *subj)
71848+{
71849+ char *path = NULL;
71850+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
71851+}
71852+
71853+static __inline__ struct acl_object_label *
71854+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71855+ const struct acl_subject_label *subj)
71856+{
71857+ char *path = NULL;
71858+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
71859+}
71860+
71861+static __inline__ struct acl_object_label *
71862+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71863+ const struct acl_subject_label *subj, char *path)
71864+{
71865+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
71866+}
71867+
71868+struct acl_subject_label *
71869+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71870+ const struct acl_role_label *role)
71871+{
71872+ struct dentry *dentry = (struct dentry *) l_dentry;
71873+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
71874+ struct mount *real_mnt = real_mount(mnt);
71875+ struct acl_subject_label *retval;
71876+ struct dentry *parent;
71877+
71878+ read_seqlock_excl(&mount_lock);
71879+ write_seqlock(&rename_lock);
71880+
71881+ for (;;) {
71882+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
71883+ break;
71884+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
71885+ if (!mnt_has_parent(real_mnt))
71886+ break;
71887+
71888+ spin_lock(&dentry->d_lock);
71889+ read_lock(&gr_inode_lock);
71890+ retval =
71891+ lookup_acl_subj_label(dentry->d_inode->i_ino,
71892+ __get_dev(dentry), role);
71893+ read_unlock(&gr_inode_lock);
71894+ spin_unlock(&dentry->d_lock);
71895+ if (retval != NULL)
71896+ goto out;
71897+
71898+ dentry = real_mnt->mnt_mountpoint;
71899+ real_mnt = real_mnt->mnt_parent;
71900+ mnt = &real_mnt->mnt;
71901+ continue;
71902+ }
71903+
71904+ spin_lock(&dentry->d_lock);
71905+ read_lock(&gr_inode_lock);
71906+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
71907+ __get_dev(dentry), role);
71908+ read_unlock(&gr_inode_lock);
71909+ parent = dentry->d_parent;
71910+ spin_unlock(&dentry->d_lock);
71911+
71912+ if (retval != NULL)
71913+ goto out;
71914+
71915+ dentry = parent;
71916+ }
71917+
71918+ spin_lock(&dentry->d_lock);
71919+ read_lock(&gr_inode_lock);
71920+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
71921+ __get_dev(dentry), role);
71922+ read_unlock(&gr_inode_lock);
71923+ spin_unlock(&dentry->d_lock);
71924+
71925+ if (unlikely(retval == NULL)) {
71926+ /* gr_real_root is pinned, we don't need to hold a reference */
71927+ read_lock(&gr_inode_lock);
71928+ retval = lookup_acl_subj_label(gr_real_root.dentry->d_inode->i_ino,
71929+ __get_dev(gr_real_root.dentry), role);
71930+ read_unlock(&gr_inode_lock);
71931+ }
71932+out:
71933+ write_sequnlock(&rename_lock);
71934+ read_sequnlock_excl(&mount_lock);
71935+
71936+ BUG_ON(retval == NULL);
71937+
71938+ return retval;
71939+}
71940+
71941+void
71942+assign_special_role(const char *rolename)
71943+{
71944+ struct acl_object_label *obj;
71945+ struct acl_role_label *r;
71946+ struct acl_role_label *assigned = NULL;
71947+ struct task_struct *tsk;
71948+ struct file *filp;
71949+
71950+ FOR_EACH_ROLE_START(r)
71951+ if (!strcmp(rolename, r->rolename) &&
71952+ (r->roletype & GR_ROLE_SPECIAL)) {
71953+ assigned = r;
71954+ break;
71955+ }
71956+ FOR_EACH_ROLE_END(r)
71957+
71958+ if (!assigned)
71959+ return;
71960+
71961+ read_lock(&tasklist_lock);
71962+ read_lock(&grsec_exec_file_lock);
71963+
71964+ tsk = current->real_parent;
71965+ if (tsk == NULL)
71966+ goto out_unlock;
71967+
71968+ filp = tsk->exec_file;
71969+ if (filp == NULL)
71970+ goto out_unlock;
71971+
71972+ tsk->is_writable = 0;
71973+ tsk->inherited = 0;
71974+
71975+ tsk->acl_sp_role = 1;
71976+ tsk->acl_role_id = ++acl_sp_role_value;
71977+ tsk->role = assigned;
71978+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
71979+
71980+ /* ignore additional mmap checks for processes that are writable
71981+ by the default ACL */
71982+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
71983+ if (unlikely(obj->mode & GR_WRITE))
71984+ tsk->is_writable = 1;
71985+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
71986+ if (unlikely(obj->mode & GR_WRITE))
71987+ tsk->is_writable = 1;
71988+
71989+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71990+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
71991+ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
71992+#endif
71993+
71994+out_unlock:
71995+ read_unlock(&grsec_exec_file_lock);
71996+ read_unlock(&tasklist_lock);
71997+ return;
71998+}
71999+
72000+
72001+static void
72002+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
72003+{
72004+ struct task_struct *task = current;
72005+ const struct cred *cred = current_cred();
72006+
72007+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
72008+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
72009+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
72010+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
72011+
72012+ return;
72013+}
72014+
72015+static void
72016+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
72017+{
72018+ struct task_struct *task = current;
72019+ const struct cred *cred = current_cred();
72020+
72021+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
72022+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
72023+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
72024+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
72025+
72026+ return;
72027+}
72028+
72029+static void
72030+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
72031+{
72032+ struct task_struct *task = current;
72033+ const struct cred *cred = current_cred();
72034+
72035+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
72036+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
72037+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
72038+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
72039+
72040+ return;
72041+}
72042+
72043+static void
72044+gr_set_proc_res(struct task_struct *task)
72045+{
72046+ struct acl_subject_label *proc;
72047+ unsigned short i;
72048+
72049+ proc = task->acl;
72050+
72051+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
72052+ return;
72053+
72054+ for (i = 0; i < RLIM_NLIMITS; i++) {
72055+ if (!(proc->resmask & (1U << i)))
72056+ continue;
72057+
72058+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
72059+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
72060+
72061+ if (i == RLIMIT_CPU)
72062+ update_rlimit_cpu(task, proc->res[i].rlim_cur);
72063+ }
72064+
72065+ return;
72066+}
72067+
72068+/* both of the below must be called with
72069+ rcu_read_lock();
72070+ read_lock(&tasklist_lock);
72071+ read_lock(&grsec_exec_file_lock);
72072+*/
72073+
72074+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename)
72075+{
72076+ char *tmpname;
72077+ struct acl_subject_label *tmpsubj;
72078+ struct file *filp;
72079+ struct name_entry *nmatch;
72080+
72081+ filp = task->exec_file;
72082+ if (filp == NULL)
72083+ return NULL;
72084+
72085+	/* the following is needed to apply the correct subject
72086+	   to binaries that were already running when the RBAC system
72087+	   was enabled and whose on-disk files have been
72088+	   replaced or deleted since their execution
72089+ -----
72090+ when the RBAC system starts, the inode/dev
72091+ from exec_file will be one the RBAC system
72092+ is unaware of. It only knows the inode/dev
72093+ of the present file on disk, or the absence
72094+ of it.
72095+ */
72096+
72097+ if (filename)
72098+ nmatch = __lookup_name_entry(state, filename);
72099+ else {
72100+ preempt_disable();
72101+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
72102+
72103+ nmatch = __lookup_name_entry(state, tmpname);
72104+ preempt_enable();
72105+ }
72106+ tmpsubj = NULL;
72107+ if (nmatch) {
72108+ if (nmatch->deleted)
72109+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
72110+ else
72111+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
72112+ }
72113+ /* this also works for the reload case -- if we don't match a potentially inherited subject
72114+ then we fall back to a normal lookup based on the binary's ino/dev
72115+ */
72116+ if (tmpsubj == NULL)
72117+ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
72118+
72119+ return tmpsubj;
72120+}
72121+
72122+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename)
72123+{
72124+ return __gr_get_subject_for_task(&running_polstate, task, filename);
72125+}
72126+
72127+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
72128+{
72129+ struct acl_object_label *obj;
72130+ struct file *filp;
72131+
72132+ filp = task->exec_file;
72133+
72134+ task->acl = subj;
72135+ task->is_writable = 0;
72136+ /* ignore additional mmap checks for processes that are writable
72137+ by the default ACL */
72138+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
72139+ if (unlikely(obj->mode & GR_WRITE))
72140+ task->is_writable = 1;
72141+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
72142+ if (unlikely(obj->mode & GR_WRITE))
72143+ task->is_writable = 1;
72144+
72145+ gr_set_proc_res(task);
72146+
72147+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
72148+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
72149+#endif
72150+}
72151+
72152+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
72153+{
72154+ __gr_apply_subject_to_task(&running_polstate, task, subj);
72155+}
72156+
72157+__u32
72158+gr_search_file(const struct dentry * dentry, const __u32 mode,
72159+ const struct vfsmount * mnt)
72160+{
72161+ __u32 retval = mode;
72162+ struct acl_subject_label *curracl;
72163+ struct acl_object_label *currobj;
72164+
72165+ if (unlikely(!(gr_status & GR_READY)))
72166+ return (mode & ~GR_AUDITS);
72167+
72168+ curracl = current->acl;
72169+
72170+ currobj = chk_obj_label(dentry, mnt, curracl);
72171+ retval = currobj->mode & mode;
72172+
72173+ /* if we're opening a specified transfer file for writing
72174+ (e.g. /dev/initctl), then transfer our role to init
72175+ */
72176+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
72177+ current->role->roletype & GR_ROLE_PERSIST)) {
72178+ struct task_struct *task = init_pid_ns.child_reaper;
72179+
72180+ if (task->role != current->role) {
72181+ struct acl_subject_label *subj;
72182+
72183+ task->acl_sp_role = 0;
72184+ task->acl_role_id = current->acl_role_id;
72185+ task->role = current->role;
72186+ rcu_read_lock();
72187+ read_lock(&grsec_exec_file_lock);
72188+ subj = gr_get_subject_for_task(task, NULL);
72189+ gr_apply_subject_to_task(task, subj);
72190+ read_unlock(&grsec_exec_file_lock);
72191+ rcu_read_unlock();
72192+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
72193+ }
72194+ }
72195+
72196+ if (unlikely
72197+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
72198+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
72199+ __u32 new_mode = mode;
72200+
72201+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
72202+
72203+ retval = new_mode;
72204+
72205+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
72206+ new_mode |= GR_INHERIT;
72207+
72208+ if (!(mode & GR_NOLEARN))
72209+ gr_log_learn(dentry, mnt, new_mode);
72210+ }
72211+
72212+ return retval;
72213+}
72214+
72215+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
72216+ const struct dentry *parent,
72217+ const struct vfsmount *mnt)
72218+{
72219+ struct name_entry *match;
72220+ struct acl_object_label *matchpo;
72221+ struct acl_subject_label *curracl;
72222+ char *path;
72223+
72224+ if (unlikely(!(gr_status & GR_READY)))
72225+ return NULL;
72226+
72227+ preempt_disable();
72228+ path = gr_to_filename_rbac(new_dentry, mnt);
72229+ match = lookup_name_entry_create(path);
72230+
72231+ curracl = current->acl;
72232+
72233+ if (match) {
72234+ read_lock(&gr_inode_lock);
72235+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
72236+ read_unlock(&gr_inode_lock);
72237+
72238+ if (matchpo) {
72239+ preempt_enable();
72240+ return matchpo;
72241+ }
72242+ }
72243+
72244+ // lookup parent
72245+
72246+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
72247+
72248+ preempt_enable();
72249+ return matchpo;
72250+}
72251+
72252+__u32
72253+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
72254+ const struct vfsmount * mnt, const __u32 mode)
72255+{
72256+ struct acl_object_label *matchpo;
72257+ __u32 retval;
72258+
72259+ if (unlikely(!(gr_status & GR_READY)))
72260+ return (mode & ~GR_AUDITS);
72261+
72262+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
72263+
72264+ retval = matchpo->mode & mode;
72265+
72266+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
72267+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
72268+ __u32 new_mode = mode;
72269+
72270+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
72271+
72272+ gr_log_learn(new_dentry, mnt, new_mode);
72273+ return new_mode;
72274+ }
72275+
72276+ return retval;
72277+}
72278+
72279+__u32
72280+gr_check_link(const struct dentry * new_dentry,
72281+ const struct dentry * parent_dentry,
72282+ const struct vfsmount * parent_mnt,
72283+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
72284+{
72285+ struct acl_object_label *obj;
72286+ __u32 oldmode, newmode;
72287+ __u32 needmode;
72288+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
72289+ GR_DELETE | GR_INHERIT;
72290+
72291+ if (unlikely(!(gr_status & GR_READY)))
72292+ return (GR_CREATE | GR_LINK);
72293+
72294+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
72295+ oldmode = obj->mode;
72296+
72297+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
72298+ newmode = obj->mode;
72299+
72300+ needmode = newmode & checkmodes;
72301+
72302+ // old name for hardlink must have at least the permissions of the new name
72303+ if ((oldmode & needmode) != needmode)
72304+ goto bad;
72305+
72306+ // if old name had restrictions/auditing, make sure the new name does as well
72307+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
72308+
72309+ // don't allow hardlinking of suid/sgid/fcapped files without permission
72310+ if (is_privileged_binary(old_dentry))
72311+ needmode |= GR_SETID;
72312+
72313+ if ((newmode & needmode) != needmode)
72314+ goto bad;
72315+
72316+ // enforce minimum permissions
72317+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
72318+ return newmode;
72319+bad:
72320+ needmode = oldmode;
72321+ if (is_privileged_binary(old_dentry))
72322+ needmode |= GR_SETID;
72323+
72324+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
72325+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
72326+ return (GR_CREATE | GR_LINK);
72327+ } else if (newmode & GR_SUPPRESS)
72328+ return GR_SUPPRESS;
72329+ else
72330+ return 0;
72331+}
72332+
72333+int
72334+gr_check_hidden_task(const struct task_struct *task)
72335+{
72336+ if (unlikely(!(gr_status & GR_READY)))
72337+ return 0;
72338+
72339+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
72340+ return 1;
72341+
72342+ return 0;
72343+}
72344+
72345+int
72346+gr_check_protected_task(const struct task_struct *task)
72347+{
72348+ if (unlikely(!(gr_status & GR_READY) || !task))
72349+ return 0;
72350+
72351+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
72352+ task->acl != current->acl)
72353+ return 1;
72354+
72355+ return 0;
72356+}
72357+
72358+int
72359+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
72360+{
72361+ struct task_struct *p;
72362+ int ret = 0;
72363+
72364+ if (unlikely(!(gr_status & GR_READY) || !pid))
72365+ return ret;
72366+
72367+ read_lock(&tasklist_lock);
72368+ do_each_pid_task(pid, type, p) {
72369+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
72370+ p->acl != current->acl) {
72371+ ret = 1;
72372+ goto out;
72373+ }
72374+ } while_each_pid_task(pid, type, p);
72375+out:
72376+ read_unlock(&tasklist_lock);
72377+
72378+ return ret;
72379+}
72380+
72381+void
72382+gr_copy_label(struct task_struct *tsk)
72383+{
72384+ struct task_struct *p = current;
72385+
72386+ tsk->inherited = p->inherited;
72387+ tsk->acl_sp_role = 0;
72388+ tsk->acl_role_id = p->acl_role_id;
72389+ tsk->acl = p->acl;
72390+ tsk->role = p->role;
72391+ tsk->signal->used_accept = 0;
72392+ tsk->signal->curr_ip = p->signal->curr_ip;
72393+ tsk->signal->saved_ip = p->signal->saved_ip;
72394+ if (p->exec_file)
72395+ get_file(p->exec_file);
72396+ tsk->exec_file = p->exec_file;
72397+ tsk->is_writable = p->is_writable;
72398+ if (unlikely(p->signal->used_accept)) {
72399+ p->signal->curr_ip = 0;
72400+ p->signal->saved_ip = 0;
72401+ }
72402+
72403+ return;
72404+}
72405+
72406+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
72407+
72408+int
72409+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
72410+{
72411+ unsigned int i;
72412+ __u16 num;
72413+ uid_t *uidlist;
72414+ uid_t curuid;
72415+ int realok = 0;
72416+ int effectiveok = 0;
72417+ int fsok = 0;
72418+ uid_t globalreal, globaleffective, globalfs;
72419+
72420+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
72421+ struct user_struct *user;
72422+
72423+ if (!uid_valid(real))
72424+ goto skipit;
72425+
72426+ /* find user based on global namespace */
72427+
72428+ globalreal = GR_GLOBAL_UID(real);
72429+
72430+ user = find_user(make_kuid(&init_user_ns, globalreal));
72431+ if (user == NULL)
72432+ goto skipit;
72433+
72434+ if (gr_process_kernel_setuid_ban(user)) {
72435+ /* for find_user */
72436+ free_uid(user);
72437+ return 1;
72438+ }
72439+
72440+ /* for find_user */
72441+ free_uid(user);
72442+
72443+skipit:
72444+#endif
72445+
72446+ if (unlikely(!(gr_status & GR_READY)))
72447+ return 0;
72448+
72449+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
72450+ gr_log_learn_uid_change(real, effective, fs);
72451+
72452+ num = current->acl->user_trans_num;
72453+ uidlist = current->acl->user_transitions;
72454+
72455+ if (uidlist == NULL)
72456+ return 0;
72457+
72458+ if (!uid_valid(real)) {
72459+ realok = 1;
72460+ globalreal = (uid_t)-1;
72461+ } else {
72462+ globalreal = GR_GLOBAL_UID(real);
72463+ }
72464+ if (!uid_valid(effective)) {
72465+ effectiveok = 1;
72466+ globaleffective = (uid_t)-1;
72467+ } else {
72468+ globaleffective = GR_GLOBAL_UID(effective);
72469+ }
72470+ if (!uid_valid(fs)) {
72471+ fsok = 1;
72472+ globalfs = (uid_t)-1;
72473+ } else {
72474+ globalfs = GR_GLOBAL_UID(fs);
72475+ }
72476+
72477+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
72478+ for (i = 0; i < num; i++) {
72479+ curuid = uidlist[i];
72480+ if (globalreal == curuid)
72481+ realok = 1;
72482+ if (globaleffective == curuid)
72483+ effectiveok = 1;
72484+ if (globalfs == curuid)
72485+ fsok = 1;
72486+ }
72487+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
72488+ for (i = 0; i < num; i++) {
72489+ curuid = uidlist[i];
72490+ if (globalreal == curuid)
72491+ break;
72492+ if (globaleffective == curuid)
72493+ break;
72494+ if (globalfs == curuid)
72495+ break;
72496+ }
72497+ /* not in deny list */
72498+ if (i == num) {
72499+ realok = 1;
72500+ effectiveok = 1;
72501+ fsok = 1;
72502+ }
72503+ }
72504+
72505+ if (realok && effectiveok && fsok)
72506+ return 0;
72507+ else {
72508+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
72509+ return 1;
72510+ }
72511+}
72512+
72513+int
72514+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
72515+{
72516+ unsigned int i;
72517+ __u16 num;
72518+ gid_t *gidlist;
72519+ gid_t curgid;
72520+ int realok = 0;
72521+ int effectiveok = 0;
72522+ int fsok = 0;
72523+ gid_t globalreal, globaleffective, globalfs;
72524+
72525+ if (unlikely(!(gr_status & GR_READY)))
72526+ return 0;
72527+
72528+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
72529+ gr_log_learn_gid_change(real, effective, fs);
72530+
72531+ num = current->acl->group_trans_num;
72532+ gidlist = current->acl->group_transitions;
72533+
72534+ if (gidlist == NULL)
72535+ return 0;
72536+
72537+ if (!gid_valid(real)) {
72538+ realok = 1;
72539+ globalreal = (gid_t)-1;
72540+ } else {
72541+ globalreal = GR_GLOBAL_GID(real);
72542+ }
72543+ if (!gid_valid(effective)) {
72544+ effectiveok = 1;
72545+ globaleffective = (gid_t)-1;
72546+ } else {
72547+ globaleffective = GR_GLOBAL_GID(effective);
72548+ }
72549+ if (!gid_valid(fs)) {
72550+ fsok = 1;
72551+ globalfs = (gid_t)-1;
72552+ } else {
72553+ globalfs = GR_GLOBAL_GID(fs);
72554+ }
72555+
72556+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
72557+ for (i = 0; i < num; i++) {
72558+ curgid = gidlist[i];
72559+ if (globalreal == curgid)
72560+ realok = 1;
72561+ if (globaleffective == curgid)
72562+ effectiveok = 1;
72563+ if (globalfs == curgid)
72564+ fsok = 1;
72565+ }
72566+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
72567+ for (i = 0; i < num; i++) {
72568+ curgid = gidlist[i];
72569+ if (globalreal == curgid)
72570+ break;
72571+ if (globaleffective == curgid)
72572+ break;
72573+ if (globalfs == curgid)
72574+ break;
72575+ }
72576+ /* not in deny list */
72577+ if (i == num) {
72578+ realok = 1;
72579+ effectiveok = 1;
72580+ fsok = 1;
72581+ }
72582+ }
72583+
72584+ if (realok && effectiveok && fsok)
72585+ return 0;
72586+ else {
72587+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
72588+ return 1;
72589+ }
72590+}
72591+
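Both transition checks above share one scan: under GR_ID_ALLOW every ID in the (real, effective, fs) triple must appear in the list, while under GR_ID_DENY a single hit on any of the three rejects the whole triple. A minimal user-space sketch of that decision follows; the names and the ID_ALLOW/ID_DENY values are hypothetical stand-ins for the kernel's types and GR_ID_* flags.

#include <stdio.h>

#define ID_ALLOW 1
#define ID_DENY  2

/* Sketch of the transition check: returns 0 if the (real, effective, fs)
   triple may be adopted, 1 if it must be refused. */
static int check_id_transition(const unsigned int *list, unsigned int num,
                               int trans_type, unsigned int real,
                               unsigned int effective, unsigned int fs)
{
    int realok = 0, effectiveok = 0, fsok = 0;
    unsigned int i;

    if (trans_type == ID_ALLOW) {
        /* each ID must individually be whitelisted */
        for (i = 0; i < num; i++) {
            if (list[i] == real)      realok = 1;
            if (list[i] == effective) effectiveok = 1;
            if (list[i] == fs)        fsok = 1;
        }
    } else if (trans_type == ID_DENY) {
        /* one blacklisted ID anywhere in the triple rejects it */
        for (i = 0; i < num; i++)
            if (list[i] == real || list[i] == effective || list[i] == fs)
                break;
        if (i == num)           /* fell off the end: nothing matched */
            realok = effectiveok = fsok = 1;
    }

    return (realok && effectiveok && fsok) ? 0 : 1;
}

int main(void)
{
    unsigned int allowed[] = { 0, 1000 };

    /* permitted: both IDs used are whitelisted */
    printf("%d\n", check_id_transition(allowed, 2, ID_ALLOW, 1000, 1000, 0));
    /* refused: 33 is not in the allow list */
    printf("%d\n", check_id_transition(allowed, 2, ID_ALLOW, 1000, 33, 0));
    return 0;
}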
72592+extern int gr_acl_is_capable(const int cap);
72593+
72594+void
72595+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
72596+{
72597+ struct acl_role_label *role = task->role;
72598+ struct acl_subject_label *subj = NULL;
72599+ struct acl_object_label *obj;
72600+ struct file *filp;
72601+ uid_t uid;
72602+ gid_t gid;
72603+
72604+ if (unlikely(!(gr_status & GR_READY)))
72605+ return;
72606+
72607+ uid = GR_GLOBAL_UID(kuid);
72608+ gid = GR_GLOBAL_GID(kgid);
72609+
72610+ filp = task->exec_file;
72611+
72612+ /* kernel process, we'll give them the kernel role */
72613+ if (unlikely(!filp)) {
72614+ task->role = running_polstate.kernel_role;
72615+ task->acl = running_polstate.kernel_role->root_label;
72616+ return;
72617+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
72618+ /* save the current ip at time of role lookup so that the proper
72619+ IP will be learned for role_allowed_ip */
72620+ task->signal->saved_ip = task->signal->curr_ip;
72621+ role = lookup_acl_role_label(task, uid, gid);
72622+ }
72623+
72624+ /* don't change the role if we're not a privileged process */
72625+ if (role && task->role != role &&
72626+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
72627+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
72628+ return;
72629+
72630+	/* perform the subject lookup in the possibly new role;
72631+	   we can use this result below in the case where role == task->role
72632+	   */
72633+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
72634+
72635+	/* if the uid/gid change resolved to the same role and the task
72636+	   is using inheritance, don't lose the inherited subject:
72637+	   when the current subject differs from what a normal lookup
72638+	   would produce, we arrived at it via inheritance, so keep
72639+	   that subject
72640+	   */
72641+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
72642+ (subj == task->acl)))
72643+ task->acl = subj;
72644+
72645+ /* leave task->inherited unaffected */
72646+
72647+ task->role = role;
72648+
72649+ task->is_writable = 0;
72650+
72651+ /* ignore additional mmap checks for processes that are writable
72652+ by the default ACL */
72653+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
72654+ if (unlikely(obj->mode & GR_WRITE))
72655+ task->is_writable = 1;
72656+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
72657+ if (unlikely(obj->mode & GR_WRITE))
72658+ task->is_writable = 1;
72659+
72660+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
72661+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
72662+#endif
72663+
72664+ gr_set_proc_res(task);
72665+
72666+ return;
72667+}
72668+
72669+int
72670+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
72671+ const int unsafe_flags)
72672+{
72673+ struct task_struct *task = current;
72674+ struct acl_subject_label *newacl;
72675+ struct acl_object_label *obj;
72676+ __u32 retmode;
72677+
72678+ if (unlikely(!(gr_status & GR_READY)))
72679+ return 0;
72680+
72681+ newacl = chk_subj_label(dentry, mnt, task->role);
72682+
72683+	/* special handling for the case where an admin role ran strace -f -p <pid>
72684+	   and the traced pid then did an exec
72685+	   */
72686+ rcu_read_lock();
72687+ read_lock(&tasklist_lock);
72688+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
72689+ (task->parent->acl->mode & GR_POVERRIDE))) {
72690+ read_unlock(&tasklist_lock);
72691+ rcu_read_unlock();
72692+ goto skip_check;
72693+ }
72694+ read_unlock(&tasklist_lock);
72695+ rcu_read_unlock();
72696+
72697+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
72698+ !(task->role->roletype & GR_ROLE_GOD) &&
72699+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
72700+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
72701+ if (unsafe_flags & LSM_UNSAFE_SHARE)
72702+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
72703+ else
72704+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
72705+ return -EACCES;
72706+ }
72707+
72708+skip_check:
72709+
72710+ obj = chk_obj_label(dentry, mnt, task->acl);
72711+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
72712+
72713+ if (!(task->acl->mode & GR_INHERITLEARN) &&
72714+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
72715+ if (obj->nested)
72716+ task->acl = obj->nested;
72717+ else
72718+ task->acl = newacl;
72719+ task->inherited = 0;
72720+ } else {
72721+ task->inherited = 1;
72722+ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
72723+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
72724+ }
72725+
72726+ task->is_writable = 0;
72727+
72728+ /* ignore additional mmap checks for processes that are writable
72729+ by the default ACL */
72730+ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
72731+ if (unlikely(obj->mode & GR_WRITE))
72732+ task->is_writable = 1;
72733+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
72734+ if (unlikely(obj->mode & GR_WRITE))
72735+ task->is_writable = 1;
72736+
72737+ gr_set_proc_res(task);
72738+
72739+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
72740+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
72741+#endif
72742+ return 0;
72743+}
72744+
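The heart of gr_set_proc_label is the inheritance decision: a freshly exec'd task keeps its current subject only when that subject is in inherit-learning mode, or when the new subject is not itself learning and the object matched for the binary carries GR_INHERIT; otherwise it adopts the newly looked-up subject (or that object's nested subject). A compact sketch of that predicate, with hypothetical F_* values standing in for the GR_* mode bits:

#include <stdio.h>

/* hypothetical stand-ins for the GR_* mode bits used above */
#define F_INHERIT       0x1   /* object allows subject inheritance   */
#define F_LEARN         0x2   /* subject is in learning mode         */
#define F_INHERITLEARN  0x4   /* subject is in inherit-learning mode */

/* Decide which subject a task runs under after exec: returns 1 when the
   freshly looked-up subject is adopted, 0 when the current subject is
   inherited across the exec. */
static int adopt_new_subject(unsigned int cur_subj_mode,
                             unsigned int new_subj_mode,
                             unsigned int obj_mode)
{
    if (!(cur_subj_mode & F_INHERITLEARN) &&
        ((new_subj_mode & F_LEARN) || !(obj_mode & F_INHERIT)))
        return 1;   /* take the new subject (or its nested subject) */
    return 0;       /* keep the old subject; task->inherited = 1    */
}

int main(void)
{
    /* object marked inheritable, no learning: subject carries over */
    printf("%d\n", adopt_new_subject(0, 0, F_INHERIT));
    /* object not inheritable: exec adopts the new subject */
    printf("%d\n", adopt_new_subject(0, 0, 0));
    return 0;
}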
72745+/* always called with valid inodev ptr */
72746+static void
72747+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
72748+{
72749+ struct acl_object_label *matchpo;
72750+ struct acl_subject_label *matchps;
72751+ struct acl_subject_label *subj;
72752+ struct acl_role_label *role;
72753+ unsigned int x;
72754+
72755+ FOR_EACH_ROLE_START(role)
72756+ FOR_EACH_SUBJECT_START(role, subj, x)
72757+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
72758+ matchpo->mode |= GR_DELETED;
72759+ FOR_EACH_SUBJECT_END(subj,x)
72760+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
72761+ /* nested subjects aren't in the role's subj_hash table */
72762+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
72763+ matchpo->mode |= GR_DELETED;
72764+ FOR_EACH_NESTED_SUBJECT_END(subj)
72765+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
72766+ matchps->mode |= GR_DELETED;
72767+ FOR_EACH_ROLE_END(role)
72768+
72769+ inodev->nentry->deleted = 1;
72770+
72771+ return;
72772+}
72773+
72774+void
72775+gr_handle_delete(const ino_t ino, const dev_t dev)
72776+{
72777+ struct inodev_entry *inodev;
72778+
72779+ if (unlikely(!(gr_status & GR_READY)))
72780+ return;
72781+
72782+ write_lock(&gr_inode_lock);
72783+ inodev = lookup_inodev_entry(ino, dev);
72784+ if (inodev != NULL)
72785+ do_handle_delete(inodev, ino, dev);
72786+ write_unlock(&gr_inode_lock);
72787+
72788+ return;
72789+}
72790+
72791+static void
72792+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
72793+ const ino_t newinode, const dev_t newdevice,
72794+ struct acl_subject_label *subj)
72795+{
72796+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
72797+ struct acl_object_label *match;
72798+
72799+ match = subj->obj_hash[index];
72800+
72801+ while (match && (match->inode != oldinode ||
72802+ match->device != olddevice ||
72803+ !(match->mode & GR_DELETED)))
72804+ match = match->next;
72805+
72806+ if (match && (match->inode == oldinode)
72807+ && (match->device == olddevice)
72808+ && (match->mode & GR_DELETED)) {
72809+ if (match->prev == NULL) {
72810+ subj->obj_hash[index] = match->next;
72811+ if (match->next != NULL)
72812+ match->next->prev = NULL;
72813+ } else {
72814+ match->prev->next = match->next;
72815+ if (match->next != NULL)
72816+ match->next->prev = match->prev;
72817+ }
72818+ match->prev = NULL;
72819+ match->next = NULL;
72820+ match->inode = newinode;
72821+ match->device = newdevice;
72822+ match->mode &= ~GR_DELETED;
72823+
72824+ insert_acl_obj_label(match, subj);
72825+ }
72826+
72827+ return;
72828+}
72829+
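update_acl_obj_label above and the two updaters below (update_acl_subj_label, update_inodev_entry) repeat one pattern: find the deleted entry keyed by the old (inode, device) pair in a doubly linked hash chain, unlink it, re-key it to the new pair, clear the deleted flag, and reinsert it. A stand-alone sketch of that unlink-and-rehash step on a toy node type (all names hypothetical):

#include <stdio.h>
#include <stddef.h>

#define TABLE_SIZE 8

/* toy stand-in for an acl_object_label hash node */
struct node {
    unsigned long key;          /* stands in for the (inode, device) pair */
    int deleted;
    struct node *prev, *next;
};

static struct node *table[TABLE_SIZE];

static unsigned int hash(unsigned long key) { return key % TABLE_SIZE; }

static void insert(struct node *n)
{
    unsigned int i = hash(n->key);

    n->prev = NULL;
    n->next = table[i];
    if (table[i])
        table[i]->prev = n;
    table[i] = n;
}

/* Re-key a deleted entry: unlink it from the old chain, update the key,
   clear the deleted flag, and reinsert it into the (possibly different)
   chain for the new key -- the same dance the three updaters here do. */
static void rekey(unsigned long oldkey, unsigned long newkey)
{
    struct node *m = table[hash(oldkey)];

    while (m && (m->key != oldkey || !m->deleted))
        m = m->next;
    if (!m)
        return;

    if (m->prev == NULL) {              /* head of chain */
        table[hash(oldkey)] = m->next;
        if (m->next)
            m->next->prev = NULL;
    } else {                            /* middle/tail of chain */
        m->prev->next = m->next;
        if (m->next)
            m->next->prev = m->prev;
    }
    m->prev = m->next = NULL;
    m->key = newkey;
    m->deleted = 0;
    insert(m);
}

int main(void)
{
    struct node n = { .key = 42, .deleted = 1 };

    insert(&n);
    rekey(42, 7);   /* entry now lives in bucket hash(7), undeleted */
    printf("bucket %u holds key %lu\n", hash(7), table[hash(7)]->key);
    return 0;
}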
72830+static void
72831+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
72832+ const ino_t newinode, const dev_t newdevice,
72833+ struct acl_role_label *role)
72834+{
72835+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
72836+ struct acl_subject_label *match;
72837+
72838+ match = role->subj_hash[index];
72839+
72840+ while (match && (match->inode != oldinode ||
72841+ match->device != olddevice ||
72842+ !(match->mode & GR_DELETED)))
72843+ match = match->next;
72844+
72845+ if (match && (match->inode == oldinode)
72846+ && (match->device == olddevice)
72847+ && (match->mode & GR_DELETED)) {
72848+ if (match->prev == NULL) {
72849+ role->subj_hash[index] = match->next;
72850+ if (match->next != NULL)
72851+ match->next->prev = NULL;
72852+ } else {
72853+ match->prev->next = match->next;
72854+ if (match->next != NULL)
72855+ match->next->prev = match->prev;
72856+ }
72857+ match->prev = NULL;
72858+ match->next = NULL;
72859+ match->inode = newinode;
72860+ match->device = newdevice;
72861+ match->mode &= ~GR_DELETED;
72862+
72863+ insert_acl_subj_label(match, role);
72864+ }
72865+
72866+ return;
72867+}
72868+
72869+static void
72870+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
72871+ const ino_t newinode, const dev_t newdevice)
72872+{
72873+ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
72874+ struct inodev_entry *match;
72875+
72876+ match = running_polstate.inodev_set.i_hash[index];
72877+
72878+ while (match && (match->nentry->inode != oldinode ||
72879+ match->nentry->device != olddevice || !match->nentry->deleted))
72880+ match = match->next;
72881+
72882+ if (match && (match->nentry->inode == oldinode)
72883+ && (match->nentry->device == olddevice) &&
72884+ match->nentry->deleted) {
72885+ if (match->prev == NULL) {
72886+ running_polstate.inodev_set.i_hash[index] = match->next;
72887+ if (match->next != NULL)
72888+ match->next->prev = NULL;
72889+ } else {
72890+ match->prev->next = match->next;
72891+ if (match->next != NULL)
72892+ match->next->prev = match->prev;
72893+ }
72894+ match->prev = NULL;
72895+ match->next = NULL;
72896+ match->nentry->inode = newinode;
72897+ match->nentry->device = newdevice;
72898+ match->nentry->deleted = 0;
72899+
72900+ insert_inodev_entry(match);
72901+ }
72902+
72903+ return;
72904+}
72905+
72906+static void
72907+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
72908+{
72909+ struct acl_subject_label *subj;
72910+ struct acl_role_label *role;
72911+ unsigned int x;
72912+
72913+ FOR_EACH_ROLE_START(role)
72914+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
72915+
72916+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
72917+			if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
72918+				subj->inode = ino;
72919+				subj->device = dev;
72920+			}
72921+ /* nested subjects aren't in the role's subj_hash table */
72922+ update_acl_obj_label(matchn->inode, matchn->device,
72923+ ino, dev, subj);
72924+ FOR_EACH_NESTED_SUBJECT_END(subj)
72925+ FOR_EACH_SUBJECT_START(role, subj, x)
72926+ update_acl_obj_label(matchn->inode, matchn->device,
72927+ ino, dev, subj);
72928+ FOR_EACH_SUBJECT_END(subj,x)
72929+ FOR_EACH_ROLE_END(role)
72930+
72931+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
72932+
72933+ return;
72934+}
72935+
72936+static void
72937+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
72938+ const struct vfsmount *mnt)
72939+{
72940+ ino_t ino = dentry->d_inode->i_ino;
72941+ dev_t dev = __get_dev(dentry);
72942+
72943+ __do_handle_create(matchn, ino, dev);
72944+
72945+ return;
72946+}
72947+
72948+void
72949+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
72950+{
72951+ struct name_entry *matchn;
72952+
72953+ if (unlikely(!(gr_status & GR_READY)))
72954+ return;
72955+
72956+ preempt_disable();
72957+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
72958+
72959+ if (unlikely((unsigned long)matchn)) {
72960+ write_lock(&gr_inode_lock);
72961+ do_handle_create(matchn, dentry, mnt);
72962+ write_unlock(&gr_inode_lock);
72963+ }
72964+ preempt_enable();
72965+
72966+ return;
72967+}
72968+
72969+void
72970+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
72971+{
72972+ struct name_entry *matchn;
72973+
72974+ if (unlikely(!(gr_status & GR_READY)))
72975+ return;
72976+
72977+ preempt_disable();
72978+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
72979+
72980+ if (unlikely((unsigned long)matchn)) {
72981+ write_lock(&gr_inode_lock);
72982+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
72983+ write_unlock(&gr_inode_lock);
72984+ }
72985+ preempt_enable();
72986+
72987+ return;
72988+}
72989+
72990+void
72991+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
72992+ struct dentry *old_dentry,
72993+ struct dentry *new_dentry,
72994+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
72995+{
72996+ struct name_entry *matchn;
72997+ struct name_entry *matchn2 = NULL;
72998+ struct inodev_entry *inodev;
72999+ struct inode *inode = new_dentry->d_inode;
73000+ ino_t old_ino = old_dentry->d_inode->i_ino;
73001+ dev_t old_dev = __get_dev(old_dentry);
73002+ unsigned int exchange = flags & RENAME_EXCHANGE;
73003+
73004+	/* vfs_rename swaps the name and parent link for old_dentry and
73005+	   new_dentry.
73006+	   At this point, old_dentry has the new name, parent link, and
73007+	   inode for the renamed file.
73008+	   If a file is being replaced by a rename, new_dentry has the
73009+	   inode and name for the replaced file.
73010+	   */
73011+
73012+ if (unlikely(!(gr_status & GR_READY)))
73013+ return;
73014+
73015+ preempt_disable();
73016+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
73017+
73018+ /* exchange cases:
73019+ a filename exists for the source, but not dest
73020+ do a recreate on source
73021+ a filename exists for the dest, but not source
73022+ do a recreate on dest
73023+ a filename exists for both source and dest
73024+ delete source and dest, then create source and dest
73025+ a filename exists for neither source nor dest
73026+ no updates needed
73027+
73028+ the name entry lookups get us the old inode/dev associated with
73029+ each name, so do the deletes first (if possible) so that when
73030+ we do the create, we pick up on the right entries
73031+ */
73032+
73033+ if (exchange)
73034+ matchn2 = lookup_name_entry(gr_to_filename_rbac(new_dentry, mnt));
73035+
73036+ /* we wouldn't have to check d_inode if it weren't for
73037+ NFS silly-renaming
73038+ */
73039+
73040+ write_lock(&gr_inode_lock);
73041+ if (unlikely((replace || exchange) && inode)) {
73042+ ino_t new_ino = inode->i_ino;
73043+ dev_t new_dev = __get_dev(new_dentry);
73044+
73045+ inodev = lookup_inodev_entry(new_ino, new_dev);
73046+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
73047+ do_handle_delete(inodev, new_ino, new_dev);
73048+ }
73049+
73050+ inodev = lookup_inodev_entry(old_ino, old_dev);
73051+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
73052+ do_handle_delete(inodev, old_ino, old_dev);
73053+
73054+ if (unlikely(matchn != NULL))
73055+ do_handle_create(matchn, old_dentry, mnt);
73056+
73057+ if (unlikely(matchn2 != NULL))
73058+ do_handle_create(matchn2, new_dentry, mnt);
73059+
73060+ write_unlock(&gr_inode_lock);
73061+ preempt_enable();
73062+
73063+ return;
73064+}
73065+
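The comment block in gr_handle_rename enumerates the RENAME_EXCHANGE cases and fixes the ordering: deletes run before creates, so the create step picks up the just-deleted entries. A toy sketch of that case split (hypothetical names; src_match/dst_match stand in for the two lookup_name_entry results):

#include <stdio.h>

/* The four RENAME_EXCHANGE cases from the comment above, driven by which
   side(s) of the exchange have a policy name entry.  The combined case
   spells out the delete-before-create ordering. */
static const char *exchange_action(int src_match, int dst_match)
{
    if (src_match && dst_match)
        return "delete source and dest, then recreate source and dest";
    if (src_match)
        return "recreate source entry";
    if (dst_match)
        return "recreate dest entry";
    return "no policy update needed";
}

int main(void)
{
    printf("%s\n", exchange_action(1, 0));
    printf("%s\n", exchange_action(1, 1));
    return 0;
}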
73066+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
73067+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
73068+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
73069+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
73070+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
73071+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
73072+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
73073+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
73074+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
73075+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
73076+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
73077+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
73078+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
73079+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
73080+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
73081+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
73082+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
73083+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
73084+};
73085+
73086+void
73087+gr_learn_resource(const struct task_struct *task,
73088+ const int res, const unsigned long wanted, const int gt)
73089+{
73090+ struct acl_subject_label *acl;
73091+ const struct cred *cred;
73092+
73093+ if (unlikely((gr_status & GR_READY) &&
73094+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
73095+ goto skip_reslog;
73096+
73097+ gr_log_resource(task, res, wanted, gt);
73098+skip_reslog:
73099+
73100+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
73101+ return;
73102+
73103+ acl = task->acl;
73104+
73105+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
73106+ !(acl->resmask & (1U << (unsigned short) res))))
73107+ return;
73108+
73109+ if (wanted >= acl->res[res].rlim_cur) {
73110+ unsigned long res_add;
73111+
73112+ res_add = wanted + res_learn_bumps[res];
73113+
73114+ acl->res[res].rlim_cur = res_add;
73115+
73116+ if (wanted > acl->res[res].rlim_max)
73117+ acl->res[res].rlim_max = res_add;
73118+
73119+ /* only log the subject filename, since resource logging is supported for
73120+ single-subject learning only */
73121+ rcu_read_lock();
73122+ cred = __task_cred(task);
73123+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
73124+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
73125+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
73126+ "", (unsigned long) res, &task->signal->saved_ip);
73127+ rcu_read_unlock();
73128+ }
73129+
73130+ return;
73131+}
73132+EXPORT_SYMBOL_GPL(gr_learn_resource);
73133+#endif
73134+
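gr_learn_resource grows a learned limit to wanted plus a per-resource bump, so a run of slightly increasing requests settles after one learning event instead of producing one event per request. A user-space sketch of the bump logic with hypothetical resource indices and bump values:

#include <stdio.h>

#define R_NOFILE 0
#define R_NPROC  1
#define NLIMITS  2

/* hypothetical per-resource slack, mirroring the res_learn_bumps idea */
static const unsigned long bumps[NLIMITS] = {
    [R_NOFILE] = 64,
    [R_NPROC]  = 16,
};

struct limit { unsigned long cur, max; };

/* Raise a learned limit: once a request exceeds the current soft limit,
   jump to wanted + bump so nearby future requests fit without another
   learning event. */
static void learn(struct limit *lim, int res, unsigned long wanted)
{
    if (wanted >= lim->cur) {
        unsigned long bumped = wanted + bumps[res];

        lim->cur = bumped;
        if (wanted > lim->max)
            lim->max = bumped;
    }
}

int main(void)
{
    struct limit nofile = { 0, 0 };

    learn(&nofile, R_NOFILE, 100);   /* learned: cur = max = 164 */
    learn(&nofile, R_NOFILE, 150);   /* 150 < 164: already covered, no-op */
    printf("cur=%lu max=%lu\n", nofile.cur, nofile.max);
    return 0;
}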
73135+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
73136+void
73137+pax_set_initial_flags(struct linux_binprm *bprm)
73138+{
73139+ struct task_struct *task = current;
73140+ struct acl_subject_label *proc;
73141+ unsigned long flags;
73142+
73143+ if (unlikely(!(gr_status & GR_READY)))
73144+ return;
73145+
73146+ flags = pax_get_flags(task);
73147+
73148+ proc = task->acl;
73149+
73150+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
73151+ flags &= ~MF_PAX_PAGEEXEC;
73152+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
73153+ flags &= ~MF_PAX_SEGMEXEC;
73154+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
73155+ flags &= ~MF_PAX_RANDMMAP;
73156+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
73157+ flags &= ~MF_PAX_EMUTRAMP;
73158+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
73159+ flags &= ~MF_PAX_MPROTECT;
73160+
73161+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
73162+ flags |= MF_PAX_PAGEEXEC;
73163+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
73164+ flags |= MF_PAX_SEGMEXEC;
73165+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
73166+ flags |= MF_PAX_RANDMMAP;
73167+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
73168+ flags |= MF_PAX_EMUTRAMP;
73169+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
73170+ flags |= MF_PAX_MPROTECT;
73171+
73172+ pax_set_flags(task, flags);
73173+
73174+ return;
73175+}
73176+#endif
73177+
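pax_set_initial_flags translates a subject's per-feature enable/disable bits into the task's MF_PAX_* flag word, applying disables before enables. A sketch of that translation for two of the features, with hypothetical bit values standing in for the real MF_PAX_*/GR_PAX_* constants:

#include <stdio.h>

/* hypothetical bit values standing in for the MF_PAX_* / GR_PAX_* flags */
#define MF_PAGEEXEC        0x01
#define MF_MPROTECT        0x02

#define SUBJ_DISABLE_PAGEEXEC  0x01
#define SUBJ_ENABLE_PAGEEXEC   0x02
#define SUBJ_DISABLE_MPROTECT  0x04
#define SUBJ_ENABLE_MPROTECT   0x08

/* Apply a subject's PaX overrides to a task's current flag word:
   clear everything the subject disables, then set everything it
   enables, so an enable bit wins if a policy sets both. */
static unsigned long apply_pax_policy(unsigned long flags,
                                      unsigned int subj_pax)
{
    if (subj_pax & SUBJ_DISABLE_PAGEEXEC) flags &= ~MF_PAGEEXEC;
    if (subj_pax & SUBJ_DISABLE_MPROTECT) flags &= ~MF_MPROTECT;

    if (subj_pax & SUBJ_ENABLE_PAGEEXEC)  flags |= MF_PAGEEXEC;
    if (subj_pax & SUBJ_ENABLE_MPROTECT)  flags |= MF_MPROTECT;

    return flags;
}

int main(void)
{
    /* task starts with both protections on; subject disables MPROTECT */
    unsigned long f = apply_pax_policy(MF_PAGEEXEC | MF_MPROTECT,
                                       SUBJ_DISABLE_MPROTECT);
    printf("flags=%#lx\n", f);   /* 0x1: PAGEEXEC kept, MPROTECT cleared */
    return 0;
}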
73178+int
73179+gr_handle_proc_ptrace(struct task_struct *task)
73180+{
73181+ struct file *filp;
73182+ struct task_struct *tmp = task;
73183+ struct task_struct *curtemp = current;
73184+ __u32 retmode;
73185+
73186+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
73187+ if (unlikely(!(gr_status & GR_READY)))
73188+ return 0;
73189+#endif
73190+
73191+ read_lock(&tasklist_lock);
73192+ read_lock(&grsec_exec_file_lock);
73193+ filp = task->exec_file;
73194+
73195+ while (task_pid_nr(tmp) > 0) {
73196+ if (tmp == curtemp)
73197+ break;
73198+ tmp = tmp->real_parent;
73199+ }
73200+
73201+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
73202+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
73203+ read_unlock(&grsec_exec_file_lock);
73204+ read_unlock(&tasklist_lock);
73205+ return 1;
73206+ }
73207+
73208+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
73209+ if (!(gr_status & GR_READY)) {
73210+ read_unlock(&grsec_exec_file_lock);
73211+ read_unlock(&tasklist_lock);
73212+ return 0;
73213+ }
73214+#endif
73215+
73216+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
73217+ read_unlock(&grsec_exec_file_lock);
73218+ read_unlock(&tasklist_lock);
73219+
73220+ if (retmode & GR_NOPTRACE)
73221+ return 1;
73222+
73223+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
73224+ && (current->acl != task->acl || (current->acl != current->role->root_label
73225+ && task_pid_nr(current) != task_pid_nr(task))))
73226+ return 1;
73227+
73228+ return 0;
73229+}
73230+
73231+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
73232+{
73233+ if (unlikely(!(gr_status & GR_READY)))
73234+ return;
73235+
73236+ if (!(current->role->roletype & GR_ROLE_GOD))
73237+ return;
73238+
73239+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
73240+ p->role->rolename, gr_task_roletype_to_char(p),
73241+ p->acl->filename);
73242+}
73243+
73244+int
73245+gr_handle_ptrace(struct task_struct *task, const long request)
73246+{
73247+ struct task_struct *tmp = task;
73248+ struct task_struct *curtemp = current;
73249+ __u32 retmode;
73250+
73251+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
73252+ if (unlikely(!(gr_status & GR_READY)))
73253+ return 0;
73254+#endif
73255+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
73256+ read_lock(&tasklist_lock);
73257+ while (task_pid_nr(tmp) > 0) {
73258+ if (tmp == curtemp)
73259+ break;
73260+ tmp = tmp->real_parent;
73261+ }
73262+
73263+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
73264+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
73265+ read_unlock(&tasklist_lock);
73266+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
73267+ return 1;
73268+ }
73269+ read_unlock(&tasklist_lock);
73270+ }
73271+
73272+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
73273+ if (!(gr_status & GR_READY))
73274+ return 0;
73275+#endif
73276+
73277+ read_lock(&grsec_exec_file_lock);
73278+ if (unlikely(!task->exec_file)) {
73279+ read_unlock(&grsec_exec_file_lock);
73280+ return 0;
73281+ }
73282+
73283+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
73284+ read_unlock(&grsec_exec_file_lock);
73285+
73286+ if (retmode & GR_NOPTRACE) {
73287+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
73288+ return 1;
73289+ }
73290+
73291+ if (retmode & GR_PTRACERD) {
73292+ switch (request) {
73293+ case PTRACE_SEIZE:
73294+ case PTRACE_POKETEXT:
73295+ case PTRACE_POKEDATA:
73296+ case PTRACE_POKEUSR:
73297+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
73298+ case PTRACE_SETREGS:
73299+ case PTRACE_SETFPREGS:
73300+#endif
73301+#ifdef CONFIG_X86
73302+ case PTRACE_SETFPXREGS:
73303+#endif
73304+#ifdef CONFIG_ALTIVEC
73305+ case PTRACE_SETVRREGS:
73306+#endif
73307+ return 1;
73308+ default:
73309+ return 0;
73310+ }
73311+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
73312+ !(current->role->roletype & GR_ROLE_GOD) &&
73313+ (current->acl != task->acl)) {
73314+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
73315+ return 1;
73316+ }
73317+
73318+ return 0;
73319+}
73320+
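Both ptrace handlers above walk the target's real_parent chain up to PID 0 to test whether the tracer sits inside the traced task's process subtree; falling off the end of the chain means current is not an ancestor and the stricter policy applies. A user-space sketch of that walk on a toy task structure (names hypothetical):

#include <stdio.h>
#include <stddef.h>

/* toy stand-in for task_struct's real_parent chain */
struct task {
    int pid;
    struct task *real_parent;
};

/* Walk target's ancestry toward the root (pid 0): returns 1 if "self"
   is target itself or one of its ancestors -- the same loop shape the
   ptrace handlers above use to decide whether the tracer sits inside
   the traced task's process subtree. */
static int is_ancestor_or_self(struct task *self, struct task *target)
{
    struct task *tmp = target;

    while (tmp->pid > 0) {
        if (tmp == self)
            return 1;
        tmp = tmp->real_parent;
    }
    return 0;
}

int main(void)
{
    struct task init_t = { 0, NULL };
    struct task shell  = { 100, &init_t };
    struct task child  = { 200, &shell };

    printf("%d\n", is_ancestor_or_self(&shell, &child));  /* 1 */
    printf("%d\n", is_ancestor_or_self(&child, &shell));  /* 0 */
    return 0;
}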
73321+static int is_writable_mmap(const struct file *filp)
73322+{
73323+ struct task_struct *task = current;
73324+ struct acl_object_label *obj, *obj2;
73325+
73326+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
73327+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
73328+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
73329+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
73330+ task->role->root_label);
73331+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
73332+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
73333+ return 1;
73334+ }
73335+ }
73336+ return 0;
73337+}
73338+
73339+int
73340+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
73341+{
73342+ __u32 mode;
73343+
73344+ if (unlikely(!file || !(prot & PROT_EXEC)))
73345+ return 1;
73346+
73347+ if (is_writable_mmap(file))
73348+ return 0;
73349+
73350+ mode =
73351+ gr_search_file(file->f_path.dentry,
73352+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
73353+ file->f_path.mnt);
73354+
73355+ if (!gr_tpe_allow(file))
73356+ return 0;
73357+
73358+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
73359+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
73360+ return 0;
73361+ } else if (unlikely(!(mode & GR_EXEC))) {
73362+ return 0;
73363+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
73364+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
73365+ return 1;
73366+ }
73367+
73368+ return 1;
73369+}
73370+
73371+int
73372+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
73373+{
73374+ __u32 mode;
73375+
73376+ if (unlikely(!file || !(prot & PROT_EXEC)))
73377+ return 1;
73378+
73379+ if (is_writable_mmap(file))
73380+ return 0;
73381+
73382+ mode =
73383+ gr_search_file(file->f_path.dentry,
73384+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
73385+ file->f_path.mnt);
73386+
73387+ if (!gr_tpe_allow(file))
73388+ return 0;
73389+
73390+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
73391+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
73392+ return 0;
73393+ } else if (unlikely(!(mode & GR_EXEC))) {
73394+ return 0;
73395+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
73396+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
73397+ return 1;
73398+ }
73399+
73400+ return 1;
73401+}
73402+
73403+void
73404+gr_acl_handle_psacct(struct task_struct *task, const long code)
73405+{
73406+ unsigned long runtime, cputime;
73407+ cputime_t utime, stime;
73408+ unsigned int wday, cday;
73409+ __u8 whr, chr;
73410+ __u8 wmin, cmin;
73411+ __u8 wsec, csec;
73412+ struct timespec timeval;
73413+
73414+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
73415+ !(task->acl->mode & GR_PROCACCT)))
73416+ return;
73417+
73418+ do_posix_clock_monotonic_gettime(&timeval);
73419+ runtime = timeval.tv_sec - task->start_time.tv_sec;
73420+ wday = runtime / (60 * 60 * 24);
73421+ runtime -= wday * (60 * 60 * 24);
73422+ whr = runtime / (60 * 60);
73423+ runtime -= whr * (60 * 60);
73424+ wmin = runtime / 60;
73425+ runtime -= wmin * 60;
73426+ wsec = runtime;
73427+
73428+ task_cputime(task, &utime, &stime);
73429+ cputime = cputime_to_secs(utime + stime);
73430+ cday = cputime / (60 * 60 * 24);
73431+ cputime -= cday * (60 * 60 * 24);
73432+ chr = cputime / (60 * 60);
73433+ cputime -= chr * (60 * 60);
73434+ cmin = cputime / 60;
73435+ cputime -= cmin * 60;
73436+ csec = cputime;
73437+
73438+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
73439+
73440+ return;
73441+}
73442+
73443+#ifdef CONFIG_TASKSTATS
73444+int gr_is_taskstats_denied(int pid)
73445+{
73446+ struct task_struct *task;
73447+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73448+ const struct cred *cred;
73449+#endif
73450+ int ret = 0;
73451+
73452+ /* restrict taskstats viewing to un-chrooted root users
73453+ who have the 'view' subject flag if the RBAC system is enabled
73454+ */
73455+
73456+ rcu_read_lock();
73457+ read_lock(&tasklist_lock);
73458+ task = find_task_by_vpid(pid);
73459+ if (task) {
73460+#ifdef CONFIG_GRKERNSEC_CHROOT
73461+ if (proc_is_chrooted(task))
73462+ ret = -EACCES;
73463+#endif
73464+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73465+ cred = __task_cred(task);
73466+#ifdef CONFIG_GRKERNSEC_PROC_USER
73467+ if (gr_is_global_nonroot(cred->uid))
73468+ ret = -EACCES;
73469+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73470+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
73471+ ret = -EACCES;
73472+#endif
73473+#endif
73474+ if (gr_status & GR_READY) {
73475+ if (!(task->acl->mode & GR_VIEW))
73476+ ret = -EACCES;
73477+ }
73478+ } else
73479+ ret = -ENOENT;
73480+
73481+ read_unlock(&tasklist_lock);
73482+ rcu_read_unlock();
73483+
73484+ return ret;
73485+}
73486+#endif
73487+
73488+/* AUXV entries are filled via a descendant of search_binary_handler
73489+ after we've already applied the subject for the target
73490+*/
73491+int gr_acl_enable_at_secure(void)
73492+{
73493+ if (unlikely(!(gr_status & GR_READY)))
73494+ return 0;
73495+
73496+ if (current->acl->mode & GR_ATSECURE)
73497+ return 1;
73498+
73499+ return 0;
73500+}
73501+
73502+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
73503+{
73504+ struct task_struct *task = current;
73505+ struct dentry *dentry = file->f_path.dentry;
73506+ struct vfsmount *mnt = file->f_path.mnt;
73507+ struct acl_object_label *obj, *tmp;
73508+ struct acl_subject_label *subj;
73509+ unsigned int bufsize;
73510+ int is_not_root;
73511+ char *path;
73512+ dev_t dev = __get_dev(dentry);
73513+
73514+ if (unlikely(!(gr_status & GR_READY)))
73515+ return 1;
73516+
73517+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
73518+ return 1;
73519+
73520+ /* ignore Eric Biederman */
73521+ if (IS_PRIVATE(dentry->d_inode))
73522+ return 1;
73523+
73524+ subj = task->acl;
73525+ read_lock(&gr_inode_lock);
73526+ do {
73527+ obj = lookup_acl_obj_label(ino, dev, subj);
73528+ if (obj != NULL) {
73529+ read_unlock(&gr_inode_lock);
73530+ return (obj->mode & GR_FIND) ? 1 : 0;
73531+ }
73532+ } while ((subj = subj->parent_subject));
73533+ read_unlock(&gr_inode_lock);
73534+
73535+ /* this is purely an optimization since we're looking for an object
73536+ for the directory we're doing a readdir on
73537+ if it's possible for any globbed object to match the entry we're
73538+ filling into the directory, then the object we find here will be
73539+ an anchor point with attached globbed objects
73540+ */
73541+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
73542+ if (obj->globbed == NULL)
73543+ return (obj->mode & GR_FIND) ? 1 : 0;
73544+
73545+ is_not_root = ((obj->filename[0] == '/') &&
73546+ (obj->filename[1] == '\0')) ? 0 : 1;
73547+ bufsize = PAGE_SIZE - namelen - is_not_root;
73548+
73549+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
73550+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
73551+ return 1;
73552+
73553+ preempt_disable();
73554+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
73555+ bufsize);
73556+
73557+ bufsize = strlen(path);
73558+
73559+ /* if base is "/", don't append an additional slash */
73560+ if (is_not_root)
73561+ *(path + bufsize) = '/';
73562+ memcpy(path + bufsize + is_not_root, name, namelen);
73563+ *(path + bufsize + namelen + is_not_root) = '\0';
73564+
73565+ tmp = obj->globbed;
73566+ while (tmp) {
73567+ if (!glob_match(tmp->filename, path)) {
73568+ preempt_enable();
73569+ return (tmp->mode & GR_FIND) ? 1 : 0;
73570+ }
73571+ tmp = tmp->next;
73572+ }
73573+ preempt_enable();
73574+ return (obj->mode & GR_FIND) ? 1 : 0;
73575+}
73576+
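The path assembly in gr_acl_handle_filldir budgets one page for the directory path, an optional '/', and the entry name, and the single comparison (bufsize - 1) > (PAGE_SIZE - 1) rejects both a zero and an oversized budget, because an unsigned 0 - 1 wraps to the maximum value. A user-space sketch of that bounds check and the concatenation (build_entry_path is hypothetical, and snprintf stands in for the kernel's d_real_path):

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL

/* Build "<dir>/<name>" into buf the way the filldir handler above does:
   reserve namelen plus one byte for the slash (unless dir is "/"), and
   reject the request with the same single unsigned compare, where a
   bufsize of 0 wraps and so also fails the test. */
static int build_entry_path(char *buf, const char *dir,
                            const char *name, unsigned long namelen)
{
    int is_not_root = !(dir[0] == '/' && dir[1] == '\0');
    unsigned long bufsize = PAGE_SIZE - namelen - is_not_root;
    unsigned long dirlen;

    /* check bufsize > PAGE_SIZE || bufsize == 0 */
    if ((bufsize - 1) > (PAGE_SIZE - 1))
        return -1;

    /* the kernel resolves dir into the page here; we just copy it */
    snprintf(buf, bufsize, "%s", dir);
    dirlen = strlen(buf);

    /* if base is "/", don't append an additional slash */
    if (is_not_root)
        buf[dirlen] = '/';
    memcpy(buf + dirlen + is_not_root, name, namelen);
    buf[dirlen + namelen + is_not_root] = '\0';
    return 0;
}

int main(void)
{
    char page[PAGE_SIZE];

    if (!build_entry_path(page, "/etc", "passwd", 6))
        printf("%s\n", page);   /* /etc/passwd */
    return 0;
}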
73577+void gr_put_exec_file(struct task_struct *task)
73578+{
73579+ struct file *filp;
73580+
73581+ write_lock(&grsec_exec_file_lock);
73582+ filp = task->exec_file;
73583+ task->exec_file = NULL;
73584+ write_unlock(&grsec_exec_file_lock);
73585+
73586+ if (filp)
73587+ fput(filp);
73588+
73589+ return;
73590+}
73591+
73592+
73593+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
73594+EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
73595+#endif
73596+#ifdef CONFIG_SECURITY
73597+EXPORT_SYMBOL_GPL(gr_check_user_change);
73598+EXPORT_SYMBOL_GPL(gr_check_group_change);
73599+#endif
73600+
73601diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
73602new file mode 100644
73603index 0000000..18ffbbd
73604--- /dev/null
73605+++ b/grsecurity/gracl_alloc.c
73606@@ -0,0 +1,105 @@
73607+#include <linux/kernel.h>
73608+#include <linux/mm.h>
73609+#include <linux/slab.h>
73610+#include <linux/vmalloc.h>
73611+#include <linux/gracl.h>
73612+#include <linux/grsecurity.h>
73613+
73614+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
73615+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
73616+
73617+static __inline__ int
73618+alloc_pop(void)
73619+{
73620+ if (current_alloc_state->alloc_stack_next == 1)
73621+ return 0;
73622+
73623+ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
73624+
73625+ current_alloc_state->alloc_stack_next--;
73626+
73627+ return 1;
73628+}
73629+
73630+static __inline__ int
73631+alloc_push(void *buf)
73632+{
73633+ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
73634+ return 1;
73635+
73636+ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
73637+
73638+ current_alloc_state->alloc_stack_next++;
73639+
73640+ return 0;
73641+}
73642+
73643+void *
73644+acl_alloc(unsigned long len)
73645+{
73646+ void *ret = NULL;
73647+
73648+ if (!len || len > PAGE_SIZE)
73649+ goto out;
73650+
73651+ ret = kmalloc(len, GFP_KERNEL);
73652+
73653+ if (ret) {
73654+ if (alloc_push(ret)) {
73655+ kfree(ret);
73656+ ret = NULL;
73657+ }
73658+ }
73659+
73660+out:
73661+ return ret;
73662+}
73663+
73664+void *
73665+acl_alloc_num(unsigned long num, unsigned long len)
73666+{
73667+ if (!len || (num > (PAGE_SIZE / len)))
73668+ return NULL;
73669+
73670+ return acl_alloc(num * len);
73671+}
73672+
73673+void
73674+acl_free_all(void)
73675+{
73676+ if (!current_alloc_state->alloc_stack)
73677+ return;
73678+
73679+ while (alloc_pop()) ;
73680+
73681+ if (current_alloc_state->alloc_stack) {
73682+ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
73683+ kfree(current_alloc_state->alloc_stack);
73684+ else
73685+ vfree(current_alloc_state->alloc_stack);
73686+ }
73687+
73688+ current_alloc_state->alloc_stack = NULL;
73689+ current_alloc_state->alloc_stack_size = 1;
73690+ current_alloc_state->alloc_stack_next = 1;
73691+
73692+ return;
73693+}
73694+
73695+int
73696+acl_alloc_stack_init(unsigned long size)
73697+{
73698+ if ((size * sizeof (void *)) <= PAGE_SIZE)
73699+ current_alloc_state->alloc_stack =
73700+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
73701+ else
73702+ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
73703+
73704+ current_alloc_state->alloc_stack_size = size;
73705+ current_alloc_state->alloc_stack_next = 1;
73706+
73707+ if (!current_alloc_state->alloc_stack)
73708+ return 0;
73709+ else
73710+ return 1;
73711+}
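gracl_alloc.c records every policy allocation on a stack so acl_free_all() can unwind an entire policy in one pass, choosing kmalloc when the stack itself fits in a page and vmalloc otherwise. A user-space sketch of the same pattern using plain malloc (names hypothetical, without the kmalloc/vmalloc size split):

#include <stdio.h>
#include <stdlib.h>

/* user-space sketch of the allocation-stack pattern: every allocation is
   recorded so one call can free the whole set, mirroring acl_alloc()/
   acl_free_all() above */
static void **alloc_stack;
static unsigned long stack_size, stack_next;

static int stack_init(unsigned long size)
{
    alloc_stack = malloc(size * sizeof(void *));
    stack_size = size;
    stack_next = 0;
    return alloc_stack != NULL;
}

static void *tracked_alloc(size_t len)
{
    void *p;

    if (stack_next >= stack_size)
        return NULL;            /* stack full: refuse, as alloc_push does */
    p = malloc(len);
    if (p)
        alloc_stack[stack_next++] = p;
    return p;
}

static void free_all(void)
{
    while (stack_next > 0)
        free(alloc_stack[--stack_next]);
    free(alloc_stack);
    alloc_stack = NULL;
    stack_size = 0;
}

int main(void)
{
    if (!stack_init(16))
        return 1;
    tracked_alloc(32);
    tracked_alloc(64);
    free_all();                 /* both blocks released in one pass */
    puts("done");
    return 0;
}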
73712diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
73713new file mode 100644
73714index 0000000..1a94c11
73715--- /dev/null
73716+++ b/grsecurity/gracl_cap.c
73717@@ -0,0 +1,127 @@
73718+#include <linux/kernel.h>
73719+#include <linux/module.h>
73720+#include <linux/sched.h>
73721+#include <linux/gracl.h>
73722+#include <linux/grsecurity.h>
73723+#include <linux/grinternal.h>
73724+
73725+extern const char *captab_log[];
73726+extern int captab_log_entries;
73727+
73728+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
73729+{
73730+ struct acl_subject_label *curracl;
73731+
73732+ if (!gr_acl_is_enabled())
73733+ return 1;
73734+
73735+ curracl = task->acl;
73736+
73737+ if (curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
73738+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
73739+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
73740+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
73741+ gr_to_filename(task->exec_file->f_path.dentry,
73742+ task->exec_file->f_path.mnt) : curracl->filename,
73743+ curracl->filename, 0UL,
73744+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
73745+ return 1;
73746+ }
73747+
73748+ return 0;
73749+}
73750+
73751+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
73752+{
73753+ struct acl_subject_label *curracl;
73754+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
73755+ kernel_cap_t cap_audit = __cap_empty_set;
73756+
73757+ if (!gr_acl_is_enabled())
73758+ return 1;
73759+
73760+ curracl = task->acl;
73761+
73762+ cap_drop = curracl->cap_lower;
73763+ cap_mask = curracl->cap_mask;
73764+ cap_audit = curracl->cap_invert_audit;
73765+
73766+ while ((curracl = curracl->parent_subject)) {
73767+		/* walk up the parent subjects: the nearest subject whose mask
73768+		   mentions the cap decides it.  If this level mentions a cap the
73769+		   computed mask doesn't yet cover, mark it decided, and record
73770+		   whether this level lowers it (dropped) or inverts its auditing
73771+		   */
73772+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
73773+ cap_raise(cap_mask, cap);
73774+ if (cap_raised(curracl->cap_lower, cap))
73775+ cap_raise(cap_drop, cap);
73776+ if (cap_raised(curracl->cap_invert_audit, cap))
73777+ cap_raise(cap_audit, cap);
73778+ }
73779+ }
73780+
73781+ if (!cap_raised(cap_drop, cap)) {
73782+ if (cap_raised(cap_audit, cap))
73783+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
73784+ return 1;
73785+ }
73786+
73787+	/* in the general case, only learn the capability use if the process
73788+	   actually has the capability; the two uses of gr_learn_cap in sys.c
73789+	   are an exception to this rule, ensuring that any role transition
73790+	   involves what the fully-learned policy believes is a privileged process
73791+	   */
73792+ if (cap_raised(cred->cap_effective, cap) && gr_learn_cap(task, cred, cap))
73793+ return 1;
73794+
73795+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
73796+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
73797+
73798+ return 0;
73799+}
73800+
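gr_task_acl_is_capable resolves a capability by walking from the leaf subject up through parent_subject links; the nearest subject whose mask mentions the capability decides it, and a capability no subject mentions stays allowed. A sketch of that nearest-ancestor-wins walk using plain bitmasks (names hypothetical):

#include <stdio.h>
#include <stddef.h>

/* toy subject with the capability sets used above, as plain masks */
struct subject {
    unsigned long cap_mask;    /* caps this subject makes a decision on */
    unsigned long cap_lower;   /* caps this subject drops               */
    struct subject *parent;
};

/* Nearest-ancestor-wins capability resolution: starting from the leaf
   subject, the first subject (leaf upward) whose cap_mask covers the
   bit decides; if none mentions it, the cap is allowed. */
static int cap_allowed(const struct subject *subj, unsigned long capbit)
{
    unsigned long decided = subj->cap_mask;
    unsigned long dropped = subj->cap_lower;

    for (subj = subj->parent; subj; subj = subj->parent) {
        if (!(decided & capbit) && (subj->cap_mask & capbit)) {
            decided |= capbit;
            if (subj->cap_lower & capbit)
                dropped |= capbit;
        }
    }
    return !(dropped & capbit);
}

int main(void)
{
    unsigned long CAP_X = 1UL << 0;
    struct subject root = { .cap_mask = CAP_X, .cap_lower = CAP_X };
    struct subject leaf = { .cap_mask = 0, .cap_lower = 0, .parent = &root };

    /* leaf says nothing about CAP_X, so the parent's drop applies */
    printf("%d\n", cap_allowed(&leaf, CAP_X));   /* 0 */
    return 0;
}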
73801+int
73802+gr_acl_is_capable(const int cap)
73803+{
73804+ return gr_task_acl_is_capable(current, current_cred(), cap);
73805+}
73806+
73807+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
73808+{
73809+ struct acl_subject_label *curracl;
73810+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
73811+
73812+ if (!gr_acl_is_enabled())
73813+ return 1;
73814+
73815+ curracl = task->acl;
73816+
73817+ cap_drop = curracl->cap_lower;
73818+ cap_mask = curracl->cap_mask;
73819+
73820+ while ((curracl = curracl->parent_subject)) {
73821+		/* walk up the parent subjects: the nearest subject whose mask
73822+		   mentions the cap decides it.  If this level mentions a cap the
73823+		   computed mask doesn't yet cover, mark it decided, and record
73824+		   whether this level lowers (drops) it
73825+		   */
73826+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
73827+ cap_raise(cap_mask, cap);
73828+ if (cap_raised(curracl->cap_lower, cap))
73829+ cap_raise(cap_drop, cap);
73830+ }
73831+ }
73832+
73833+ if (!cap_raised(cap_drop, cap))
73834+ return 1;
73835+
73836+ return 0;
73837+}
73838+
73839+int
73840+gr_acl_is_capable_nolog(const int cap)
73841+{
73842+ return gr_task_acl_is_capable_nolog(current, cap);
73843+}
73844+
73845diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
73846new file mode 100644
73847index 0000000..ca25605
73848--- /dev/null
73849+++ b/grsecurity/gracl_compat.c
73850@@ -0,0 +1,270 @@
73851+#include <linux/kernel.h>
73852+#include <linux/gracl.h>
73853+#include <linux/compat.h>
73854+#include <linux/gracl_compat.h>
73855+
73856+#include <asm/uaccess.h>
73857+
73858+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
73859+{
73860+ struct gr_arg_wrapper_compat uwrapcompat;
73861+
73862+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
73863+ return -EFAULT;
73864+
73865+ if (((uwrapcompat.version != GRSECURITY_VERSION) &&
73866+ (uwrapcompat.version != 0x2901)) ||
73867+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
73868+ return -EINVAL;
73869+
73870+ uwrap->arg = compat_ptr(uwrapcompat.arg);
73871+ uwrap->version = uwrapcompat.version;
73872+ uwrap->size = sizeof(struct gr_arg);
73873+
73874+ return 0;
73875+}
73876+
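Every copy_*_compat helper in this file follows the shape of copy_gr_arg_wrapper_compat: copy the 32-bit layout in from userspace, then widen each compat_uptr_t into a native pointer and rewrite size fields for the native layout. A user-space sketch of the widening step (the struct layouts and my_compat_ptr are hypothetical stand-ins for the kernel's compat structures and compat_ptr):

#include <stdio.h>
#include <stdint.h>

/* 32-bit userland layout: pointers travel as 32-bit integers */
typedef uint32_t compat_uptr_t;

struct arg_wrapper_compat {           /* what a 32-bit process hands in */
    compat_uptr_t arg;
    uint32_t      version;
    uint32_t      size;
};

struct arg_wrapper {                  /* native 64-bit view */
    void     *arg;
    uint32_t  version;
    size_t    size;
};

/* mimic the kernel's compat_ptr(): widen a 32-bit token to a pointer */
static void *my_compat_ptr(compat_uptr_t p)
{
    return (void *)(uintptr_t)p;
}

static void widen(const struct arg_wrapper_compat *in, struct arg_wrapper *out)
{
    out->arg = my_compat_ptr(in->arg);       /* pointer field widened     */
    out->version = in->version;              /* scalar copied through     */
    out->size = sizeof(struct arg_wrapper);  /* size rewritten for native */
}

int main(void)
{
    struct arg_wrapper_compat c = { 0x1000, 0x3000, 12 };
    struct arg_wrapper w;

    widen(&c, &w);
    printf("arg=%p size=%zu\n", w.arg, w.size);
    return 0;
}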
73877+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
73878+{
73879+ struct gr_arg_compat argcompat;
73880+
73881+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
73882+ return -EFAULT;
73883+
73884+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
73885+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
73886+ arg->role_db.num_roles = argcompat.role_db.num_roles;
73887+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
73888+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
73889+ arg->role_db.num_objects = argcompat.role_db.num_objects;
73890+
73891+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
73892+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
73893+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
73894+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
73895+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
73896+ arg->segv_device = argcompat.segv_device;
73897+ arg->segv_inode = argcompat.segv_inode;
73898+ arg->segv_uid = argcompat.segv_uid;
73899+ arg->num_sprole_pws = argcompat.num_sprole_pws;
73900+ arg->mode = argcompat.mode;
73901+
73902+ return 0;
73903+}
73904+
73905+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
73906+{
73907+ struct acl_object_label_compat objcompat;
73908+
73909+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
73910+ return -EFAULT;
73911+
73912+ obj->filename = compat_ptr(objcompat.filename);
73913+ obj->inode = objcompat.inode;
73914+ obj->device = objcompat.device;
73915+ obj->mode = objcompat.mode;
73916+
73917+ obj->nested = compat_ptr(objcompat.nested);
73918+ obj->globbed = compat_ptr(objcompat.globbed);
73919+
73920+ obj->prev = compat_ptr(objcompat.prev);
73921+ obj->next = compat_ptr(objcompat.next);
73922+
73923+ return 0;
73924+}
73925+
73926+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
73927+{
73928+ unsigned int i;
73929+ struct acl_subject_label_compat subjcompat;
73930+
73931+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
73932+ return -EFAULT;
73933+
73934+ subj->filename = compat_ptr(subjcompat.filename);
73935+ subj->inode = subjcompat.inode;
73936+ subj->device = subjcompat.device;
73937+ subj->mode = subjcompat.mode;
73938+ subj->cap_mask = subjcompat.cap_mask;
73939+ subj->cap_lower = subjcompat.cap_lower;
73940+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
73941+
73942+ for (i = 0; i < GR_NLIMITS; i++) {
73943+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
73944+ subj->res[i].rlim_cur = RLIM_INFINITY;
73945+ else
73946+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
73947+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
73948+ subj->res[i].rlim_max = RLIM_INFINITY;
73949+ else
73950+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
73951+ }
73952+ subj->resmask = subjcompat.resmask;
73953+
73954+ subj->user_trans_type = subjcompat.user_trans_type;
73955+ subj->group_trans_type = subjcompat.group_trans_type;
73956+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
73957+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
73958+ subj->user_trans_num = subjcompat.user_trans_num;
73959+ subj->group_trans_num = subjcompat.group_trans_num;
73960+
73961+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
73962+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
73963+ subj->ip_type = subjcompat.ip_type;
73964+ subj->ips = compat_ptr(subjcompat.ips);
73965+ subj->ip_num = subjcompat.ip_num;
73966+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
73967+
73968+ subj->crashes = subjcompat.crashes;
73969+ subj->expires = subjcompat.expires;
73970+
73971+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
73972+ subj->hash = compat_ptr(subjcompat.hash);
73973+ subj->prev = compat_ptr(subjcompat.prev);
73974+ subj->next = compat_ptr(subjcompat.next);
73975+
73976+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
73977+ subj->obj_hash_size = subjcompat.obj_hash_size;
73978+ subj->pax_flags = subjcompat.pax_flags;
73979+
73980+ return 0;
73981+}
73982+
73983+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
73984+{
73985+ struct acl_role_label_compat rolecompat;
73986+
73987+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
73988+ return -EFAULT;
73989+
73990+ role->rolename = compat_ptr(rolecompat.rolename);
73991+ role->uidgid = rolecompat.uidgid;
73992+ role->roletype = rolecompat.roletype;
73993+
73994+ role->auth_attempts = rolecompat.auth_attempts;
73995+ role->expires = rolecompat.expires;
73996+
73997+ role->root_label = compat_ptr(rolecompat.root_label);
73998+ role->hash = compat_ptr(rolecompat.hash);
73999+
74000+ role->prev = compat_ptr(rolecompat.prev);
74001+ role->next = compat_ptr(rolecompat.next);
74002+
74003+ role->transitions = compat_ptr(rolecompat.transitions);
74004+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
74005+ role->domain_children = compat_ptr(rolecompat.domain_children);
74006+ role->domain_child_num = rolecompat.domain_child_num;
74007+
74008+ role->umask = rolecompat.umask;
74009+
74010+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
74011+ role->subj_hash_size = rolecompat.subj_hash_size;
74012+
74013+ return 0;
74014+}
74015+
74016+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
74017+{
74018+ struct role_allowed_ip_compat roleip_compat;
74019+
74020+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
74021+ return -EFAULT;
74022+
74023+ roleip->addr = roleip_compat.addr;
74024+ roleip->netmask = roleip_compat.netmask;
74025+
74026+ roleip->prev = compat_ptr(roleip_compat.prev);
74027+ roleip->next = compat_ptr(roleip_compat.next);
74028+
74029+ return 0;
74030+}
74031+
74032+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
74033+{
74034+ struct role_transition_compat trans_compat;
74035+
74036+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
74037+ return -EFAULT;
74038+
74039+ trans->rolename = compat_ptr(trans_compat.rolename);
74040+
74041+ trans->prev = compat_ptr(trans_compat.prev);
74042+ trans->next = compat_ptr(trans_compat.next);
74043+
74044+ return 0;
74045+
74046+}
74047+
74048+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
74049+{
74050+ struct gr_hash_struct_compat hash_compat;
74051+
74052+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
74053+ return -EFAULT;
74054+
74055+ hash->table = compat_ptr(hash_compat.table);
74056+ hash->nametable = compat_ptr(hash_compat.nametable);
74057+ hash->first = compat_ptr(hash_compat.first);
74058+
74059+ hash->table_size = hash_compat.table_size;
74060+ hash->used_size = hash_compat.used_size;
74061+
74062+ hash->type = hash_compat.type;
74063+
74064+ return 0;
74065+}
74066+
74067+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
74068+{
74069+ compat_uptr_t ptrcompat;
74070+
74071+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
74072+ return -EFAULT;
74073+
74074+ *(void **)ptr = compat_ptr(ptrcompat);
74075+
74076+ return 0;
74077+}
74078+
74079+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
74080+{
74081+ struct acl_ip_label_compat ip_compat;
74082+
74083+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
74084+ return -EFAULT;
74085+
74086+ ip->iface = compat_ptr(ip_compat.iface);
74087+ ip->addr = ip_compat.addr;
74088+ ip->netmask = ip_compat.netmask;
74089+ ip->low = ip_compat.low;
74090+ ip->high = ip_compat.high;
74091+ ip->mode = ip_compat.mode;
74092+ ip->type = ip_compat.type;
74093+
74094+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
74095+
74096+ ip->prev = compat_ptr(ip_compat.prev);
74097+ ip->next = compat_ptr(ip_compat.next);
74098+
74099+ return 0;
74100+}
74101+
74102+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
74103+{
74104+ struct sprole_pw_compat pw_compat;
74105+
74106+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
74107+ return -EFAULT;
74108+
74109+ pw->rolename = compat_ptr(pw_compat.rolename);
74110+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
74111+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
74112+
74113+ return 0;
74114+}
74115+
74116+size_t get_gr_arg_wrapper_size_compat(void)
74117+{
74118+ return sizeof(struct gr_arg_wrapper_compat);
74119+}
74120+
74121diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
74122new file mode 100644
74123index 0000000..4008fdc
74124--- /dev/null
74125+++ b/grsecurity/gracl_fs.c
74126@@ -0,0 +1,445 @@
74127+#include <linux/kernel.h>
74128+#include <linux/sched.h>
74129+#include <linux/types.h>
74130+#include <linux/fs.h>
74131+#include <linux/file.h>
74132+#include <linux/stat.h>
74133+#include <linux/grsecurity.h>
74134+#include <linux/grinternal.h>
74135+#include <linux/gracl.h>
74136+
74137+umode_t
74138+gr_acl_umask(void)
74139+{
74140+ if (unlikely(!gr_acl_is_enabled()))
74141+ return 0;
74142+
74143+ return current->role->umask;
74144+}
74145+
74146+__u32
74147+gr_acl_handle_hidden_file(const struct dentry * dentry,
74148+ const struct vfsmount * mnt)
74149+{
74150+ __u32 mode;
74151+
74152+ if (unlikely(d_is_negative(dentry)))
74153+ return GR_FIND;
74154+
74155+ mode =
74156+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
74157+
74158+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
74159+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
74160+ return mode;
74161+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
74162+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
74163+ return 0;
74164+ } else if (unlikely(!(mode & GR_FIND)))
74165+ return 0;
74166+
74167+ return GR_FIND;
74168+}
74169+
74170+__u32
74171+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
74172+ int acc_mode)
74173+{
74174+ __u32 reqmode = GR_FIND;
74175+ __u32 mode;
74176+
74177+ if (unlikely(d_is_negative(dentry)))
74178+ return reqmode;
74179+
74180+ if (acc_mode & MAY_APPEND)
74181+ reqmode |= GR_APPEND;
74182+ else if (acc_mode & MAY_WRITE)
74183+ reqmode |= GR_WRITE;
74184+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
74185+ reqmode |= GR_READ;
74186+
74187+ mode =
74188+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
74189+ mnt);
74190+
74191+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
74192+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
74193+ reqmode & GR_READ ? " reading" : "",
74194+ reqmode & GR_WRITE ? " writing" : reqmode &
74195+ GR_APPEND ? " appending" : "");
74196+ return reqmode;
74197+ } else
74198+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
74199+ {
74200+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
74201+ reqmode & GR_READ ? " reading" : "",
74202+ reqmode & GR_WRITE ? " writing" : reqmode &
74203+ GR_APPEND ? " appending" : "");
74204+ return 0;
74205+ } else if (unlikely((mode & reqmode) != reqmode))
74206+ return 0;
74207+
74208+ return reqmode;
74209+}
74210+
74211+__u32
74212+gr_acl_handle_creat(const struct dentry * dentry,
74213+ const struct dentry * p_dentry,
74214+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
74215+ const int imode)
74216+{
74217+ __u32 reqmode = GR_WRITE | GR_CREATE;
74218+ __u32 mode;
74219+
74220+ if (acc_mode & MAY_APPEND)
74221+ reqmode |= GR_APPEND;
74222+ // if a directory was required or the directory already exists, then
74223+ // don't count this open as a read
74224+ if ((acc_mode & MAY_READ) &&
74225+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
74226+ reqmode |= GR_READ;
74227+ if ((open_flags & O_CREAT) &&
74228+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
74229+ reqmode |= GR_SETID;
74230+
74231+ mode =
74232+ gr_check_create(dentry, p_dentry, p_mnt,
74233+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
74234+
74235+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
74236+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
74237+ reqmode & GR_READ ? " reading" : "",
74238+ reqmode & GR_WRITE ? " writing" : reqmode &
74239+ GR_APPEND ? " appending" : "");
74240+ return reqmode;
74241+ } else
74242+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
74243+ {
74244+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
74245+ reqmode & GR_READ ? " reading" : "",
74246+ reqmode & GR_WRITE ? " writing" : reqmode &
74247+ GR_APPEND ? " appending" : "");
74248+ return 0;
74249+ } else if (unlikely((mode & reqmode) != reqmode))
74250+ return 0;
74251+
74252+ return reqmode;
74253+}
74254+
74255+__u32
74256+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
74257+ const int fmode)
74258+{
74259+ __u32 mode, reqmode = GR_FIND;
74260+
74261+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
74262+ reqmode |= GR_EXEC;
74263+ if (fmode & S_IWOTH)
74264+ reqmode |= GR_WRITE;
74265+ if (fmode & S_IROTH)
74266+ reqmode |= GR_READ;
74267+
74268+ mode =
74269+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
74270+ mnt);
74271+
74272+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
74273+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
74274+ reqmode & GR_READ ? " reading" : "",
74275+ reqmode & GR_WRITE ? " writing" : "",
74276+ reqmode & GR_EXEC ? " executing" : "");
74277+ return reqmode;
74278+ } else
74279+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
74280+ {
74281+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
74282+ reqmode & GR_READ ? " reading" : "",
74283+ reqmode & GR_WRITE ? " writing" : "",
74284+ reqmode & GR_EXEC ? " executing" : "");
74285+ return 0;
74286+ } else if (unlikely((mode & reqmode) != reqmode))
74287+ return 0;
74288+
74289+ return reqmode;
74290+}
74291+
74292+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
74293+{
74294+ __u32 mode;
74295+
74296+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
74297+
74298+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
74299+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
74300+ return mode;
74301+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
74302+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
74303+ return 0;
74304+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
74305+ return 0;
74306+
74307+ return (reqmode);
74308+}
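
generic_fs_handler() condenses the triage that the open/creat/access handlers above spell out by hand: every requested bit granted plus an audit bit means log-and-allow, a missing bit without GR_SUPPRESS means log-and-deny, and a missing bit with GR_SUPPRESS means deny silently. A standalone sketch with hypothetical flag values (the real bit assignments live in headers not shown here):

#include <stdio.h>

#define GR_AUDITS   0x100 /* hypothetical value for this sketch */
#define GR_SUPPRESS 0x200 /* hypothetical value for this sketch */

static const char *triage(unsigned mode, unsigned reqmode)
{
	if ((mode & reqmode) == reqmode && (mode & GR_AUDITS))
		return "allow + audit log";
	if ((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))
		return "deny + denial log";
	if ((mode & reqmode) != reqmode)
		return "deny silently (suppressed)";
	return "allow quietly";
}

int main(void)
{
	printf("%s\n", triage(0x3 | GR_AUDITS, 0x3));   /* allow + audit log */
	printf("%s\n", triage(0x1, 0x3));               /* deny + denial log */
	printf("%s\n", triage(0x1 | GR_SUPPRESS, 0x3)); /* deny silently */
	printf("%s\n", triage(0x3, 0x3));               /* allow quietly */
	return 0;
}
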
74309+
74310+__u32
74311+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
74312+{
74313+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
74314+}
74315+
74316+__u32
74317+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
74318+{
74319+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
74320+}
74321+
74322+__u32
74323+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
74324+{
74325+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
74326+}
74327+
74328+__u32
74329+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
74330+{
74331+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
74332+}
74333+
74334+__u32
74335+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
74336+ umode_t *modeptr)
74337+{
74338+ umode_t mode;
74339+
74340+ *modeptr &= ~gr_acl_umask();
74341+ mode = *modeptr;
74342+
74343+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
74344+ return 1;
74345+
74346+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
74347+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
74348+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
74349+ GR_CHMOD_ACL_MSG);
74350+ } else {
74351+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
74352+ }
74353+}
74354+
74355+__u32
74356+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
74357+{
74358+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
74359+}
74360+
74361+__u32
74362+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
74363+{
74364+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
74365+}
74366+
74367+__u32
74368+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
74369+{
74370+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
74371+}
74372+
74373+__u32
74374+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
74375+{
74376+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
74377+}
74378+
74379+__u32
74380+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
74381+{
74382+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
74383+ GR_UNIXCONNECT_ACL_MSG);
74384+}
74385+
74386+/* hardlinks require at minimum create and link permission;
74387+   any additional privilege required is based on the
74388+   privilege of the file being linked to
74389+*/
74390+__u32
74391+gr_acl_handle_link(const struct dentry * new_dentry,
74392+ const struct dentry * parent_dentry,
74393+ const struct vfsmount * parent_mnt,
74394+ const struct dentry * old_dentry,
74395+ const struct vfsmount * old_mnt, const struct filename *to)
74396+{
74397+ __u32 mode;
74398+ __u32 needmode = GR_CREATE | GR_LINK;
74399+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
74400+
74401+ mode =
74402+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
74403+ old_mnt);
74404+
74405+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
74406+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
74407+ return mode;
74408+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
74409+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
74410+ return 0;
74411+ } else if (unlikely((mode & needmode) != needmode))
74412+ return 0;
74413+
74414+ return 1;
74415+}
74416+
74417+__u32
74418+gr_acl_handle_symlink(const struct dentry * new_dentry,
74419+ const struct dentry * parent_dentry,
74420+ const struct vfsmount * parent_mnt, const struct filename *from)
74421+{
74422+ __u32 needmode = GR_WRITE | GR_CREATE;
74423+ __u32 mode;
74424+
74425+ mode =
74426+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
74427+ GR_CREATE | GR_AUDIT_CREATE |
74428+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
74429+
74430+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
74431+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
74432+ return mode;
74433+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
74434+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
74435+ return 0;
74436+ } else if (unlikely((mode & needmode) != needmode))
74437+ return 0;
74438+
74439+ return (GR_WRITE | GR_CREATE);
74440+}
74441+
74442+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
74443+{
74444+ __u32 mode;
74445+
74446+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
74447+
74448+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
74449+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
74450+ return mode;
74451+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
74452+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
74453+ return 0;
74454+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
74455+ return 0;
74456+
74457+ return (reqmode);
74458+}
74459+
74460+__u32
74461+gr_acl_handle_mknod(const struct dentry * new_dentry,
74462+ const struct dentry * parent_dentry,
74463+ const struct vfsmount * parent_mnt,
74464+ const int mode)
74465+{
74466+ __u32 reqmode = GR_WRITE | GR_CREATE;
74467+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
74468+ reqmode |= GR_SETID;
74469+
74470+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
74471+ reqmode, GR_MKNOD_ACL_MSG);
74472+}
74473+
74474+__u32
74475+gr_acl_handle_mkdir(const struct dentry *new_dentry,
74476+ const struct dentry *parent_dentry,
74477+ const struct vfsmount *parent_mnt)
74478+{
74479+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
74480+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
74481+}
74482+
74483+#define RENAME_CHECK_SUCCESS(old, new) \
74484+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
74485+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
74486+
74487+int
74488+gr_acl_handle_rename(struct dentry *new_dentry,
74489+ struct dentry *parent_dentry,
74490+ const struct vfsmount *parent_mnt,
74491+ struct dentry *old_dentry,
74492+ struct inode *old_parent_inode,
74493+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags)
74494+{
74495+ __u32 comp1, comp2;
74496+ int error = 0;
74497+
74498+ if (unlikely(!gr_acl_is_enabled()))
74499+ return 0;
74500+
74501+ if (flags & RENAME_EXCHANGE) {
74502+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
74503+ GR_AUDIT_READ | GR_AUDIT_WRITE |
74504+ GR_SUPPRESS, parent_mnt);
74505+ comp2 =
74506+ gr_search_file(old_dentry,
74507+ GR_READ | GR_WRITE | GR_AUDIT_READ |
74508+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
74509+ } else if (d_is_negative(new_dentry)) {
74510+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
74511+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
74512+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
74513+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
74514+ GR_DELETE | GR_AUDIT_DELETE |
74515+ GR_AUDIT_READ | GR_AUDIT_WRITE |
74516+ GR_SUPPRESS, old_mnt);
74517+ } else {
74518+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
74519+ GR_CREATE | GR_DELETE |
74520+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
74521+ GR_AUDIT_READ | GR_AUDIT_WRITE |
74522+ GR_SUPPRESS, parent_mnt);
74523+ comp2 =
74524+ gr_search_file(old_dentry,
74525+ GR_READ | GR_WRITE | GR_AUDIT_READ |
74526+ GR_DELETE | GR_AUDIT_DELETE |
74527+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
74528+ }
74529+
74530+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
74531+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
74532+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
74533+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
74534+ && !(comp2 & GR_SUPPRESS)) {
74535+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
74536+ error = -EACCES;
74537+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
74538+ error = -EACCES;
74539+
74540+ return error;
74541+}
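
RENAME_CHECK_SUCCESS above reduces the rename verdict to one question: did both the source and target lookups come back with at least read and write? A standalone sketch of the macro (the GR_READ/GR_WRITE values here are hypothetical, chosen only so the sketch compiles):

#include <stdio.h>

#define GR_READ  0x1 /* hypothetical value for this sketch */
#define GR_WRITE 0x2 /* hypothetical value for this sketch */

#define RENAME_CHECK_SUCCESS(old, new) \
	(((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
	 ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))

int main(void)
{
	/* both endpoints granted read+write -> success */
	printf("%d\n", RENAME_CHECK_SUCCESS(GR_READ | GR_WRITE, GR_READ | GR_WRITE));
	/* source missing write -> failure */
	printf("%d\n", RENAME_CHECK_SUCCESS(GR_READ, GR_READ | GR_WRITE));
	return 0;
}
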
74542+
74543+void
74544+gr_acl_handle_exit(void)
74545+{
74546+ u16 id;
74547+ char *rolename;
74548+
74549+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
74550+ !(current->role->roletype & GR_ROLE_PERSIST))) {
74551+ id = current->acl_role_id;
74552+ rolename = current->role->rolename;
74553+ gr_set_acls(1);
74554+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
74555+ }
74556+
74557+ gr_put_exec_file(current);
74558+ return;
74559+}
74560+
74561+int
74562+gr_acl_handle_procpidmem(const struct task_struct *task)
74563+{
74564+ if (unlikely(!gr_acl_is_enabled()))
74565+ return 0;
74566+
74567+ if (task != current && task->acl->mode & GR_PROTPROCFD)
74568+ return -EACCES;
74569+
74570+ return 0;
74571+}
74572diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
74573new file mode 100644
74574index 0000000..f056b81
74575--- /dev/null
74576+++ b/grsecurity/gracl_ip.c
74577@@ -0,0 +1,386 @@
74578+#include <linux/kernel.h>
74579+#include <asm/uaccess.h>
74580+#include <asm/errno.h>
74581+#include <net/sock.h>
74582+#include <linux/file.h>
74583+#include <linux/fs.h>
74584+#include <linux/net.h>
74585+#include <linux/in.h>
74586+#include <linux/skbuff.h>
74587+#include <linux/ip.h>
74588+#include <linux/udp.h>
74589+#include <linux/types.h>
74590+#include <linux/sched.h>
74591+#include <linux/netdevice.h>
74592+#include <linux/inetdevice.h>
74593+#include <linux/gracl.h>
74594+#include <linux/grsecurity.h>
74595+#include <linux/grinternal.h>
74596+
74597+#define GR_BIND 0x01
74598+#define GR_CONNECT 0x02
74599+#define GR_INVERT 0x04
74600+#define GR_BINDOVERRIDE 0x08
74601+#define GR_CONNECTOVERRIDE 0x10
74602+#define GR_SOCK_FAMILY 0x20
74603+
74604+static const char * gr_protocols[IPPROTO_MAX] = {
74605+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
74606+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
74607+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
74608+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
74609+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
74610+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
74611+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
74612+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
74613+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
74614+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
74615+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
74616+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
74617+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
74618+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
74619+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
74620+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
74621+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
74622+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
74623+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
74624+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
74625+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
74626+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
74627+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
74628+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
74629+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
74630+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
74631+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
74632+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
74633+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
74634+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
74635+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
74636+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
74637+ };
74638+
74639+static const char * gr_socktypes[SOCK_MAX] = {
74640+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
74641+ "unknown:7", "unknown:8", "unknown:9", "packet"
74642+ };
74643+
74644+static const char * gr_sockfamilies[AF_MAX+1] = {
74645+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
74646+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
74647+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
74648+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
74649+ };
74650+
74651+const char *
74652+gr_proto_to_name(unsigned char proto)
74653+{
74654+ return gr_protocols[proto];
74655+}
74656+
74657+const char *
74658+gr_socktype_to_name(unsigned char type)
74659+{
74660+ return gr_socktypes[type];
74661+}
74662+
74663+const char *
74664+gr_sockfamily_to_name(unsigned char family)
74665+{
74666+ return gr_sockfamilies[family];
74667+}
74668+
74669+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
74670+
74671+int
74672+gr_search_socket(const int domain, const int type, const int protocol)
74673+{
74674+ struct acl_subject_label *curr;
74675+ const struct cred *cred = current_cred();
74676+
74677+ if (unlikely(!gr_acl_is_enabled()))
74678+ goto exit;
74679+
74680+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
74681+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
74682+ goto exit; // let the kernel handle it
74683+
74684+ curr = current->acl;
74685+
74686+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
74687+ /* the family is allowed, if this is PF_INET allow it only if
74688+ the extra sock type/protocol checks pass */
74689+ if (domain == PF_INET)
74690+ goto inet_check;
74691+ goto exit;
74692+ } else {
74693+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
74694+ __u32 fakeip = 0;
74695+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74696+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74697+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74698+ gr_to_filename(current->exec_file->f_path.dentry,
74699+ current->exec_file->f_path.mnt) :
74700+ curr->filename, curr->filename,
74701+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
74702+ &current->signal->saved_ip);
74703+ goto exit;
74704+ }
74705+ goto exit_fail;
74706+ }
74707+
74708+inet_check:
74709+ /* the rest of this checking is for IPv4 only */
74710+ if (!curr->ips)
74711+ goto exit;
74712+
74713+ if ((curr->ip_type & (1U << type)) &&
74714+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
74715+ goto exit;
74716+
74717+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
74718+ /* we don't place acls on raw sockets, and sometimes
74719+ dgram/ip sockets are opened for ioctl and not
74720+ bind/connect, so we'll fake a bind learn log */
74721+ if (type == SOCK_RAW || type == SOCK_PACKET) {
74722+ __u32 fakeip = 0;
74723+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74724+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74725+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74726+ gr_to_filename(current->exec_file->f_path.dentry,
74727+ current->exec_file->f_path.mnt) :
74728+ curr->filename, curr->filename,
74729+ &fakeip, 0, type,
74730+ protocol, GR_CONNECT, &current->signal->saved_ip);
74731+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
74732+ __u32 fakeip = 0;
74733+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74734+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74735+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74736+ gr_to_filename(current->exec_file->f_path.dentry,
74737+ current->exec_file->f_path.mnt) :
74738+ curr->filename, curr->filename,
74739+ &fakeip, 0, type,
74740+ protocol, GR_BIND, &current->signal->saved_ip);
74741+ }
74742+ /* we'll log when they use connect or bind */
74743+ goto exit;
74744+ }
74745+
74746+exit_fail:
74747+ if (domain == PF_INET)
74748+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
74749+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
74750+ else if (rcu_access_pointer(net_families[domain]) != NULL)
74751+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
74752+ gr_socktype_to_name(type), protocol);
74753+
74754+ return 0;
74755+exit:
74756+ return 1;
74757+}
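
The family and protocol checks in gr_search_socket() index packed bitsets: word n/32, bit n%32. A standalone sketch of that indexing (illustrative only, not part of the patch):

#include <stdio.h>

static int bitset_test(const unsigned *set, unsigned n)
{
	return (set[n / 32] & (1U << (n % 32))) != 0;
}

int main(void)
{
	unsigned families[8] = {0}; /* 256 bits, comfortably >= AF_MAX */

	families[2 / 32] |= 1U << (2 % 32); /* allow PF_INET (2) */
	printf("inet allowed: %d\n", bitset_test(families, 2)); /* 1 */
	printf("ipx allowed:  %d\n", bitset_test(families, 4)); /* 0 */
	return 0;
}
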
74758+
74759+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
74760+{
74761+ if ((ip->mode & mode) &&
74762+ (ip_port >= ip->low) &&
74763+ (ip_port <= ip->high) &&
74764+ ((ntohl(ip_addr) & our_netmask) ==
74765+ (ntohl(our_addr) & our_netmask))
74766+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
74767+ && (ip->type & (1U << type))) {
74768+ if (ip->mode & GR_INVERT)
74769+ return 2; // specifically denied
74770+ else
74771+ return 1; // allowed
74772+ }
74773+
74774+ return 0; // not specifically allowed, may continue parsing
74775+}
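
check_ip_policy()'s return contract drives the rule scan in gr_search_connectbind() below: 1 permits immediately, 2 (an inverted rule) denies immediately, and 0 keeps scanning, with denial as the default when no rule matches. A standalone sketch of that first-match-wins loop (-13 stands in for -EACCES):

#include <stdio.h>

enum { NO_MATCH = 0, ALLOWED = 1, DENIED = 2 };

static int scan(const int *verdicts, int n)
{
	for (int i = 0; i < n; i++) {
		if (verdicts[i] == ALLOWED)
			return 0;   /* permit on first allow */
		if (verdicts[i] == DENIED)
			return -13; /* inverted rule: deny immediately */
	}
	return -13;                 /* default deny: nothing matched */
}

int main(void)
{
	int rules1[] = { NO_MATCH, ALLOWED };
	int rules2[] = { NO_MATCH, DENIED, ALLOWED };

	printf("%d\n", scan(rules1, 2)); /* 0: second rule allows */
	printf("%d\n", scan(rules2, 3)); /* -13: deny rule wins first */
	return 0;
}
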
74776+
74777+static int
74778+gr_search_connectbind(const int full_mode, struct sock *sk,
74779+ struct sockaddr_in *addr, const int type)
74780+{
74781+ char iface[IFNAMSIZ] = {0};
74782+ struct acl_subject_label *curr;
74783+ struct acl_ip_label *ip;
74784+ struct inet_sock *isk;
74785+ struct net_device *dev;
74786+ struct in_device *idev;
74787+ unsigned long i;
74788+ int ret;
74789+ int mode = full_mode & (GR_BIND | GR_CONNECT);
74790+ __u32 ip_addr = 0;
74791+ __u32 our_addr;
74792+ __u32 our_netmask;
74793+ char *p;
74794+ __u16 ip_port = 0;
74795+ const struct cred *cred = current_cred();
74796+
74797+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
74798+ return 0;
74799+
74800+ curr = current->acl;
74801+ isk = inet_sk(sk);
74802+
74803+ /* INADDR_ANY overriding for binds; inaddr_any_override is already in network order */
74804+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
74805+ addr->sin_addr.s_addr = curr->inaddr_any_override;
74806+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
74807+ struct sockaddr_in saddr;
74808+ int err;
74809+
74810+ saddr.sin_family = AF_INET;
74811+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
74812+ saddr.sin_port = isk->inet_sport;
74813+
74814+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
74815+ if (err)
74816+ return err;
74817+
74818+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
74819+ if (err)
74820+ return err;
74821+ }
74822+
74823+ if (!curr->ips)
74824+ return 0;
74825+
74826+ ip_addr = addr->sin_addr.s_addr;
74827+ ip_port = ntohs(addr->sin_port);
74828+
74829+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
74830+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74831+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74832+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74833+ gr_to_filename(current->exec_file->f_path.dentry,
74834+ current->exec_file->f_path.mnt) :
74835+ curr->filename, curr->filename,
74836+ &ip_addr, ip_port, type,
74837+ sk->sk_protocol, mode, &current->signal->saved_ip);
74838+ return 0;
74839+ }
74840+
74841+ for (i = 0; i < curr->ip_num; i++) {
74842+ ip = *(curr->ips + i);
74843+ if (ip->iface != NULL) {
74844+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
74845+ p = strchr(iface, ':');
74846+ if (p != NULL)
74847+ *p = '\0';
74848+ dev = dev_get_by_name(sock_net(sk), iface);
74849+ if (dev == NULL)
74850+ continue;
74851+ idev = in_dev_get(dev);
74852+ if (idev == NULL) {
74853+ dev_put(dev);
74854+ continue;
74855+ }
74856+ rcu_read_lock();
74857+ for_ifa(idev) {
74858+ if (!strcmp(ip->iface, ifa->ifa_label)) {
74859+ our_addr = ifa->ifa_address;
74860+ our_netmask = 0xffffffff;
74861+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
74862+ if (ret == 1) {
74863+ rcu_read_unlock();
74864+ in_dev_put(idev);
74865+ dev_put(dev);
74866+ return 0;
74867+ } else if (ret == 2) {
74868+ rcu_read_unlock();
74869+ in_dev_put(idev);
74870+ dev_put(dev);
74871+ goto denied;
74872+ }
74873+ }
74874+ } endfor_ifa(idev);
74875+ rcu_read_unlock();
74876+ in_dev_put(idev);
74877+ dev_put(dev);
74878+ } else {
74879+ our_addr = ip->addr;
74880+ our_netmask = ip->netmask;
74881+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
74882+ if (ret == 1)
74883+ return 0;
74884+ else if (ret == 2)
74885+ goto denied;
74886+ }
74887+ }
74888+
74889+denied:
74890+ if (mode == GR_BIND)
74891+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
74892+ else if (mode == GR_CONNECT)
74893+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
74894+
74895+ return -EACCES;
74896+}
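
The iface handling above supports alias rules such as "eth0:1": the device name is everything before the colon (for dev_get_by_name()), while the full rule string is matched against each in_ifaddr label. A standalone sketch of the split (illustrative only, not part of the patch):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char iface[16] = {0};
	const char *rule = "eth0:1";
	char *p;

	strncpy(iface, rule, sizeof(iface) - 1);
	p = strchr(iface, ':');
	if (p)
		*p = '\0'; /* keep the base device name only */
	printf("device=%s label=%s\n", iface, rule); /* device=eth0 label=eth0:1 */
	return 0;
}
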
74897+
74898+int
74899+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
74900+{
74901+ /* always allow disconnection of dgram sockets with connect */
74902+ if (addr->sin_family == AF_UNSPEC)
74903+ return 0;
74904+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
74905+}
74906+
74907+int
74908+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
74909+{
74910+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
74911+}
74912+
74913+int gr_search_listen(struct socket *sock)
74914+{
74915+ struct sock *sk = sock->sk;
74916+ struct sockaddr_in addr;
74917+
74918+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
74919+ addr.sin_port = inet_sk(sk)->inet_sport;
74920+
74921+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
74922+}
74923+
74924+int gr_search_accept(struct socket *sock)
74925+{
74926+ struct sock *sk = sock->sk;
74927+ struct sockaddr_in addr;
74928+
74929+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
74930+ addr.sin_port = inet_sk(sk)->inet_sport;
74931+
74932+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
74933+}
74934+
74935+int
74936+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
74937+{
74938+ if (addr)
74939+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
74940+ else {
74941+ struct sockaddr_in sin;
74942+ const struct inet_sock *inet = inet_sk(sk);
74943+
74944+ sin.sin_addr.s_addr = inet->inet_daddr;
74945+ sin.sin_port = inet->inet_dport;
74946+
74947+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
74948+ }
74949+}
74950+
74951+int
74952+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
74953+{
74954+ struct sockaddr_in sin;
74955+
74956+ if (unlikely(skb->len < sizeof (struct udphdr)))
74957+ return 0; // skip this packet
74958+
74959+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
74960+ sin.sin_port = udp_hdr(skb)->source;
74961+
74962+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
74963+}
74964diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
74965new file mode 100644
74966index 0000000..25f54ef
74967--- /dev/null
74968+++ b/grsecurity/gracl_learn.c
74969@@ -0,0 +1,207 @@
74970+#include <linux/kernel.h>
74971+#include <linux/mm.h>
74972+#include <linux/sched.h>
74973+#include <linux/poll.h>
74974+#include <linux/string.h>
74975+#include <linux/file.h>
74976+#include <linux/types.h>
74977+#include <linux/vmalloc.h>
74978+#include <linux/grinternal.h>
74979+
74980+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
74981+ size_t count, loff_t *ppos);
74982+extern int gr_acl_is_enabled(void);
74983+
74984+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
74985+static int gr_learn_attached;
74986+
74987+/* use a 512k buffer */
74988+#define LEARN_BUFFER_SIZE (512 * 1024)
74989+
74990+static DEFINE_SPINLOCK(gr_learn_lock);
74991+static DEFINE_MUTEX(gr_learn_user_mutex);
74992+
74993+/* we need to maintain two buffers, so that the kernel context of grlearn
74994+ uses a mutex around the userspace copying, and the other kernel contexts
74995+ use a spinlock when copying into the buffer, since they cannot sleep
74996+*/
74997+static char *learn_buffer;
74998+static char *learn_buffer_user;
74999+static int learn_buffer_len;
75000+static int learn_buffer_user_len;
75001+
75002+static ssize_t
75003+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
75004+{
75005+ DECLARE_WAITQUEUE(wait, current);
75006+ ssize_t retval = 0;
75007+
75008+ add_wait_queue(&learn_wait, &wait);
75009+ set_current_state(TASK_INTERRUPTIBLE);
75010+ do {
75011+ mutex_lock(&gr_learn_user_mutex);
75012+ spin_lock(&gr_learn_lock);
75013+ if (learn_buffer_len)
75014+ break;
75015+ spin_unlock(&gr_learn_lock);
75016+ mutex_unlock(&gr_learn_user_mutex);
75017+ if (file->f_flags & O_NONBLOCK) {
75018+ retval = -EAGAIN;
75019+ goto out;
75020+ }
75021+ if (signal_pending(current)) {
75022+ retval = -ERESTARTSYS;
75023+ goto out;
75024+ }
75025+
75026+ schedule();
75027+ } while (1);
75028+
75029+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
75030+ learn_buffer_user_len = learn_buffer_len;
75031+ retval = learn_buffer_len;
75032+ learn_buffer_len = 0;
75033+
75034+ spin_unlock(&gr_learn_lock);
75035+
75036+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
75037+ retval = -EFAULT;
75038+
75039+ mutex_unlock(&gr_learn_user_mutex);
75040+out:
75041+ set_current_state(TASK_RUNNING);
75042+ remove_wait_queue(&learn_wait, &wait);
75043+ return retval;
75044+}
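
read_learn() uses the classic open-coded wait loop: queue ourselves, mark TASK_INTERRUPTIBLE, re-check the condition under the locks, and schedule() when there is nothing to read, re-testing after every wakeup. A standalone pthreads analogue of the same shape (build with -lpthread; illustrative only, not part of the patch):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int buffer_len;

static void *producer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	buffer_len = 42;            /* "learn data arrived" */
	pthread_cond_signal(&cond); /* like wake_up_interruptible() */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);
	pthread_mutex_lock(&lock);
	while (buffer_len == 0)     /* re-test the condition after every wakeup */
		pthread_cond_wait(&cond, &lock);
	printf("consumed %d bytes\n", buffer_len);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}
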
75045+
75046+static unsigned int
75047+poll_learn(struct file * file, poll_table * wait)
75048+{
75049+ poll_wait(file, &learn_wait, wait);
75050+
75051+ if (learn_buffer_len)
75052+ return (POLLIN | POLLRDNORM);
75053+
75054+ return 0;
75055+}
75056+
75057+void
75058+gr_clear_learn_entries(void)
75059+{
75060+ char *tmp;
75061+
75062+ mutex_lock(&gr_learn_user_mutex);
75063+ spin_lock(&gr_learn_lock);
75064+ tmp = learn_buffer;
75065+ learn_buffer = NULL;
75066+ spin_unlock(&gr_learn_lock);
75067+ if (tmp)
75068+ vfree(tmp);
75069+ if (learn_buffer_user != NULL) {
75070+ vfree(learn_buffer_user);
75071+ learn_buffer_user = NULL;
75072+ }
75073+ learn_buffer_len = 0;
75074+ mutex_unlock(&gr_learn_user_mutex);
75075+
75076+ return;
75077+}
75078+
75079+void
75080+gr_add_learn_entry(const char *fmt, ...)
75081+{
75082+ va_list args;
75083+ unsigned int len;
75084+
75085+ if (!gr_learn_attached)
75086+ return;
75087+
75088+ spin_lock(&gr_learn_lock);
75089+
75090+ /* leave a gap at the end so we know when it's "full" but don't have to
75091+ compute the exact length of the string we're trying to append
75092+ */
75093+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
75094+ spin_unlock(&gr_learn_lock);
75095+ wake_up_interruptible(&learn_wait);
75096+ return;
75097+ }
75098+ if (learn_buffer == NULL) {
75099+ spin_unlock(&gr_learn_lock);
75100+ return;
75101+ }
75102+
75103+ va_start(args, fmt);
75104+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
75105+ va_end(args);
75106+
75107+ learn_buffer_len += len + 1;
75108+
75109+ spin_unlock(&gr_learn_lock);
75110+ wake_up_interruptible(&learn_wait);
75111+
75112+ return;
75113+}
75114+
75115+static int
75116+open_learn(struct inode *inode, struct file *file)
75117+{
75118+ if (file->f_mode & FMODE_READ && gr_learn_attached)
75119+ return -EBUSY;
75120+ if (file->f_mode & FMODE_READ) {
75121+ int retval = 0;
75122+ mutex_lock(&gr_learn_user_mutex);
75123+ if (learn_buffer == NULL)
75124+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
75125+ if (learn_buffer_user == NULL)
75126+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
75127+ if (learn_buffer == NULL) {
75128+ retval = -ENOMEM;
75129+ goto out_error;
75130+ }
75131+ if (learn_buffer_user == NULL) {
75132+ retval = -ENOMEM;
75133+ goto out_error;
75134+ }
75135+ learn_buffer_len = 0;
75136+ learn_buffer_user_len = 0;
75137+ gr_learn_attached = 1;
75138+out_error:
75139+ mutex_unlock(&gr_learn_user_mutex);
75140+ return retval;
75141+ }
75142+ return 0;
75143+}
75144+
75145+static int
75146+close_learn(struct inode *inode, struct file *file)
75147+{
75148+ if (file->f_mode & FMODE_READ) {
75149+ char *tmp = NULL;
75150+ mutex_lock(&gr_learn_user_mutex);
75151+ spin_lock(&gr_learn_lock);
75152+ tmp = learn_buffer;
75153+ learn_buffer = NULL;
75154+ spin_unlock(&gr_learn_lock);
75155+ if (tmp)
75156+ vfree(tmp);
75157+ if (learn_buffer_user != NULL) {
75158+ vfree(learn_buffer_user);
75159+ learn_buffer_user = NULL;
75160+ }
75161+ learn_buffer_len = 0;
75162+ learn_buffer_user_len = 0;
75163+ gr_learn_attached = 0;
75164+ mutex_unlock(&gr_learn_user_mutex);
75165+ }
75166+
75167+ return 0;
75168+}
75169+
75170+const struct file_operations grsec_fops = {
75171+ .read = read_learn,
75172+ .write = write_grsec_handler,
75173+ .open = open_learn,
75174+ .release = close_learn,
75175+ .poll = poll_learn,
75176+};
75177diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
75178new file mode 100644
75179index 0000000..3f8ade0
75180--- /dev/null
75181+++ b/grsecurity/gracl_policy.c
75182@@ -0,0 +1,1782 @@
75183+#include <linux/kernel.h>
75184+#include <linux/module.h>
75185+#include <linux/sched.h>
75186+#include <linux/mm.h>
75187+#include <linux/file.h>
75188+#include <linux/fs.h>
75189+#include <linux/namei.h>
75190+#include <linux/mount.h>
75191+#include <linux/tty.h>
75192+#include <linux/proc_fs.h>
75193+#include <linux/lglock.h>
75194+#include <linux/slab.h>
75195+#include <linux/vmalloc.h>
75196+#include <linux/types.h>
75197+#include <linux/sysctl.h>
75198+#include <linux/netdevice.h>
75199+#include <linux/ptrace.h>
75200+#include <linux/gracl.h>
75201+#include <linux/gralloc.h>
75202+#include <linux/security.h>
75203+#include <linux/grinternal.h>
75204+#include <linux/pid_namespace.h>
75205+#include <linux/stop_machine.h>
75206+#include <linux/fdtable.h>
75207+#include <linux/percpu.h>
75208+#include <linux/lglock.h>
75209+#include <linux/hugetlb.h>
75210+#include <linux/posix-timers.h>
75211+#include "../fs/mount.h"
75212+
75213+#include <asm/uaccess.h>
75214+#include <asm/errno.h>
75215+#include <asm/mman.h>
75216+
75217+extern struct gr_policy_state *polstate;
75218+
75219+#define FOR_EACH_ROLE_START(role) \
75220+ role = polstate->role_list; \
75221+ while (role) {
75222+
75223+#define FOR_EACH_ROLE_END(role) \
75224+ role = role->prev; \
75225+ }
75226+
75227+struct path gr_real_root;
75228+
75229+extern struct gr_alloc_state *current_alloc_state;
75230+
75231+u16 acl_sp_role_value;
75232+
75233+static DEFINE_MUTEX(gr_dev_mutex);
75234+
75235+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
75236+extern void gr_clear_learn_entries(void);
75237+
75238+struct gr_arg *gr_usermode __read_only;
75239+unsigned char *gr_system_salt __read_only;
75240+unsigned char *gr_system_sum __read_only;
75241+
75242+static unsigned int gr_auth_attempts = 0;
75243+static unsigned long gr_auth_expires = 0UL;
75244+
75245+struct acl_object_label *fakefs_obj_rw;
75246+struct acl_object_label *fakefs_obj_rwx;
75247+
75248+extern int gr_init_uidset(void);
75249+extern void gr_free_uidset(void);
75250+extern void gr_remove_uid(uid_t uid);
75251+extern int gr_find_uid(uid_t uid);
75252+
75253+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename);
75254+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
75255+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
75256+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
75257+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
75258+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
75259+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
75260+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
75261+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
75262+extern struct acl_subject_label *lookup_acl_subj_label(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
75263+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
75264+extern void assign_special_role(const char *rolename);
75265+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
75266+extern int gr_rbac_disable(void *unused);
75267+extern void gr_enable_rbac_system(void);
75268+
75269+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
75270+{
75271+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
75272+ return -EFAULT;
75273+
75274+ return 0;
75275+}
75276+
75277+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
75278+{
75279+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
75280+ return -EFAULT;
75281+
75282+ return 0;
75283+}
75284+
75285+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
75286+{
75287+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
75288+ return -EFAULT;
75289+
75290+ return 0;
75291+}
75292+
75293+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
75294+{
75295+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
75296+ return -EFAULT;
75297+
75298+ return 0;
75299+}
75300+
75301+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
75302+{
75303+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
75304+ return -EFAULT;
75305+
75306+ return 0;
75307+}
75308+
75309+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
75310+{
75311+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
75312+ return -EFAULT;
75313+
75314+ return 0;
75315+}
75316+
75317+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
75318+{
75319+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
75320+ return -EFAULT;
75321+
75322+ return 0;
75323+}
75324+
75325+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
75326+{
75327+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
75328+ return -EFAULT;
75329+
75330+ return 0;
75331+}
75332+
75333+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
75334+{
75335+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
75336+ return -EFAULT;
75337+
75338+ return 0;
75339+}
75340+
75341+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
75342+{
75343+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
75344+ return -EFAULT;
75345+
75346+ if (((uwrap->version != GRSECURITY_VERSION) &&
75347+ (uwrap->version != 0x2901)) ||
75348+ (uwrap->size != sizeof(struct gr_arg)))
75349+ return -EINVAL;
75350+
75351+ return 0;
75352+}
75353+
75354+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
75355+{
75356+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
75357+ return -EFAULT;
75358+
75359+ return 0;
75360+}
75361+
75362+static size_t get_gr_arg_wrapper_size_normal(void)
75363+{
75364+ return sizeof(struct gr_arg_wrapper);
75365+}
75366+
75367+#ifdef CONFIG_COMPAT
75368+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
75369+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
75370+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
75371+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
75372+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
75373+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
75374+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
75375+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
75376+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
75377+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
75378+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
75379+extern size_t get_gr_arg_wrapper_size_compat(void);
75380+
75381+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
75382+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
75383+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
75384+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
75385+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
75386+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
75387+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
75388+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
75389+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
75390+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
75391+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
75392+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
75393+
75394+#else
75395+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
75396+#define copy_gr_arg copy_gr_arg_normal
75397+#define copy_gr_hash_struct copy_gr_hash_struct_normal
75398+#define copy_acl_object_label copy_acl_object_label_normal
75399+#define copy_acl_subject_label copy_acl_subject_label_normal
75400+#define copy_acl_role_label copy_acl_role_label_normal
75401+#define copy_acl_ip_label copy_acl_ip_label_normal
75402+#define copy_pointer_from_array copy_pointer_from_array_normal
75403+#define copy_sprole_pw copy_sprole_pw_normal
75404+#define copy_role_transition copy_role_transition_normal
75405+#define copy_role_allowed_ip copy_role_allowed_ip_normal
75406+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
75407+#endif
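
The block above picks copy helpers per ABI: with CONFIG_COMPAT they are function pointers assigned once the word size of the loading userland is known, otherwise the #defines alias them to the _normal variants at compile time. A standalone sketch of the pattern (HAVE_COMPAT and both helpers are hypothetical names for this sketch):

#include <stdio.h>

static int copy_normal(int x) { return x; }
static int copy_compat(int x) { return x / 2; } /* pretend: narrower ABI */

#ifdef HAVE_COMPAT
static int (*do_copy)(int) = copy_normal;

static void select_abi(int is_compat)
{
	do_copy = is_compat ? copy_compat : copy_normal;
}
#else
#define do_copy copy_normal
#endif

int main(void)
{
#ifdef HAVE_COMPAT
	select_abi(1);
#endif
	printf("%d\n", do_copy(8));
	return 0;
}
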
75408+
75409+static struct acl_subject_label *
75410+lookup_subject_map(const struct acl_subject_label *userp)
75411+{
75412+ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
75413+ struct subject_map *match;
75414+
75415+ match = polstate->subj_map_set.s_hash[index];
75416+
75417+ while (match && match->user != userp)
75418+ match = match->next;
75419+
75420+ if (match != NULL)
75421+ return match->kernel;
75422+ else
75423+ return NULL;
75424+}
75425+
75426+static void
75427+insert_subj_map_entry(struct subject_map *subjmap)
75428+{
75429+ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
75430+ struct subject_map **curr;
75431+
75432+ subjmap->prev = NULL;
75433+
75434+ curr = &polstate->subj_map_set.s_hash[index];
75435+ if (*curr != NULL)
75436+ (*curr)->prev = subjmap;
75437+
75438+ subjmap->next = *curr;
75439+ *curr = subjmap;
75440+
75441+ return;
75442+}
75443+
75444+static void
75445+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
75446+{
75447+ unsigned int index =
75448+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
75449+ struct acl_role_label **curr;
75450+ struct acl_role_label *tmp, *tmp2;
75451+
75452+ curr = &polstate->acl_role_set.r_hash[index];
75453+
75454+ /* simple case, slot is empty, just set it to our role */
75455+ if (*curr == NULL) {
75456+ *curr = role;
75457+ } else {
75458+ /* example:
75459+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
75460+ 2 -> 3
75461+ */
75462+ /* first check to see if we can already be reached via this slot */
75463+ tmp = *curr;
75464+ while (tmp && tmp != role)
75465+ tmp = tmp->next;
75466+ if (tmp == role) {
75467+ /* we don't need to add ourselves to this slot's chain */
75468+ return;
75469+ }
75470+ /* we need to add ourselves to this chain, two cases */
75471+ if (role->next == NULL) {
75472+ /* simple case, append the current chain to our role */
75473+ role->next = *curr;
75474+ *curr = role;
75475+ } else {
75476+ /* 1 -> 2 -> 3 -> 4
75477+ 2 -> 3 -> 4
75478+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
75479+ */
75480+ /* trickier case: walk our role's chain until we find
75481+ the role for the start of the current slot's chain */
75482+ tmp = role;
75483+ tmp2 = *curr;
75484+ while (tmp->next && tmp->next != tmp2)
75485+ tmp = tmp->next;
75486+ if (tmp->next == tmp2) {
75487+ /* from example above, we found 3, so just
75488+ replace this slot's chain with ours */
75489+ *curr = role;
75490+ } else {
75491+ /* we didn't find a subset of our role's chain
75492+ in the current slot's chain, so append their
75493+ chain to ours, and set us as the first role in
75494+ the slot's chain
75495+
75496+ we could fold this case into the case above,
75497+ but we keep it explicit for clarity
75498+ */
75499+ tmp->next = tmp2;
75500+ *curr = role;
75501+ }
75502+ }
75503+ }
75504+
75505+ return;
75506+}
75507+
75508+static void
75509+insert_acl_role_label(struct acl_role_label *role)
75510+{
75511+ int i;
75512+
75513+ if (polstate->role_list == NULL) {
75514+ polstate->role_list = role;
75515+ role->prev = NULL;
75516+ } else {
75517+ role->prev = polstate->role_list;
75518+ polstate->role_list = role;
75519+ }
75520+
75521+ /* used for hash chains */
75522+ role->next = NULL;
75523+
75524+ if (role->roletype & GR_ROLE_DOMAIN) {
75525+ for (i = 0; i < role->domain_child_num; i++)
75526+ __insert_acl_role_label(role, role->domain_children[i]);
75527+ } else
75528+ __insert_acl_role_label(role, role->uidgid);
75529+}
75530+
75531+static int
75532+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
75533+{
75534+ struct name_entry **curr, *nentry;
75535+ struct inodev_entry *ientry;
75536+ unsigned int len = strlen(name);
75537+ unsigned int key = full_name_hash(name, len);
75538+ unsigned int index = key % polstate->name_set.n_size;
75539+
75540+ curr = &polstate->name_set.n_hash[index];
75541+
75542+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
75543+ curr = &((*curr)->next);
75544+
75545+ if (*curr != NULL)
75546+ return 1;
75547+
75548+ nentry = acl_alloc(sizeof (struct name_entry));
75549+ if (nentry == NULL)
75550+ return 0;
75551+ ientry = acl_alloc(sizeof (struct inodev_entry));
75552+ if (ientry == NULL)
75553+ return 0;
75554+ ientry->nentry = nentry;
75555+
75556+ nentry->key = key;
75557+ nentry->name = name;
75558+ nentry->inode = inode;
75559+ nentry->device = device;
75560+ nentry->len = len;
75561+ nentry->deleted = deleted;
75562+
75563+ nentry->prev = NULL;
75564+ curr = &polstate->name_set.n_hash[index];
75565+ if (*curr != NULL)
75566+ (*curr)->prev = nentry;
75567+ nentry->next = *curr;
75568+ *curr = nentry;
75569+
75570+ /* insert us into the table searchable by inode/dev */
75571+ __insert_inodev_entry(polstate, ientry);
75572+
75573+ return 1;
75574+}
75575+
75576+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
75577+
75578+static void *
75579+create_table(__u32 * len, int elementsize)
75580+{
75581+ unsigned int table_sizes[] = {
75582+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
75583+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
75584+ 4194301, 8388593, 16777213, 33554393, 67108859
75585+ };
75586+ void *newtable = NULL;
75587+ unsigned int pwr = 0;
75588+
75589+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
75590+ table_sizes[pwr] <= *len)
75591+ pwr++;
75592+
75593+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
75594+ return newtable;
75595+
75596+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
75597+ newtable =
75598+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
75599+ else
75600+ newtable = vmalloc(table_sizes[pwr] * elementsize);
75601+
75602+ *len = table_sizes[pwr];
75603+
75604+ return newtable;
75605+}
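
create_table() sizes each chained hash table to the smallest prime strictly larger than the expected element count, keeping the load factor (the lambda in the comment above) near 1, and falls back from kmalloc to vmalloc past a page. A standalone sketch of the size selection (shortened prime table for illustration):

#include <stdio.h>

static const unsigned primes[] = { 7, 13, 31, 61, 127, 251, 509, 1021 };
#define NPRIMES (sizeof(primes) / sizeof(primes[0]))

static unsigned pick_size(unsigned len)
{
	unsigned pwr = 0;

	while (pwr < NPRIMES - 1 && primes[pwr] <= len)
		pwr++;
	/* 0 means the request exceeds the largest table size */
	return primes[pwr] > len ? primes[pwr] : 0;
}

int main(void)
{
	printf("%u\n", pick_size(100));  /* 127 */
	printf("%u\n", pick_size(127));  /* 251: strictly larger */
	printf("%u\n", pick_size(5000)); /* 0 with this short table */
	return 0;
}
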
75606+
75607+static int
75608+init_variables(const struct gr_arg *arg, bool reload)
75609+{
75610+ struct task_struct *reaper = init_pid_ns.child_reaper;
75611+ unsigned int stacksize;
75612+
75613+ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
75614+ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
75615+ polstate->name_set.n_size = arg->role_db.num_objects;
75616+ polstate->inodev_set.i_size = arg->role_db.num_objects;
75617+
75618+ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
75619+ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
75620+ return 1;
75621+
75622+ if (!reload) {
75623+ if (!gr_init_uidset())
75624+ return 1;
75625+ }
75626+
75627+ /* set up the stack that holds allocation info */
75628+
75629+ stacksize = arg->role_db.num_pointers + 5;
75630+
75631+ if (!acl_alloc_stack_init(stacksize))
75632+ return 1;
75633+
75634+ if (!reload) {
75635+ /* grab reference for the real root dentry and vfsmount */
75636+ get_fs_root(reaper->fs, &gr_real_root);
75637+
75638+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
75639+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
75640+#endif
75641+
75642+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
75643+ if (fakefs_obj_rw == NULL)
75644+ return 1;
75645+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
75646+
75647+ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
75648+ if (fakefs_obj_rwx == NULL)
75649+ return 1;
75650+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
75651+ }
75652+
75653+ polstate->subj_map_set.s_hash =
75654+ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
75655+ polstate->acl_role_set.r_hash =
75656+ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
75657+ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
75658+ polstate->inodev_set.i_hash =
75659+ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
75660+
75661+ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
75662+ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
75663+ return 1;
75664+
75665+ memset(polstate->subj_map_set.s_hash, 0,
75666+ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
75667+ memset(polstate->acl_role_set.r_hash, 0,
75668+ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
75669+ memset(polstate->name_set.n_hash, 0,
75670+ sizeof (struct name_entry *) * polstate->name_set.n_size);
75671+ memset(polstate->inodev_set.i_hash, 0,
75672+ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
75673+
75674+ return 0;
75675+}
75676+
75677+/* free information not needed after startup;
75678+ currently this contains the user->kernel pointer mappings for subjects
75679+*/
75680+
75681+static void
75682+free_init_variables(void)
75683+{
75684+ __u32 i;
75685+
75686+ if (polstate->subj_map_set.s_hash) {
75687+ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
75688+ if (polstate->subj_map_set.s_hash[i]) {
75689+ kfree(polstate->subj_map_set.s_hash[i]);
75690+ polstate->subj_map_set.s_hash[i] = NULL;
75691+ }
75692+ }
75693+
75694+ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
75695+ PAGE_SIZE)
75696+ kfree(polstate->subj_map_set.s_hash);
75697+ else
75698+ vfree(polstate->subj_map_set.s_hash);
75699+ }
75700+
75701+ return;
75702+}
75703+
75704+static void
75705+free_variables(bool reload)
75706+{
75707+ struct acl_subject_label *s;
75708+ struct acl_role_label *r;
75709+ struct task_struct *task, *task2;
75710+ unsigned int x;
75711+
75712+ if (!reload) {
75713+ gr_clear_learn_entries();
75714+
75715+ read_lock(&tasklist_lock);
75716+ do_each_thread(task2, task) {
75717+ task->acl_sp_role = 0;
75718+ task->acl_role_id = 0;
75719+ task->inherited = 0;
75720+ task->acl = NULL;
75721+ task->role = NULL;
75722+ } while_each_thread(task2, task);
75723+ read_unlock(&tasklist_lock);
75724+
75725+ kfree(fakefs_obj_rw);
75726+ fakefs_obj_rw = NULL;
75727+ kfree(fakefs_obj_rwx);
75728+ fakefs_obj_rwx = NULL;
75729+
75730+ /* release the reference to the real root dentry and vfsmount */
75731+ path_put(&gr_real_root);
75732+ memset(&gr_real_root, 0, sizeof(gr_real_root));
75733+ }
75734+
75735+ /* free all object hash tables */
75736+
75737+ FOR_EACH_ROLE_START(r)
75738+ if (r->subj_hash == NULL)
75739+ goto next_role;
75740+ FOR_EACH_SUBJECT_START(r, s, x)
75741+ if (s->obj_hash == NULL)
75742+ break;
75743+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
75744+ kfree(s->obj_hash);
75745+ else
75746+ vfree(s->obj_hash);
75747+ FOR_EACH_SUBJECT_END(s, x)
75748+ FOR_EACH_NESTED_SUBJECT_START(r, s)
75749+ if (s->obj_hash == NULL)
75750+ break;
75751+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
75752+ kfree(s->obj_hash);
75753+ else
75754+ vfree(s->obj_hash);
75755+ FOR_EACH_NESTED_SUBJECT_END(s)
75756+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
75757+ kfree(r->subj_hash);
75758+ else
75759+ vfree(r->subj_hash);
75760+ r->subj_hash = NULL;
75761+next_role:
75762+ FOR_EACH_ROLE_END(r)
75763+
75764+ acl_free_all();
75765+
75766+ if (polstate->acl_role_set.r_hash) {
75767+ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
75768+ PAGE_SIZE)
75769+ kfree(polstate->acl_role_set.r_hash);
75770+ else
75771+ vfree(polstate->acl_role_set.r_hash);
75772+ }
75773+ if (polstate->name_set.n_hash) {
75774+ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
75775+ PAGE_SIZE)
75776+ kfree(polstate->name_set.n_hash);
75777+ else
75778+ vfree(polstate->name_set.n_hash);
75779+ }
75780+
75781+ if (polstate->inodev_set.i_hash) {
75782+ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
75783+ PAGE_SIZE)
75784+ kfree(polstate->inodev_set.i_hash);
75785+ else
75786+ vfree(polstate->inodev_set.i_hash);
75787+ }
75788+
75789+ if (!reload)
75790+ gr_free_uidset();
75791+
75792+ memset(&polstate->name_set, 0, sizeof (struct name_db));
75793+ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
75794+ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
75795+ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
75796+
75797+ polstate->default_role = NULL;
75798+ polstate->kernel_role = NULL;
75799+ polstate->role_list = NULL;
75800+
75801+ return;
75802+}
75803+
75804+static struct acl_subject_label *
75805+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
75806+
75807+static int alloc_and_copy_string(char **name, unsigned int maxlen)
75808+{
75809+ unsigned int len = strnlen_user(*name, maxlen);
75810+ char *tmp;
75811+
75812+ if (!len || len >= maxlen)
75813+ return -EINVAL;
75814+
75815+ if ((tmp = (char *) acl_alloc(len)) == NULL)
75816+ return -ENOMEM;
75817+
75818+ if (copy_from_user(tmp, *name, len))
75819+ return -EFAULT;
75820+
75821+ tmp[len-1] = '\0';
75822+ *name = tmp;
75823+
75824+ return 0;
75825+}
75826+
75827+static int
75828+copy_user_glob(struct acl_object_label *obj)
75829+{
75830+ struct acl_object_label *g_tmp, **guser;
75831+ int error;
75832+
75833+ if (obj->globbed == NULL)
75834+ return 0;
75835+
75836+ guser = &obj->globbed;
75837+ while (*guser) {
75838+ g_tmp = (struct acl_object_label *)
75839+ acl_alloc(sizeof (struct acl_object_label));
75840+ if (g_tmp == NULL)
75841+ return -ENOMEM;
75842+
75843+ if (copy_acl_object_label(g_tmp, *guser))
75844+ return -EFAULT;
75845+
75846+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
75847+ if (error)
75848+ return error;
75849+
75850+ *guser = g_tmp;
75851+ guser = &(g_tmp->next);
75852+ }
75853+
75854+ return 0;
75855+}
75856+
75857+static int
75858+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
75859+ struct acl_role_label *role)
75860+{
75861+ struct acl_object_label *o_tmp;
75862+ int ret;
75863+
75864+ while (userp) {
75865+ if ((o_tmp = (struct acl_object_label *)
75866+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
75867+ return -ENOMEM;
75868+
75869+ if (copy_acl_object_label(o_tmp, userp))
75870+ return -EFAULT;
75871+
75872+ userp = o_tmp->prev;
75873+
75874+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
75875+ if (ret)
75876+ return ret;
75877+
75878+ insert_acl_obj_label(o_tmp, subj);
75879+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
75880+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
75881+ return -ENOMEM;
75882+
75883+ ret = copy_user_glob(o_tmp);
75884+ if (ret)
75885+ return ret;
75886+
75887+ if (o_tmp->nested) {
75888+ int already_copied;
75889+
75890+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
75891+ if (IS_ERR(o_tmp->nested))
75892+ return PTR_ERR(o_tmp->nested);
75893+
75894+ /* insert into nested subject list if we haven't copied this one yet
75895+ to prevent duplicate entries */
75896+ if (!already_copied) {
75897+ o_tmp->nested->next = role->hash->first;
75898+ role->hash->first = o_tmp->nested;
75899+ }
75900+ }
75901+ }
75902+
75903+ return 0;
75904+}
75905+
75906+static __u32
75907+count_user_subjs(struct acl_subject_label *userp)
75908+{
75909+ struct acl_subject_label s_tmp;
75910+ __u32 num = 0;
75911+
75912+ while (userp) {
75913+ if (copy_acl_subject_label(&s_tmp, userp))
75914+ break;
75915+
75916+ userp = s_tmp.prev;
75917+ }
75918+
75919+ return num;
75920+}
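
Note that as written the loop body in count_user_subjs() never increments num, so the function returns 0 for any input, whereas the analogous count_user_objs() below does count its entries. If an actual subject count were needed here, a num++ after the copy, mirroring count_user_objs(), would be the obvious form.
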
75921+
75922+static int
75923+copy_user_allowedips(struct acl_role_label *rolep)
75924+{
75925+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
75926+
75927+ ruserip = rolep->allowed_ips;
75928+
75929+ while (ruserip) {
75930+ rlast = rtmp;
75931+
75932+ if ((rtmp = (struct role_allowed_ip *)
75933+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
75934+ return -ENOMEM;
75935+
75936+ if (copy_role_allowed_ip(rtmp, ruserip))
75937+ return -EFAULT;
75938+
75939+ ruserip = rtmp->prev;
75940+
75941+ if (!rlast) {
75942+ rtmp->prev = NULL;
75943+ rolep->allowed_ips = rtmp;
75944+ } else {
75945+ rlast->next = rtmp;
75946+ rtmp->prev = rlast;
75947+ }
75948+
75949+ if (!ruserip)
75950+ rtmp->next = NULL;
75951+ }
75952+
75953+ return 0;
75954+}
75955+
75956+static int
75957+copy_user_transitions(struct acl_role_label *rolep)
75958+{
75959+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
75960+ int error;
75961+
75962+ rusertp = rolep->transitions;
75963+
75964+ while (rusertp) {
75965+ rlast = rtmp;
75966+
75967+ if ((rtmp = (struct role_transition *)
75968+ acl_alloc(sizeof (struct role_transition))) == NULL)
75969+ return -ENOMEM;
75970+
75971+ if (copy_role_transition(rtmp, rusertp))
75972+ return -EFAULT;
75973+
75974+ rusertp = rtmp->prev;
75975+
75976+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
75977+ if (error)
75978+ return error;
75979+
75980+ if (!rlast) {
75981+ rtmp->prev = NULL;
75982+ rolep->transitions = rtmp;
75983+ } else {
75984+ rlast->next = rtmp;
75985+ rtmp->prev = rlast;
75986+ }
75987+
75988+ if (!rusertp)
75989+ rtmp->next = NULL;
75990+ }
75991+
75992+ return 0;
75993+}
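
copy_user_allowedips() and copy_user_transitions() are the order-preserving variant of the same walk: a trailing pointer (rlast) splices each new kernel node behind the previous one, and the tail's next pointer is cleared once the user list is exhausted. The same relinking in generic form; struct dnode and import_dlist are illustrative, and kmalloc() again stands in for acl_alloc():

#include <linux/slab.h>
#include <linux/uaccess.h>

struct dnode {
	struct dnode *prev, *next;	/* user addresses until rewritten */
	int payload;
};

/* Rebuild a user-side list in original order; *headp must start out NULL.
 * On error, the partial kernel list at *headp stays safe for the caller
 * to walk and free. */
static int import_dlist(struct dnode __user *unode, struct dnode **headp)
{
	struct dnode *knode, *last = NULL;

	while (unode) {
		knode = kmalloc(sizeof(*knode), GFP_KERNEL);
		if (!knode)
			return -ENOMEM;
		if (copy_from_user(knode, unode, sizeof(*knode))) {
			kfree(knode);
			return -EFAULT;
		}

		unode = (struct dnode __user *)knode->prev;	/* next user node */
		knode->next = NULL;	/* keep the tail walkable at all times */

		if (!last) {		/* first copy becomes the head */
			knode->prev = NULL;
			*headp = knode;
		} else {		/* splice behind the previous copy */
			last->next = knode;
			knode->prev = last;
		}
		last = knode;
	}
	return 0;
}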
75994+
75995+static __u32 count_user_objs(const struct acl_object_label __user *userp)
75996+{
75997+ struct acl_object_label o_tmp;
75998+ __u32 num = 0;
75999+
76000+ while (userp) {
76001+ if (copy_acl_object_label(&o_tmp, userp))
76002+ break;
76003+
76004+ userp = o_tmp.prev;
76005+ num++;
76006+ }
76007+
76008+ return num;
76009+}
76010+
76011+static struct acl_subject_label *
76012+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
76013+{
76014+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
76015+ __u32 num_objs;
76016+ struct acl_ip_label **i_tmp, *i_utmp2;
76017+ struct gr_hash_struct ghash;
76018+ struct subject_map *subjmap;
76019+ unsigned int i_num;
76020+ int err;
76021+
76022+ if (already_copied != NULL)
76023+ *already_copied = 0;
76024+
76025+ s_tmp = lookup_subject_map(userp);
76026+
76027+ /* we've already copied this subject into the kernel, just return
76028+ the reference to it, and don't copy it over again
76029+ */
76030+ if (s_tmp) {
76031+ if (already_copied != NULL)
76032+ *already_copied = 1;
76033+ return(s_tmp);
76034+ }
76035+
76036+ if ((s_tmp = (struct acl_subject_label *)
76037+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
76038+ return ERR_PTR(-ENOMEM);
76039+
76040+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
76041+ if (subjmap == NULL)
76042+ return ERR_PTR(-ENOMEM);
76043+
76044+ subjmap->user = userp;
76045+ subjmap->kernel = s_tmp;
76046+ insert_subj_map_entry(subjmap);
76047+
76048+ if (copy_acl_subject_label(s_tmp, userp))
76049+ return ERR_PTR(-EFAULT);
76050+
76051+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
76052+ if (err)
76053+ return ERR_PTR(err);
76054+
76055+ if (!strcmp(s_tmp->filename, "/"))
76056+ role->root_label = s_tmp;
76057+
76058+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
76059+ return ERR_PTR(-EFAULT);
76060+
76061+ /* copy user and group transition tables */
76062+
76063+ if (s_tmp->user_trans_num) {
76064+ uid_t *uidlist;
76065+
76066+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
76067+ if (uidlist == NULL)
76068+ return ERR_PTR(-ENOMEM);
76069+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
76070+ return ERR_PTR(-EFAULT);
76071+
76072+ s_tmp->user_transitions = uidlist;
76073+ }
76074+
76075+ if (s_tmp->group_trans_num) {
76076+ gid_t *gidlist;
76077+
76078+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
76079+ if (gidlist == NULL)
76080+ return ERR_PTR(-ENOMEM);
76081+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
76082+ return ERR_PTR(-EFAULT);
76083+
76084+ s_tmp->group_transitions = gidlist;
76085+ }
76086+
76087+ /* set up object hash table */
76088+ num_objs = count_user_objs(ghash.first);
76089+
76090+ s_tmp->obj_hash_size = num_objs;
76091+ s_tmp->obj_hash =
76092+ (struct acl_object_label **)
76093+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
76094+
76095+ if (!s_tmp->obj_hash)
76096+ return ERR_PTR(-ENOMEM);
76097+
76098+ memset(s_tmp->obj_hash, 0,
76099+ s_tmp->obj_hash_size *
76100+ sizeof (struct acl_object_label *));
76101+
76102+ /* add in objects */
76103+ err = copy_user_objs(ghash.first, s_tmp, role);
76104+
76105+ if (err)
76106+ return ERR_PTR(err);
76107+
76108+ /* set pointer for parent subject */
76109+ if (s_tmp->parent_subject) {
76110+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
76111+
76112+ if (IS_ERR(s_tmp2))
76113+ return s_tmp2;
76114+
76115+ s_tmp->parent_subject = s_tmp2;
76116+ }
76117+
76118+ /* add in ip acls */
76119+
76120+ if (!s_tmp->ip_num) {
76121+ s_tmp->ips = NULL;
76122+ goto insert;
76123+ }
76124+
76125+ i_tmp =
76126+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
76127+ sizeof (struct acl_ip_label *));
76128+
76129+ if (!i_tmp)
76130+ return ERR_PTR(-ENOMEM);
76131+
76132+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
76133+ *(i_tmp + i_num) =
76134+ (struct acl_ip_label *)
76135+ acl_alloc(sizeof (struct acl_ip_label));
76136+ if (!*(i_tmp + i_num))
76137+ return ERR_PTR(-ENOMEM);
76138+
76139+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
76140+ return ERR_PTR(-EFAULT);
76141+
76142+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
76143+ return ERR_PTR(-EFAULT);
76144+
76145+ if ((*(i_tmp + i_num))->iface == NULL)
76146+ continue;
76147+
76148+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
76149+ if (err)
76150+ return ERR_PTR(err);
76151+ }
76152+
76153+ s_tmp->ips = i_tmp;
76154+
76155+insert:
76156+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
76157+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
76158+ return ERR_PTR(-ENOMEM);
76159+
76160+ return s_tmp;
76161+}
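
do_copy_user_subj() consults lookup_subject_map() before copying anything, so a subject referenced from several places (nested objects, parent_subject chains) is materialized exactly once and every kernel reference aliases the same copy. A sketch of that user-pointer-to-kernel-pointer memo table built on the generic hashtable helpers; the ptr_map* names are hypothetical, and the patch's own subj_map_set is sized from the policy rather than fixed:

#include <linux/hashtable.h>
#include <linux/slab.h>

struct ptr_map_entry {
	const void __user *user;
	void *kernel;
	struct hlist_node node;
};

static DEFINE_HASHTABLE(ptr_map, 8);		/* 256 buckets */

static void *ptr_map_lookup(const void __user *user)
{
	struct ptr_map_entry *e;

	hash_for_each_possible(ptr_map, e, node, (unsigned long)user)
		if (e->user == user)
			return e->kernel;
	return NULL;	/* not copied yet */
}

static int ptr_map_insert(const void __user *user, void *kernel)
{
	struct ptr_map_entry *e = kmalloc(sizeof(*e), GFP_KERNEL);

	if (!e)
		return -ENOMEM;
	e->user = user;
	e->kernel = kernel;
	hash_add(ptr_map, &e->node, (unsigned long)user);
	return 0;
}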
76162+
76163+static int
76164+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
76165+{
76166+ struct acl_subject_label s_pre;
76167+ struct acl_subject_label * ret;
76168+ int err;
76169+
76170+ while (userp) {
76171+ if (copy_acl_subject_label(&s_pre, userp))
76172+ return -EFAULT;
76173+
76174+ ret = do_copy_user_subj(userp, role, NULL);
76175+
76176+ err = PTR_ERR(ret);
76177+ if (IS_ERR(ret))
76178+ return err;
76179+
76180+ insert_acl_subj_label(ret, role);
76181+
76182+ userp = s_pre.prev;
76183+ }
76184+
76185+ return 0;
76186+}
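
Note the calling convention between these two functions: do_copy_user_subj() returns either a valid pointer or an errno folded into the pointer with ERR_PTR(), and copy_user_subjs() unfolds it via IS_ERR()/PTR_ERR(). A minimal illustration of the idiom (struct resource_blob and both helpers are invented for the example):

#include <linux/err.h>
#include <linux/slab.h>

struct resource_blob {
	size_t size;
	char data[];
};

static struct resource_blob *make_blob(size_t n)
{
	struct resource_blob *b = kzalloc(sizeof(*b) + n, GFP_KERNEL);

	if (!b)
		return ERR_PTR(-ENOMEM);	/* the errno rides in the pointer */
	b->size = n;
	return b;
}

static int use_blob(size_t n)
{
	struct resource_blob *b = make_blob(n);

	if (IS_ERR(b))
		return PTR_ERR(b);	/* unfold the errno; no out-parameter needed */

	/* ... use b->data ... */
	kfree(b);
	return 0;
}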
76187+
76188+static int
76189+copy_user_acl(struct gr_arg *arg)
76190+{
76191+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
76192+ struct acl_subject_label *subj_list;
76193+ struct sprole_pw *sptmp;
76194+ struct gr_hash_struct *ghash;
76195+ uid_t *domainlist;
76196+ unsigned int r_num;
76197+ int err = 0;
76198+ __u16 i;
76199+ __u32 num_subjs;
76200+
76201+ /* we need a default and kernel role */
76202+ if (arg->role_db.num_roles < 2)
76203+ return -EINVAL;
76204+
76205+ /* copy special role authentication info from userspace */
76206+
76207+ polstate->num_sprole_pws = arg->num_sprole_pws;
76208+ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
76209+
76210+ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
76211+ return -ENOMEM;
76212+
76213+ for (i = 0; i < polstate->num_sprole_pws; i++) {
76214+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
76215+ if (!sptmp)
76216+ return -ENOMEM;
76217+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
76218+ return -EFAULT;
76219+
76220+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
76221+ if (err)
76222+ return err;
76223+
76224+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
76225+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
76226+#endif
76227+
76228+ polstate->acl_special_roles[i] = sptmp;
76229+ }
76230+
76231+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
76232+
76233+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
76234+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
76235+
76236+ if (!r_tmp)
76237+ return -ENOMEM;
76238+
76239+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
76240+ return -EFAULT;
76241+
76242+ if (copy_acl_role_label(r_tmp, r_utmp2))
76243+ return -EFAULT;
76244+
76245+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
76246+ if (err)
76247+ return err;
76248+
76249+ if (!strcmp(r_tmp->rolename, "default")
76250+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
76251+ polstate->default_role = r_tmp;
76252+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
76253+ polstate->kernel_role = r_tmp;
76254+ }
76255+
76256+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
76257+ return -ENOMEM;
76258+
76259+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
76260+ return -EFAULT;
76261+
76262+ r_tmp->hash = ghash;
76263+
76264+ num_subjs = count_user_subjs(r_tmp->hash->first);
76265+
76266+ r_tmp->subj_hash_size = num_subjs;
76267+ r_tmp->subj_hash =
76268+ (struct acl_subject_label **)
76269+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
76270+
76271+ if (!r_tmp->subj_hash)
76272+ return -ENOMEM;
76273+
76274+ err = copy_user_allowedips(r_tmp);
76275+ if (err)
76276+ return err;
76277+
76278+ /* copy domain info */
76279+ if (r_tmp->domain_children != NULL) {
76280+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
76281+ if (domainlist == NULL)
76282+ return -ENOMEM;
76283+
76284+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
76285+ return -EFAULT;
76286+
76287+ r_tmp->domain_children = domainlist;
76288+ }
76289+
76290+ err = copy_user_transitions(r_tmp);
76291+ if (err)
76292+ return err;
76293+
76294+ memset(r_tmp->subj_hash, 0,
76295+ r_tmp->subj_hash_size *
76296+ sizeof (struct acl_subject_label *));
76297+
76298+ /* acquire the list of subjects, then NULL out
76299+ the list prior to parsing the subjects for this role,
76300+ as during this parsing the list is replaced with a list
76301+ of *nested* subjects for the role
76302+ */
76303+ subj_list = r_tmp->hash->first;
76304+
76305+ /* set nested subject list to null */
76306+ r_tmp->hash->first = NULL;
76307+
76308+ err = copy_user_subjs(subj_list, r_tmp);
76309+
76310+ if (err)
76311+ return err;
76312+
76313+ insert_acl_role_label(r_tmp);
76314+ }
76315+
76316+ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
76317+ return -EINVAL;
76318+
76319+ return err;
76320+}
76321+
76322+static int gracl_reload_apply_policies(void *reload)
76323+{
76324+ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
76325+ struct task_struct *task, *task2;
76326+ struct acl_role_label *role, *rtmp;
76327+ struct acl_subject_label *subj;
76328+ const struct cred *cred;
76329+ int role_applied;
76330+ int ret = 0;
76331+
76332+ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
76333+ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
76334+
76335+ /* first make sure we'll be able to apply the new policy cleanly */
76336+ do_each_thread(task2, task) {
76337+ if (task->exec_file == NULL)
76338+ continue;
76339+ role_applied = 0;
76340+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
76341+ /* preserve special roles */
76342+ FOR_EACH_ROLE_START(role)
76343+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
76344+ rtmp = task->role;
76345+ task->role = role;
76346+ role_applied = 1;
76347+ break;
76348+ }
76349+ FOR_EACH_ROLE_END(role)
76350+ }
76351+ if (!role_applied) {
76352+ cred = __task_cred(task);
76353+ rtmp = task->role;
76354+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
76355+ }
76356+	/* this handles non-nested inherited subjects; nested subjects will still
76357+ be dropped currently */
76358+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
76359+ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL);
76360+ /* change the role back so that we've made no modifications to the policy */
76361+ task->role = rtmp;
76362+
76363+ if (subj == NULL || task->tmpacl == NULL) {
76364+ ret = -EINVAL;
76365+ goto out;
76366+ }
76367+ } while_each_thread(task2, task);
76368+
76369+ /* now actually apply the policy */
76370+
76371+ do_each_thread(task2, task) {
76372+ if (task->exec_file) {
76373+ role_applied = 0;
76374+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
76375+ /* preserve special roles */
76376+ FOR_EACH_ROLE_START(role)
76377+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
76378+ task->role = role;
76379+ role_applied = 1;
76380+ break;
76381+ }
76382+ FOR_EACH_ROLE_END(role)
76383+ }
76384+ if (!role_applied) {
76385+ cred = __task_cred(task);
76386+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
76387+ }
76388+			/* this handles non-nested inherited subjects; nested subjects will still
76389+ be dropped currently */
76390+ if (!reload_state->oldmode && task->inherited)
76391+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
76392+ else {
76393+ /* looked up and tagged to the task previously */
76394+ subj = task->tmpacl;
76395+ }
76396+ /* subj will be non-null */
76397+ __gr_apply_subject_to_task(polstate, task, subj);
76398+ if (reload_state->oldmode) {
76399+ task->acl_role_id = 0;
76400+ task->acl_sp_role = 0;
76401+ task->inherited = 0;
76402+ }
76403+ } else {
76404+ // it's a kernel process
76405+ task->role = polstate->kernel_role;
76406+ task->acl = polstate->kernel_role->root_label;
76407+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
76408+ task->acl->mode &= ~GR_PROCFIND;
76409+#endif
76410+ }
76411+ } while_each_thread(task2, task);
76412+
76413+ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
76414+ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
76415+
76416+out:
76417+
76418+ return ret;
76419+}
76420+
76421+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
76422+{
76423+ struct gr_reload_state new_reload_state = { };
76424+ int err;
76425+
76426+ new_reload_state.oldpolicy_ptr = polstate;
76427+ new_reload_state.oldalloc_ptr = current_alloc_state;
76428+ new_reload_state.oldmode = oldmode;
76429+
76430+ current_alloc_state = &new_reload_state.newalloc;
76431+ polstate = &new_reload_state.newpolicy;
76432+
76433+ /* everything relevant is now saved off, copy in the new policy */
76434+ if (init_variables(args, true)) {
76435+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
76436+ err = -ENOMEM;
76437+ goto error;
76438+ }
76439+
76440+ err = copy_user_acl(args);
76441+ free_init_variables();
76442+ if (err)
76443+ goto error;
76444+	/* the new policy is copied in, with the old policy available via saved_state.
76445+	   First go through applying roles, making sure to preserve special roles,
76446+	   then apply new subjects, making sure to preserve inherited and nested subjects,
76447+ though currently only inherited subjects will be preserved
76448+ */
76449+ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
76450+ if (err)
76451+ goto error;
76452+
76453+ /* we've now applied the new policy, so restore the old policy state to free it */
76454+ polstate = &new_reload_state.oldpolicy;
76455+ current_alloc_state = &new_reload_state.oldalloc;
76456+ free_variables(true);
76457+
76458+ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
76459+ to running_polstate/current_alloc_state inside stop_machine
76460+ */
76461+ err = 0;
76462+ goto out;
76463+error:
76464+ /* on error of loading the new policy, we'll just keep the previous
76465+ policy set around
76466+ */
76467+ free_variables(true);
76468+
76469+ /* doesn't affect runtime, but maintains consistent state */
76470+out:
76471+ polstate = new_reload_state.oldpolicy_ptr;
76472+ current_alloc_state = new_reload_state.oldalloc_ptr;
76473+
76474+ return err;
76475+}
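
gracl_reload() prepares the complete replacement policy off to the side and only commits it inside stop_machine(), which quiesces every other CPU while the task->role/task->acl pointers are rewritten, so no code path can observe a half-applied policy. A reduced sketch of that commit pattern, assuming a hypothetical struct cfg and commit_config():

#include <linux/stop_machine.h>

struct cfg { int setting; };

static struct cfg *live_cfg;	/* read by other kernel paths */

static int do_swap(void *data)
{
	/* runs with every other CPU quiesced and interrupts off */
	live_cfg = data;
	return 0;
}

/* Build and validate @replacement completely before calling this; the
 * stopped-machine window should only flip pointers and must never sleep. */
static int commit_config(struct cfg *replacement)
{
	return stop_machine(do_swap, replacement, NULL);
}

Everything slow (allocation, copy_from_user) happens before the call; gracl_reload() follows the same discipline by fully building new_reload_state.newpolicy first and even dry-running the role/subject lookups before the swap.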
76476+
76477+static int
76478+gracl_init(struct gr_arg *args)
76479+{
76480+ int error = 0;
76481+
76482+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
76483+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
76484+
76485+ if (init_variables(args, false)) {
76486+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
76487+ error = -ENOMEM;
76488+ goto out;
76489+ }
76490+
76491+ error = copy_user_acl(args);
76492+ free_init_variables();
76493+ if (error)
76494+ goto out;
76495+
76496+ error = gr_set_acls(0);
76497+ if (error)
76498+ goto out;
76499+
76500+ gr_enable_rbac_system();
76501+
76502+ return 0;
76503+
76504+out:
76505+ free_variables(false);
76506+ return error;
76507+}
76508+
76509+static int
76510+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
76511+ unsigned char **sum)
76512+{
76513+ struct acl_role_label *r;
76514+ struct role_allowed_ip *ipp;
76515+ struct role_transition *trans;
76516+ unsigned int i;
76517+ int found = 0;
76518+ u32 curr_ip = current->signal->curr_ip;
76519+
76520+ current->signal->saved_ip = curr_ip;
76521+
76522+ /* check transition table */
76523+
76524+ for (trans = current->role->transitions; trans; trans = trans->next) {
76525+ if (!strcmp(rolename, trans->rolename)) {
76526+ found = 1;
76527+ break;
76528+ }
76529+ }
76530+
76531+ if (!found)
76532+ return 0;
76533+
76534+ /* handle special roles that do not require authentication
76535+ and check ip */
76536+
76537+ FOR_EACH_ROLE_START(r)
76538+ if (!strcmp(rolename, r->rolename) &&
76539+ (r->roletype & GR_ROLE_SPECIAL)) {
76540+ found = 0;
76541+ if (r->allowed_ips != NULL) {
76542+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
76543+ if ((ntohl(curr_ip) & ipp->netmask) ==
76544+ (ntohl(ipp->addr) & ipp->netmask))
76545+ found = 1;
76546+ }
76547+ } else
76548+ found = 2;
76549+ if (!found)
76550+ return 0;
76551+
76552+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
76553+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
76554+ *salt = NULL;
76555+ *sum = NULL;
76556+ return 1;
76557+ }
76558+ }
76559+ FOR_EACH_ROLE_END(r)
76560+
76561+ for (i = 0; i < polstate->num_sprole_pws; i++) {
76562+ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
76563+ *salt = polstate->acl_special_roles[i]->salt;
76564+ *sum = polstate->acl_special_roles[i]->sum;
76565+ return 1;
76566+ }
76567+ }
76568+
76569+ return 0;
76570+}
76571+
76572+int gr_check_secure_terminal(struct task_struct *task)
76573+{
76574+ struct task_struct *p, *p2, *p3;
76575+ struct files_struct *files;
76576+ struct fdtable *fdt;
76577+ struct file *our_file = NULL, *file;
76578+ int i;
76579+
76580+ if (task->signal->tty == NULL)
76581+ return 1;
76582+
76583+ files = get_files_struct(task);
76584+ if (files != NULL) {
76585+ rcu_read_lock();
76586+ fdt = files_fdtable(files);
76587+ for (i=0; i < fdt->max_fds; i++) {
76588+ file = fcheck_files(files, i);
76589+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
76590+ get_file(file);
76591+ our_file = file;
76592+ }
76593+ }
76594+ rcu_read_unlock();
76595+ put_files_struct(files);
76596+ }
76597+
76598+ if (our_file == NULL)
76599+ return 1;
76600+
76601+ read_lock(&tasklist_lock);
76602+ do_each_thread(p2, p) {
76603+ files = get_files_struct(p);
76604+ if (files == NULL ||
76605+ (p->signal && p->signal->tty == task->signal->tty)) {
76606+ if (files != NULL)
76607+ put_files_struct(files);
76608+ continue;
76609+ }
76610+ rcu_read_lock();
76611+ fdt = files_fdtable(files);
76612+ for (i=0; i < fdt->max_fds; i++) {
76613+ file = fcheck_files(files, i);
76614+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
76615+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
76616+ p3 = task;
76617+ while (task_pid_nr(p3) > 0) {
76618+ if (p3 == p)
76619+ break;
76620+ p3 = p3->real_parent;
76621+ }
76622+ if (p3 == p)
76623+ break;
76624+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
76625+ gr_handle_alertkill(p);
76626+ rcu_read_unlock();
76627+ put_files_struct(files);
76628+ read_unlock(&tasklist_lock);
76629+ fput(our_file);
76630+ return 0;
76631+ }
76632+ }
76633+ rcu_read_unlock();
76634+ put_files_struct(files);
76635+ } while_each_thread(p2, p);
76636+ read_unlock(&tasklist_lock);
76637+
76638+ fput(our_file);
76639+ return 1;
76640+}
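
The terminal check above uses the canonical recipe for scanning another task's open files: pin the table with get_files_struct(), take rcu_read_lock() because the fdtable can be resized concurrently, and probe individual slots with fcheck_files(). A compact sketch of the same scan (count_chrdev_fds is a made-up helper):

#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/sched.h>

/* Count how many open fds of @task refer to character devices. */
static int count_chrdev_fds(struct task_struct *task)
{
	struct files_struct *files = get_files_struct(task);
	struct fdtable *fdt;
	struct file *file;
	int i, n = 0;

	if (!files)
		return 0;

	rcu_read_lock();			/* fdtable may be resized under us */
	fdt = files_fdtable(files);
	for (i = 0; i < fdt->max_fds; i++) {
		file = fcheck_files(files, i);	/* NULL if fd i is closed */
		if (file && S_ISCHR(file_inode(file)->i_mode))
			n++;
	}
	rcu_read_unlock();
	put_files_struct(files);

	return n;
}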
76641+
76642+ssize_t
76643+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
76644+{
76645+ struct gr_arg_wrapper uwrap;
76646+ unsigned char *sprole_salt = NULL;
76647+ unsigned char *sprole_sum = NULL;
76648+ int error = 0;
76649+ int error2 = 0;
76650+ size_t req_count = 0;
76651+ unsigned char oldmode = 0;
76652+
76653+ mutex_lock(&gr_dev_mutex);
76654+
76655+ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
76656+ error = -EPERM;
76657+ goto out;
76658+ }
76659+
76660+#ifdef CONFIG_COMPAT
76661+ pax_open_kernel();
76662+ if (is_compat_task()) {
76663+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
76664+ copy_gr_arg = &copy_gr_arg_compat;
76665+ copy_acl_object_label = &copy_acl_object_label_compat;
76666+ copy_acl_subject_label = &copy_acl_subject_label_compat;
76667+ copy_acl_role_label = &copy_acl_role_label_compat;
76668+ copy_acl_ip_label = &copy_acl_ip_label_compat;
76669+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
76670+ copy_role_transition = &copy_role_transition_compat;
76671+ copy_sprole_pw = &copy_sprole_pw_compat;
76672+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
76673+ copy_pointer_from_array = &copy_pointer_from_array_compat;
76674+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
76675+ } else {
76676+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
76677+ copy_gr_arg = &copy_gr_arg_normal;
76678+ copy_acl_object_label = &copy_acl_object_label_normal;
76679+ copy_acl_subject_label = &copy_acl_subject_label_normal;
76680+ copy_acl_role_label = &copy_acl_role_label_normal;
76681+ copy_acl_ip_label = &copy_acl_ip_label_normal;
76682+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
76683+ copy_role_transition = &copy_role_transition_normal;
76684+ copy_sprole_pw = &copy_sprole_pw_normal;
76685+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
76686+ copy_pointer_from_array = &copy_pointer_from_array_normal;
76687+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
76688+ }
76689+ pax_close_kernel();
76690+#endif
76691+
76692+ req_count = get_gr_arg_wrapper_size();
76693+
76694+ if (count != req_count) {
76695+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
76696+ error = -EINVAL;
76697+ goto out;
76698+ }
76699+
76700+
76701+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
76702+ gr_auth_expires = 0;
76703+ gr_auth_attempts = 0;
76704+ }
76705+
76706+ error = copy_gr_arg_wrapper(buf, &uwrap);
76707+ if (error)
76708+ goto out;
76709+
76710+ error = copy_gr_arg(uwrap.arg, gr_usermode);
76711+ if (error)
76712+ goto out;
76713+
76714+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
76715+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
76716+ time_after(gr_auth_expires, get_seconds())) {
76717+ error = -EBUSY;
76718+ goto out;
76719+ }
76720+
76721+	/* if a non-root user is trying to do anything other than use a special role,
76722+	   do not attempt authentication and do not count towards authentication
76723+ locking
76724+ */
76725+
76726+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
76727+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
76728+ gr_is_global_nonroot(current_uid())) {
76729+ error = -EPERM;
76730+ goto out;
76731+ }
76732+
76733+ /* ensure pw and special role name are null terminated */
76734+
76735+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
76736+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
76737+
76738+ /* Okay.
76739+	 * We have enough of the argument structure (we have yet
76740+	 * to copy_from_user the tables themselves). Copy the tables
76741+ * only if we need them, i.e. for loading operations. */
76742+
76743+ switch (gr_usermode->mode) {
76744+ case GR_STATUS:
76745+ if (gr_acl_is_enabled()) {
76746+ error = 1;
76747+ if (!gr_check_secure_terminal(current))
76748+ error = 3;
76749+ } else
76750+ error = 2;
76751+ goto out;
76752+ case GR_SHUTDOWN:
76753+ if (gr_acl_is_enabled() && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
76754+ stop_machine(gr_rbac_disable, NULL, NULL);
76755+ free_variables(false);
76756+ memset(gr_usermode, 0, sizeof(struct gr_arg));
76757+ memset(gr_system_salt, 0, GR_SALT_LEN);
76758+ memset(gr_system_sum, 0, GR_SHA_LEN);
76759+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
76760+ } else if (gr_acl_is_enabled()) {
76761+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
76762+ error = -EPERM;
76763+ } else {
76764+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
76765+ error = -EAGAIN;
76766+ }
76767+ break;
76768+ case GR_ENABLE:
76769+ if (!gr_acl_is_enabled() && !(error2 = gracl_init(gr_usermode)))
76770+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
76771+ else {
76772+ if (gr_acl_is_enabled())
76773+ error = -EAGAIN;
76774+ else
76775+ error = error2;
76776+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
76777+ }
76778+ break;
76779+ case GR_OLDRELOAD:
76780+		oldmode = 1;	/* fall through to GR_RELOAD */
76781+ case GR_RELOAD:
76782+ if (!gr_acl_is_enabled()) {
76783+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
76784+ error = -EAGAIN;
76785+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
76786+ error2 = gracl_reload(gr_usermode, oldmode);
76787+ if (!error2)
76788+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
76789+ else {
76790+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
76791+ error = error2;
76792+ }
76793+ } else {
76794+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
76795+ error = -EPERM;
76796+ }
76797+ break;
76798+ case GR_SEGVMOD:
76799+ if (unlikely(!gr_acl_is_enabled())) {
76800+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
76801+ error = -EAGAIN;
76802+ break;
76803+ }
76804+
76805+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
76806+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
76807+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
76808+ struct acl_subject_label *segvacl;
76809+ segvacl =
76810+ lookup_acl_subj_label(gr_usermode->segv_inode,
76811+ gr_usermode->segv_device,
76812+ current->role);
76813+ if (segvacl) {
76814+ segvacl->crashes = 0;
76815+ segvacl->expires = 0;
76816+ }
76817+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
76818+ gr_remove_uid(gr_usermode->segv_uid);
76819+ }
76820+ } else {
76821+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
76822+ error = -EPERM;
76823+ }
76824+ break;
76825+ case GR_SPROLE:
76826+ case GR_SPROLEPAM:
76827+ if (unlikely(!gr_acl_is_enabled())) {
76828+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
76829+ error = -EAGAIN;
76830+ break;
76831+ }
76832+
76833+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
76834+ current->role->expires = 0;
76835+ current->role->auth_attempts = 0;
76836+ }
76837+
76838+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
76839+ time_after(current->role->expires, get_seconds())) {
76840+ error = -EBUSY;
76841+ goto out;
76842+ }
76843+
76844+ if (lookup_special_role_auth
76845+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
76846+ && ((!sprole_salt && !sprole_sum)
76847+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
76848+ char *p = "";
76849+ assign_special_role(gr_usermode->sp_role);
76850+ read_lock(&tasklist_lock);
76851+ if (current->real_parent)
76852+ p = current->real_parent->role->rolename;
76853+ read_unlock(&tasklist_lock);
76854+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
76855+ p, acl_sp_role_value);
76856+ } else {
76857+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
76858+ error = -EPERM;
76859+ if(!(current->role->auth_attempts++))
76860+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
76861+
76862+ goto out;
76863+ }
76864+ break;
76865+ case GR_UNSPROLE:
76866+ if (unlikely(!gr_acl_is_enabled())) {
76867+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
76868+ error = -EAGAIN;
76869+ break;
76870+ }
76871+
76872+ if (current->role->roletype & GR_ROLE_SPECIAL) {
76873+ char *p = "";
76874+ int i = 0;
76875+
76876+ read_lock(&tasklist_lock);
76877+ if (current->real_parent) {
76878+ p = current->real_parent->role->rolename;
76879+ i = current->real_parent->acl_role_id;
76880+ }
76881+ read_unlock(&tasklist_lock);
76882+
76883+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
76884+ gr_set_acls(1);
76885+ } else {
76886+ error = -EPERM;
76887+ goto out;
76888+ }
76889+ break;
76890+ default:
76891+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
76892+ error = -EINVAL;
76893+ break;
76894+ }
76895+
76896+ if (error != -EPERM)
76897+ goto out;
76898+
76899+ if(!(gr_auth_attempts++))
76900+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
76901+
76902+ out:
76903+ mutex_unlock(&gr_dev_mutex);
76904+
76905+ if (!error)
76906+ error = req_count;
76907+
76908+ return error;
76909+}
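
The CONFIG_COMPAT block near the top of write_grsec_handler() retargets a family of copy_* function pointers on every entry, so one parsing path serves both native and 32-bit userspace layouts. A cut-down sketch of the underlying dispatch idea, branching per call instead of caching globals (struct karg and the copy_arg* helpers are illustrative; is_compat_task() is the real predicate):

#include <linux/compat.h>
#include <linux/uaccess.h>

struct karg { unsigned long value; };		/* native layout */
struct karg_compat { unsigned int value; };	/* 32-bit layout */

static int copy_arg_native(struct karg *k, const void __user *u)
{
	return copy_from_user(k, u, sizeof(*k)) ? -EFAULT : 0;
}

static int copy_arg_compat(struct karg *k, const void __user *u)
{
	struct karg_compat c;

	if (copy_from_user(&c, u, sizeof(c)))
		return -EFAULT;
	k->value = c.value;	/* widen field by field */
	return 0;
}

static int copy_arg(struct karg *k, const void __user *u)
{
	/* pick the decoder that matches the calling task's ABI */
	return is_compat_task() ? copy_arg_compat(k, u)
				: copy_arg_native(k, u);
}

The patch instead caches the choice in global function pointers, which is why it must briefly lift PaX write protection with pax_open_kernel()/pax_close_kernel() around the assignments: under constification those pointers live in read-only memory.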
76910+
76911+int
76912+gr_set_acls(const int type)
76913+{
76914+ struct task_struct *task, *task2;
76915+ struct acl_role_label *role = current->role;
76916+ struct acl_subject_label *subj;
76917+ __u16 acl_role_id = current->acl_role_id;
76918+ const struct cred *cred;
76919+ int ret;
76920+
76921+ rcu_read_lock();
76922+ read_lock(&tasklist_lock);
76923+ read_lock(&grsec_exec_file_lock);
76924+ do_each_thread(task2, task) {
76925+		/* check to see if we're called from the exit handler;
76926+ if so, only replace ACLs that have inherited the admin
76927+ ACL */
76928+
76929+ if (type && (task->role != role ||
76930+ task->acl_role_id != acl_role_id))
76931+ continue;
76932+
76933+ task->acl_role_id = 0;
76934+ task->acl_sp_role = 0;
76935+ task->inherited = 0;
76936+
76937+ if (task->exec_file) {
76938+ cred = __task_cred(task);
76939+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
76940+ subj = __gr_get_subject_for_task(polstate, task, NULL);
76941+ if (subj == NULL) {
76942+ ret = -EINVAL;
76943+ read_unlock(&grsec_exec_file_lock);
76944+ read_unlock(&tasklist_lock);
76945+ rcu_read_unlock();
76946+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
76947+ return ret;
76948+ }
76949+ __gr_apply_subject_to_task(polstate, task, subj);
76950+ } else {
76951+ // it's a kernel process
76952+ task->role = polstate->kernel_role;
76953+ task->acl = polstate->kernel_role->root_label;
76954+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
76955+ task->acl->mode &= ~GR_PROCFIND;
76956+#endif
76957+ }
76958+ } while_each_thread(task2, task);
76959+ read_unlock(&grsec_exec_file_lock);
76960+ read_unlock(&tasklist_lock);
76961+ rcu_read_unlock();
76962+
76963+ return 0;
76964+}
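
gr_set_acls() visits every thread in the system with do_each_thread()/while_each_thread(); that traversal is only safe while tasklist_lock is held (here nested inside rcu_read_lock()), so no task can vanish mid-walk. A minimal sketch of the iteration pattern (count_kernel_threads is a made-up example):

#include <linux/sched.h>

/* Count tasks with no userspace mm, i.e. kernel threads. */
static int count_kernel_threads(void)
{
	struct task_struct *g, *t;
	int n = 0;

	read_lock(&tasklist_lock);
	do_each_thread(g, t) {
		if (t->mm == NULL)
			n++;
	} while_each_thread(g, t);
	read_unlock(&tasklist_lock);

	return n;
}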
76965diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
76966new file mode 100644
76967index 0000000..39645c9
76968--- /dev/null
76969+++ b/grsecurity/gracl_res.c
76970@@ -0,0 +1,68 @@
76971+#include <linux/kernel.h>
76972+#include <linux/sched.h>
76973+#include <linux/gracl.h>
76974+#include <linux/grinternal.h>
76975+
76976+static const char *restab_log[] = {
76977+ [RLIMIT_CPU] = "RLIMIT_CPU",
76978+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
76979+ [RLIMIT_DATA] = "RLIMIT_DATA",
76980+ [RLIMIT_STACK] = "RLIMIT_STACK",
76981+ [RLIMIT_CORE] = "RLIMIT_CORE",
76982+ [RLIMIT_RSS] = "RLIMIT_RSS",
76983+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
76984+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
76985+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
76986+ [RLIMIT_AS] = "RLIMIT_AS",
76987+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
76988+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
76989+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
76990+ [RLIMIT_NICE] = "RLIMIT_NICE",
76991+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
76992+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
76993+ [GR_CRASH_RES] = "RLIMIT_CRASH"
76994+};
76995+
76996+void
76997+gr_log_resource(const struct task_struct *task,
76998+ const int res, const unsigned long wanted, const int gt)
76999+{
77000+ const struct cred *cred;
77001+ unsigned long rlim;
77002+
77003+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
77004+ return;
77005+
77006+	// resource not yet supported by this table
77007+ if (unlikely(!restab_log[res]))
77008+ return;
77009+
77010+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
77011+ rlim = task_rlimit_max(task, res);
77012+ else
77013+ rlim = task_rlimit(task, res);
77014+
77015+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
77016+ return;
77017+
77018+ rcu_read_lock();
77019+ cred = __task_cred(task);
77020+
77021+ if (res == RLIMIT_NPROC &&
77022+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
77023+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
77024+ goto out_rcu_unlock;
77025+ else if (res == RLIMIT_MEMLOCK &&
77026+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
77027+ goto out_rcu_unlock;
77028+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
77029+ goto out_rcu_unlock;
77030+ rcu_read_unlock();
77031+
77032+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
77033+
77034+ return;
77035+out_rcu_unlock:
77036+ rcu_read_unlock();
77037+ return;
77038+}
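
restab_log[] relies on C99 designated array initializers: entries are keyed by the RLIMIT_* constants themselves, so the table stays correct even if the numeric values change, and any slot without an initializer defaults to NULL, which is exactly what the `!restab_log[res]` guard in gr_log_resource() tests. A standalone, runnable illustration of the same technique (userspace C, enum values invented):

#include <stdio.h>

enum { EV_A = 0, EV_B = 3, EV_MAX = 5 };

static const char *ev_name[EV_MAX + 1] = {
	[EV_A] = "EV_A",
	[EV_B] = "EV_B",
	/* slots 1, 2, 4, 5 are implicitly NULL */
};

int main(void)
{
	int i;

	for (i = 0; i <= EV_MAX; i++)
		printf("%d -> %s\n", i, ev_name[i] ? ev_name[i] : "(unnamed)");
	return 0;
}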
77039diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
77040new file mode 100644
77041index 0000000..2040e61
77042--- /dev/null
77043+++ b/grsecurity/gracl_segv.c
77044@@ -0,0 +1,313 @@
77045+#include <linux/kernel.h>
77046+#include <linux/mm.h>
77047+#include <asm/uaccess.h>
77048+#include <asm/errno.h>
77049+#include <asm/mman.h>
77050+#include <net/sock.h>
77051+#include <linux/file.h>
77052+#include <linux/fs.h>
77053+#include <linux/net.h>
77054+#include <linux/in.h>
77055+#include <linux/slab.h>
77056+#include <linux/types.h>
77057+#include <linux/sched.h>
77058+#include <linux/timer.h>
77059+#include <linux/gracl.h>
77060+#include <linux/grsecurity.h>
77061+#include <linux/grinternal.h>
77062+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
77063+#include <linux/magic.h>
77064+#include <linux/pagemap.h>
77065+#include "../fs/btrfs/async-thread.h"
77066+#include "../fs/btrfs/ctree.h"
77067+#include "../fs/btrfs/btrfs_inode.h"
77068+#endif
77069+
77070+static struct crash_uid *uid_set;
77071+static unsigned short uid_used;
77072+static DEFINE_SPINLOCK(gr_uid_lock);
77073+extern rwlock_t gr_inode_lock;
77074+extern struct acl_subject_label *
77075+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
77076+ struct acl_role_label *role);
77077+
77078+static inline dev_t __get_dev(const struct dentry *dentry)
77079+{
77080+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
77081+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
77082+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
77083+ else
77084+#endif
77085+ return dentry->d_sb->s_dev;
77086+}
77087+
77088+int
77089+gr_init_uidset(void)
77090+{
77091+ uid_set =
77092+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
77093+ uid_used = 0;
77094+
77095+ return uid_set ? 1 : 0;
77096+}
77097+
77098+void
77099+gr_free_uidset(void)
77100+{
77101+ if (uid_set) {
77102+ struct crash_uid *tmpset;
77103+ spin_lock(&gr_uid_lock);
77104+ tmpset = uid_set;
77105+ uid_set = NULL;
77106+ uid_used = 0;
77107+ spin_unlock(&gr_uid_lock);
77108+ if (tmpset)
77109+ kfree(tmpset);
77110+ }
77111+
77112+ return;
77113+}
77114+
77115+int
77116+gr_find_uid(const uid_t uid)
77117+{
77118+ struct crash_uid *tmp = uid_set;
77119+ uid_t buid;
77120+ int low = 0, high = uid_used - 1, mid;
77121+
77122+ while (high >= low) {
77123+ mid = (low + high) >> 1;
77124+ buid = tmp[mid].uid;
77125+ if (buid == uid)
77126+ return mid;
77127+ if (buid > uid)
77128+ high = mid - 1;
77129+ if (buid < uid)
77130+ low = mid + 1;
77131+ }
77132+
77133+ return -1;
77134+}
77135+
77136+static __inline__ void
77137+gr_insertsort(void)
77138+{
77139+ unsigned short i, j;
77140+ struct crash_uid index;
77141+
77142+ for (i = 1; i < uid_used; i++) {
77143+ index = uid_set[i];
77144+ j = i;
77145+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
77146+ uid_set[j] = uid_set[j - 1];
77147+ j--;
77148+ }
77149+ uid_set[j] = index;
77150+ }
77151+
77152+ return;
77153+}
77154+
77155+static __inline__ void
77156+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
77157+{
77158+ int loc;
77159+ uid_t uid = GR_GLOBAL_UID(kuid);
77160+
77161+ if (uid_used == GR_UIDTABLE_MAX)
77162+ return;
77163+
77164+ loc = gr_find_uid(uid);
77165+
77166+ if (loc >= 0) {
77167+ uid_set[loc].expires = expires;
77168+ return;
77169+ }
77170+
77171+ uid_set[uid_used].uid = uid;
77172+ uid_set[uid_used].expires = expires;
77173+ uid_used++;
77174+
77175+ gr_insertsort();
77176+
77177+ return;
77178+}
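
The crash-UID table is kept sorted so gr_find_uid() can binary-search it, and gr_insert_uid() restores order with an insertion sort that is effectively O(n) here, since only the appended element is ever out of place. The same search/insert pair as a standalone userspace program for easy testing (bounds and duplicate handling simplified):

#include <stdio.h>

#define MAX 16

static unsigned int set[MAX];
static int used;

static int find(unsigned int key)	/* binary search, -1 if absent */
{
	int low = 0, high = used - 1;

	while (low <= high) {
		int mid = (low + high) / 2;

		if (set[mid] == key)
			return mid;
		if (set[mid] > key)
			high = mid - 1;
		else
			low = mid + 1;
	}
	return -1;
}

static void insert(unsigned int key)	/* keep the array sorted */
{
	int j = used++;

	while (j > 0 && set[j - 1] > key) {	/* shift larger entries right */
		set[j] = set[j - 1];
		j--;
	}
	set[j] = key;
}

int main(void)
{
	insert(42); insert(7); insert(1000);
	printf("7 at %d, 8 at %d\n", find(7), find(8));	/* "7 at 0, 8 at -1" */
	return 0;
}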
77179+
77180+void
77181+gr_remove_uid(const unsigned short loc)
77182+{
77183+ unsigned short i;
77184+
77185+ for (i = loc + 1; i < uid_used; i++)
77186+ uid_set[i - 1] = uid_set[i];
77187+
77188+ uid_used--;
77189+
77190+ return;
77191+}
77192+
77193+int
77194+gr_check_crash_uid(const kuid_t kuid)
77195+{
77196+ int loc;
77197+ int ret = 0;
77198+ uid_t uid;
77199+
77200+ if (unlikely(!gr_acl_is_enabled()))
77201+ return 0;
77202+
77203+ uid = GR_GLOBAL_UID(kuid);
77204+
77205+ spin_lock(&gr_uid_lock);
77206+ loc = gr_find_uid(uid);
77207+
77208+ if (loc < 0)
77209+ goto out_unlock;
77210+
77211+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
77212+ gr_remove_uid(loc);
77213+ else
77214+ ret = 1;
77215+
77216+out_unlock:
77217+ spin_unlock(&gr_uid_lock);
77218+ return ret;
77219+}
77220+
77221+static __inline__ int
77222+proc_is_setxid(const struct cred *cred)
77223+{
77224+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
77225+ !uid_eq(cred->uid, cred->fsuid))
77226+ return 1;
77227+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
77228+ !gid_eq(cred->gid, cred->fsgid))
77229+ return 1;
77230+
77231+ return 0;
77232+}
77233+
77234+extern int gr_fake_force_sig(int sig, struct task_struct *t);
77235+
77236+void
77237+gr_handle_crash(struct task_struct *task, const int sig)
77238+{
77239+ struct acl_subject_label *curr;
77240+ struct task_struct *tsk, *tsk2;
77241+ const struct cred *cred;
77242+ const struct cred *cred2;
77243+
77244+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
77245+ return;
77246+
77247+ if (unlikely(!gr_acl_is_enabled()))
77248+ return;
77249+
77250+ curr = task->acl;
77251+
77252+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
77253+ return;
77254+
77255+ if (time_before_eq(curr->expires, get_seconds())) {
77256+ curr->expires = 0;
77257+ curr->crashes = 0;
77258+ }
77259+
77260+ curr->crashes++;
77261+
77262+ if (!curr->expires)
77263+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
77264+
77265+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
77266+ time_after(curr->expires, get_seconds())) {
77267+ rcu_read_lock();
77268+ cred = __task_cred(task);
77269+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
77270+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
77271+ spin_lock(&gr_uid_lock);
77272+ gr_insert_uid(cred->uid, curr->expires);
77273+ spin_unlock(&gr_uid_lock);
77274+ curr->expires = 0;
77275+ curr->crashes = 0;
77276+ read_lock(&tasklist_lock);
77277+ do_each_thread(tsk2, tsk) {
77278+ cred2 = __task_cred(tsk);
77279+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
77280+ gr_fake_force_sig(SIGKILL, tsk);
77281+ } while_each_thread(tsk2, tsk);
77282+ read_unlock(&tasklist_lock);
77283+ } else {
77284+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
77285+ read_lock(&tasklist_lock);
77286+ read_lock(&grsec_exec_file_lock);
77287+ do_each_thread(tsk2, tsk) {
77288+ if (likely(tsk != task)) {
77289+ // if this thread has the same subject as the one that triggered
77290+ // RES_CRASH and it's the same binary, kill it
77291+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
77292+ gr_fake_force_sig(SIGKILL, tsk);
77293+ }
77294+ } while_each_thread(tsk2, tsk);
77295+ read_unlock(&grsec_exec_file_lock);
77296+ read_unlock(&tasklist_lock);
77297+ }
77298+ rcu_read_unlock();
77299+ }
77300+
77301+ return;
77302+}
77303+
77304+int
77305+gr_check_crash_exec(const struct file *filp)
77306+{
77307+ struct acl_subject_label *curr;
77308+
77309+ if (unlikely(!gr_acl_is_enabled()))
77310+ return 0;
77311+
77312+ read_lock(&gr_inode_lock);
77313+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
77314+ __get_dev(filp->f_path.dentry),
77315+ current->role);
77316+ read_unlock(&gr_inode_lock);
77317+
77318+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
77319+ (!curr->crashes && !curr->expires))
77320+ return 0;
77321+
77322+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
77323+ time_after(curr->expires, get_seconds()))
77324+ return 1;
77325+ else if (time_before_eq(curr->expires, get_seconds())) {
77326+ curr->crashes = 0;
77327+ curr->expires = 0;
77328+ }
77329+
77330+ return 0;
77331+}
77332+
77333+void
77334+gr_handle_alertkill(struct task_struct *task)
77335+{
77336+ struct acl_subject_label *curracl;
77337+ __u32 curr_ip;
77338+ struct task_struct *p, *p2;
77339+
77340+ if (unlikely(!gr_acl_is_enabled()))
77341+ return;
77342+
77343+ curracl = task->acl;
77344+ curr_ip = task->signal->curr_ip;
77345+
77346+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
77347+ read_lock(&tasklist_lock);
77348+ do_each_thread(p2, p) {
77349+ if (p->signal->curr_ip == curr_ip)
77350+ gr_fake_force_sig(SIGKILL, p);
77351+ } while_each_thread(p2, p);
77352+ read_unlock(&tasklist_lock);
77353+ } else if (curracl->mode & GR_KILLPROC)
77354+ gr_fake_force_sig(SIGKILL, task);
77355+
77356+ return;
77357+}
77358diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
77359new file mode 100644
77360index 0000000..98011b0
77361--- /dev/null
77362+++ b/grsecurity/gracl_shm.c
77363@@ -0,0 +1,40 @@
77364+#include <linux/kernel.h>
77365+#include <linux/mm.h>
77366+#include <linux/sched.h>
77367+#include <linux/file.h>
77368+#include <linux/ipc.h>
77369+#include <linux/gracl.h>
77370+#include <linux/grsecurity.h>
77371+#include <linux/grinternal.h>
77372+
77373+int
77374+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
77375+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
77376+{
77377+ struct task_struct *task;
77378+
77379+ if (!gr_acl_is_enabled())
77380+ return 1;
77381+
77382+ rcu_read_lock();
77383+ read_lock(&tasklist_lock);
77384+
77385+ task = find_task_by_vpid(shm_cprid);
77386+
77387+ if (unlikely(!task))
77388+ task = find_task_by_vpid(shm_lapid);
77389+
77390+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
77391+ (task_pid_nr(task) == shm_lapid)) &&
77392+ (task->acl->mode & GR_PROTSHM) &&
77393+ (task->acl != current->acl))) {
77394+ read_unlock(&tasklist_lock);
77395+ rcu_read_unlock();
77396+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
77397+ return 0;
77398+ }
77399+ read_unlock(&tasklist_lock);
77400+ rcu_read_unlock();
77401+
77402+ return 1;
77403+}
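
gr_handle_shmat() compares epoch seconds with time_before_eq() rather than a raw <=; the kernel's time_after()/time_before() family subtracts in signed arithmetic, which keeps the comparison correct even if the counter wraps. A runnable userspace demonstration of why the macro form survives wraparound while the naive comparison does not:

#include <stdio.h>

/* same trick as the kernel's time_after()/time_before_eq() macros */
#define time_after(a, b)	((long)((b) - (a)) < 0)
#define time_before_eq(a, b)	((long)((a) - (b)) <= 0)

int main(void)
{
	unsigned long t1 = ~0UL - 5;	/* just before the counter wraps */
	unsigned long t2 = t1 + 10;	/* wrapped: numerically tiny */

	printf("raw:   t2 >  t1 ? %d\n", t2 > t1);		 /* 0: wrong  */
	printf("macro: t2 after t1 ? %d\n", time_after(t2, t1)); /* 1: right */
	return 0;
}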
77404diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
77405new file mode 100644
77406index 0000000..bc0be01
77407--- /dev/null
77408+++ b/grsecurity/grsec_chdir.c
77409@@ -0,0 +1,19 @@
77410+#include <linux/kernel.h>
77411+#include <linux/sched.h>
77412+#include <linux/fs.h>
77413+#include <linux/file.h>
77414+#include <linux/grsecurity.h>
77415+#include <linux/grinternal.h>
77416+
77417+void
77418+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
77419+{
77420+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
77421+ if ((grsec_enable_chdir && grsec_enable_group &&
77422+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
77423+ !grsec_enable_group)) {
77424+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
77425+ }
77426+#endif
77427+ return;
77428+}
77429diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
77430new file mode 100644
77431index 0000000..baa635c
77432--- /dev/null
77433+++ b/grsecurity/grsec_chroot.c
77434@@ -0,0 +1,387 @@
77435+#include <linux/kernel.h>
77436+#include <linux/module.h>
77437+#include <linux/sched.h>
77438+#include <linux/file.h>
77439+#include <linux/fs.h>
77440+#include <linux/mount.h>
77441+#include <linux/types.h>
77442+#include "../fs/mount.h"
77443+#include <linux/grsecurity.h>
77444+#include <linux/grinternal.h>
77445+
77446+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
77447+int gr_init_ran;
77448+#endif
77449+
77450+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
77451+{
77452+#ifdef CONFIG_GRKERNSEC
77453+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
77454+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
77455+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
77456+ && gr_init_ran
77457+#endif
77458+ )
77459+ task->gr_is_chrooted = 1;
77460+ else {
77461+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
77462+ if (task_pid_nr(task) == 1 && !gr_init_ran)
77463+ gr_init_ran = 1;
77464+#endif
77465+ task->gr_is_chrooted = 0;
77466+ }
77467+
77468+ task->gr_chroot_dentry = path->dentry;
77469+#endif
77470+ return;
77471+}
77472+
77473+void gr_clear_chroot_entries(struct task_struct *task)
77474+{
77475+#ifdef CONFIG_GRKERNSEC
77476+ task->gr_is_chrooted = 0;
77477+ task->gr_chroot_dentry = NULL;
77478+#endif
77479+ return;
77480+}
77481+
77482+int
77483+gr_handle_chroot_unix(const pid_t pid)
77484+{
77485+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
77486+ struct task_struct *p;
77487+
77488+ if (unlikely(!grsec_enable_chroot_unix))
77489+ return 1;
77490+
77491+ if (likely(!proc_is_chrooted(current)))
77492+ return 1;
77493+
77494+ rcu_read_lock();
77495+ read_lock(&tasklist_lock);
77496+ p = find_task_by_vpid_unrestricted(pid);
77497+ if (unlikely(p && !have_same_root(current, p))) {
77498+ read_unlock(&tasklist_lock);
77499+ rcu_read_unlock();
77500+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
77501+ return 0;
77502+ }
77503+ read_unlock(&tasklist_lock);
77504+ rcu_read_unlock();
77505+#endif
77506+ return 1;
77507+}
77508+
77509+int
77510+gr_handle_chroot_nice(void)
77511+{
77512+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
77513+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
77514+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
77515+ return -EPERM;
77516+ }
77517+#endif
77518+ return 0;
77519+}
77520+
77521+int
77522+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
77523+{
77524+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
77525+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
77526+ && proc_is_chrooted(current)) {
77527+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
77528+ return -EACCES;
77529+ }
77530+#endif
77531+ return 0;
77532+}
77533+
77534+int
77535+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
77536+{
77537+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
77538+ struct task_struct *p;
77539+ int ret = 0;
77540+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
77541+ return ret;
77542+
77543+ read_lock(&tasklist_lock);
77544+ do_each_pid_task(pid, type, p) {
77545+ if (!have_same_root(current, p)) {
77546+ ret = 1;
77547+ goto out;
77548+ }
77549+ } while_each_pid_task(pid, type, p);
77550+out:
77551+ read_unlock(&tasklist_lock);
77552+ return ret;
77553+#endif
77554+ return 0;
77555+}
77556+
77557+int
77558+gr_pid_is_chrooted(struct task_struct *p)
77559+{
77560+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
77561+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
77562+ return 0;
77563+
77564+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
77565+ !have_same_root(current, p)) {
77566+ return 1;
77567+ }
77568+#endif
77569+ return 0;
77570+}
77571+
77572+EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
77573+
77574+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
77575+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
77576+{
77577+ struct path path, currentroot;
77578+ int ret = 0;
77579+
77580+ path.dentry = (struct dentry *)u_dentry;
77581+ path.mnt = (struct vfsmount *)u_mnt;
77582+ get_fs_root(current->fs, &currentroot);
77583+ if (path_is_under(&path, &currentroot))
77584+ ret = 1;
77585+ path_put(&currentroot);
77586+
77587+ return ret;
77588+}
77589+#endif
77590+
77591+int
77592+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
77593+{
77594+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
77595+ if (!grsec_enable_chroot_fchdir)
77596+ return 1;
77597+
77598+ if (!proc_is_chrooted(current))
77599+ return 1;
77600+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
77601+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
77602+ return 0;
77603+ }
77604+#endif
77605+ return 1;
77606+}
77607+
77608+int
77609+gr_chroot_fhandle(void)
77610+{
77611+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
77612+ if (!grsec_enable_chroot_fchdir)
77613+ return 1;
77614+
77615+ if (!proc_is_chrooted(current))
77616+ return 1;
77617+ else {
77618+ gr_log_noargs(GR_DONT_AUDIT, GR_CHROOT_FHANDLE_MSG);
77619+ return 0;
77620+ }
77621+#endif
77622+ return 1;
77623+}
77624+
77625+int
77626+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
77627+ const time_t shm_createtime)
77628+{
77629+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
77630+ struct task_struct *p;
77631+ time_t starttime;
77632+
77633+ if (unlikely(!grsec_enable_chroot_shmat))
77634+ return 1;
77635+
77636+ if (likely(!proc_is_chrooted(current)))
77637+ return 1;
77638+
77639+ rcu_read_lock();
77640+ read_lock(&tasklist_lock);
77641+
77642+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
77643+ starttime = p->start_time.tv_sec;
77644+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
77645+ if (have_same_root(current, p)) {
77646+ goto allow;
77647+ } else {
77648+ read_unlock(&tasklist_lock);
77649+ rcu_read_unlock();
77650+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
77651+ return 0;
77652+ }
77653+ }
77654+ /* creator exited, pid reuse, fall through to next check */
77655+ }
77656+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
77657+ if (unlikely(!have_same_root(current, p))) {
77658+ read_unlock(&tasklist_lock);
77659+ rcu_read_unlock();
77660+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
77661+ return 0;
77662+ }
77663+ }
77664+
77665+allow:
77666+ read_unlock(&tasklist_lock);
77667+ rcu_read_unlock();
77668+#endif
77669+ return 1;
77670+}
77671+
77672+void
77673+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
77674+{
77675+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
77676+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
77677+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
77678+#endif
77679+ return;
77680+}
77681+
77682+int
77683+gr_handle_chroot_mknod(const struct dentry *dentry,
77684+ const struct vfsmount *mnt, const int mode)
77685+{
77686+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
77687+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
77688+ proc_is_chrooted(current)) {
77689+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
77690+ return -EPERM;
77691+ }
77692+#endif
77693+ return 0;
77694+}
77695+
77696+int
77697+gr_handle_chroot_mount(const struct dentry *dentry,
77698+ const struct vfsmount *mnt, const char *dev_name)
77699+{
77700+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
77701+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
77702+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
77703+ return -EPERM;
77704+ }
77705+#endif
77706+ return 0;
77707+}
77708+
77709+int
77710+gr_handle_chroot_pivot(void)
77711+{
77712+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
77713+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
77714+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
77715+ return -EPERM;
77716+ }
77717+#endif
77718+ return 0;
77719+}
77720+
77721+int
77722+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
77723+{
77724+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
77725+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
77726+ !gr_is_outside_chroot(dentry, mnt)) {
77727+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
77728+ return -EPERM;
77729+ }
77730+#endif
77731+ return 0;
77732+}
77733+
77734+extern const char *captab_log[];
77735+extern int captab_log_entries;
77736+
77737+int
77738+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
77739+{
77740+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77741+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
77742+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
77743+ if (cap_raised(chroot_caps, cap)) {
77744+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
77745+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
77746+ }
77747+ return 0;
77748+ }
77749+ }
77750+#endif
77751+ return 1;
77752+}
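
gr_task_chroot_is_capable() denies any capability present in a fixed mask (GR_CHROOT_CAPS), logging only when the capability was actually effective; cap_raised() is a single bit test in the kernel_cap_t bitmap. A reduced sketch of building and testing such a mask (denied_in_jail() and its mask contents are illustrative and simplify the patch's logic):

#include <linux/capability.h>
#include <linux/cred.h>

/* A hypothetical "denied while chrooted" mask; the patch's real mask is
 * the much larger GR_CHROOT_CAPS. */
static int denied_in_jail(const struct cred *cred, int cap)
{
	kernel_cap_t jail_deny = CAP_EMPTY_SET;

	cap_raise(jail_deny, CAP_SYS_MODULE);
	cap_raise(jail_deny, CAP_SYS_RAWIO);
	cap_raise(jail_deny, CAP_MKNOD);

	/* flag only what is both masked and currently effective */
	return cap_raised(jail_deny, cap) &&
	       cap_raised(cred->cap_effective, cap);
}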
77753+
77754+int
77755+gr_chroot_is_capable(const int cap)
77756+{
77757+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77758+ return gr_task_chroot_is_capable(current, current_cred(), cap);
77759+#endif
77760+ return 1;
77761+}
77762+
77763+int
77764+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
77765+{
77766+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77767+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
77768+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
77769+ if (cap_raised(chroot_caps, cap)) {
77770+ return 0;
77771+ }
77772+ }
77773+#endif
77774+ return 1;
77775+}
77776+
77777+int
77778+gr_chroot_is_capable_nolog(const int cap)
77779+{
77780+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77781+ return gr_task_chroot_is_capable_nolog(current, cap);
77782+#endif
77783+ return 1;
77784+}
77785+
77786+int
77787+gr_handle_chroot_sysctl(const int op)
77788+{
77789+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
77790+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
77791+ proc_is_chrooted(current))
77792+ return -EACCES;
77793+#endif
77794+ return 0;
77795+}
77796+
77797+void
77798+gr_handle_chroot_chdir(const struct path *path)
77799+{
77800+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
77801+ if (grsec_enable_chroot_chdir)
77802+ set_fs_pwd(current->fs, path);
77803+#endif
77804+ return;
77805+}
77806+
77807+int
77808+gr_handle_chroot_chmod(const struct dentry *dentry,
77809+ const struct vfsmount *mnt, const int mode)
77810+{
77811+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
77812+ /* allow chmod +s on directories, but not files */
77813+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
77814+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
77815+ proc_is_chrooted(current)) {
77816+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
77817+ return -EPERM;
77818+ }
77819+#endif
77820+ return 0;
77821+}
77822diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
77823new file mode 100644
77824index 0000000..2d3bcb7
77825--- /dev/null
77826+++ b/grsecurity/grsec_disabled.c
77827@@ -0,0 +1,440 @@
77828+#include <linux/kernel.h>
77829+#include <linux/module.h>
77830+#include <linux/sched.h>
77831+#include <linux/file.h>
77832+#include <linux/fs.h>
77833+#include <linux/kdev_t.h>
77834+#include <linux/net.h>
77835+#include <linux/in.h>
77836+#include <linux/ip.h>
77837+#include <linux/skbuff.h>
77838+#include <linux/sysctl.h>
77839+
77840+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
77841+void
77842+pax_set_initial_flags(struct linux_binprm *bprm)
77843+{
77844+ return;
77845+}
77846+#endif
77847+
77848+#ifdef CONFIG_SYSCTL
77849+__u32
77850+gr_handle_sysctl(const struct ctl_table * table, const int op)
77851+{
77852+ return 0;
77853+}
77854+#endif
77855+
77856+#ifdef CONFIG_TASKSTATS
77857+int gr_is_taskstats_denied(int pid)
77858+{
77859+ return 0;
77860+}
77861+#endif
77862+
77863+int
77864+gr_acl_is_enabled(void)
77865+{
77866+ return 0;
77867+}
77868+
77869+int
77870+gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
77871+{
77872+ return 0;
77873+}
77874+
77875+void
77876+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
77877+{
77878+ return;
77879+}
77880+
77881+int
77882+gr_handle_rawio(const struct inode *inode)
77883+{
77884+ return 0;
77885+}
77886+
77887+void
77888+gr_acl_handle_psacct(struct task_struct *task, const long code)
77889+{
77890+ return;
77891+}
77892+
77893+int
77894+gr_handle_ptrace(struct task_struct *task, const long request)
77895+{
77896+ return 0;
77897+}
77898+
77899+int
77900+gr_handle_proc_ptrace(struct task_struct *task)
77901+{
77902+ return 0;
77903+}
77904+
77905+int
77906+gr_set_acls(const int type)
77907+{
77908+ return 0;
77909+}
77910+
77911+int
77912+gr_check_hidden_task(const struct task_struct *tsk)
77913+{
77914+ return 0;
77915+}
77916+
77917+int
77918+gr_check_protected_task(const struct task_struct *task)
77919+{
77920+ return 0;
77921+}
77922+
77923+int
77924+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
77925+{
77926+ return 0;
77927+}
77928+
77929+void
77930+gr_copy_label(struct task_struct *tsk)
77931+{
77932+ return;
77933+}
77934+
77935+void
77936+gr_set_pax_flags(struct task_struct *task)
77937+{
77938+ return;
77939+}
77940+
77941+int
77942+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
77943+ const int unsafe_share)
77944+{
77945+ return 0;
77946+}
77947+
77948+void
77949+gr_handle_delete(const ino_t ino, const dev_t dev)
77950+{
77951+ return;
77952+}
77953+
77954+void
77955+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
77956+{
77957+ return;
77958+}
77959+
77960+void
77961+gr_handle_crash(struct task_struct *task, const int sig)
77962+{
77963+ return;
77964+}
77965+
77966+int
77967+gr_check_crash_exec(const struct file *filp)
77968+{
77969+ return 0;
77970+}
77971+
77972+int
77973+gr_check_crash_uid(const kuid_t uid)
77974+{
77975+ return 0;
77976+}
77977+
77978+void
77979+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
77980+ struct dentry *old_dentry,
77981+ struct dentry *new_dentry,
77982+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
77983+{
77984+ return;
77985+}
77986+
77987+int
77988+gr_search_socket(const int family, const int type, const int protocol)
77989+{
77990+ return 1;
77991+}
77992+
77993+int
77994+gr_search_connectbind(const int mode, const struct socket *sock,
77995+ const struct sockaddr_in *addr)
77996+{
77997+ return 0;
77998+}
77999+
78000+void
78001+gr_handle_alertkill(struct task_struct *task)
78002+{
78003+ return;
78004+}
78005+
78006+__u32
78007+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
78008+{
78009+ return 1;
78010+}
78011+
78012+__u32
78013+gr_acl_handle_hidden_file(const struct dentry * dentry,
78014+ const struct vfsmount * mnt)
78015+{
78016+ return 1;
78017+}
78018+
78019+__u32
78020+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
78021+ int acc_mode)
78022+{
78023+ return 1;
78024+}
78025+
78026+__u32
78027+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
78028+{
78029+ return 1;
78030+}
78031+
78032+__u32
78033+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
78034+{
78035+ return 1;
78036+}
78037+
78038+int
78039+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
78040+ unsigned int *vm_flags)
78041+{
78042+ return 1;
78043+}
78044+
78045+__u32
78046+gr_acl_handle_truncate(const struct dentry * dentry,
78047+ const struct vfsmount * mnt)
78048+{
78049+ return 1;
78050+}
78051+
78052+__u32
78053+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
78054+{
78055+ return 1;
78056+}
78057+
78058+__u32
78059+gr_acl_handle_access(const struct dentry * dentry,
78060+ const struct vfsmount * mnt, const int fmode)
78061+{
78062+ return 1;
78063+}
78064+
78065+__u32
78066+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
78067+ umode_t *mode)
78068+{
78069+ return 1;
78070+}
78071+
78072+__u32
78073+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
78074+{
78075+ return 1;
78076+}
78077+
78078+__u32
78079+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
78080+{
78081+ return 1;
78082+}
78083+
78084+__u32
78085+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
78086+{
78087+ return 1;
78088+}
78089+
78090+void
78091+grsecurity_init(void)
78092+{
78093+ return;
78094+}
78095+
78096+umode_t gr_acl_umask(void)
78097+{
78098+ return 0;
78099+}
78100+
78101+__u32
78102+gr_acl_handle_mknod(const struct dentry * new_dentry,
78103+ const struct dentry * parent_dentry,
78104+ const struct vfsmount * parent_mnt,
78105+ const int mode)
78106+{
78107+ return 1;
78108+}
78109+
78110+__u32
78111+gr_acl_handle_mkdir(const struct dentry * new_dentry,
78112+ const struct dentry * parent_dentry,
78113+ const struct vfsmount * parent_mnt)
78114+{
78115+ return 1;
78116+}
78117+
78118+__u32
78119+gr_acl_handle_symlink(const struct dentry * new_dentry,
78120+ const struct dentry * parent_dentry,
78121+ const struct vfsmount * parent_mnt, const struct filename *from)
78122+{
78123+ return 1;
78124+}
78125+
78126+__u32
78127+gr_acl_handle_link(const struct dentry * new_dentry,
78128+ const struct dentry * parent_dentry,
78129+ const struct vfsmount * parent_mnt,
78130+ const struct dentry * old_dentry,
78131+ const struct vfsmount * old_mnt, const struct filename *to)
78132+{
78133+ return 1;
78134+}
78135+
78136+int
78137+gr_acl_handle_rename(const struct dentry *new_dentry,
78138+ const struct dentry *parent_dentry,
78139+ const struct vfsmount *parent_mnt,
78140+ const struct dentry *old_dentry,
78141+ const struct inode *old_parent_inode,
78142+ const struct vfsmount *old_mnt, const struct filename *newname,
78143+ unsigned int flags)
78144+{
78145+ return 0;
78146+}
78147+
78148+int
78149+gr_acl_handle_filldir(const struct file *file, const char *name,
78150+ const int namelen, const ino_t ino)
78151+{
78152+ return 1;
78153+}
78154+
78155+int
78156+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
78157+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
78158+{
78159+ return 1;
78160+}
78161+
78162+int
78163+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
78164+{
78165+ return 0;
78166+}
78167+
78168+int
78169+gr_search_accept(const struct socket *sock)
78170+{
78171+ return 0;
78172+}
78173+
78174+int
78175+gr_search_listen(const struct socket *sock)
78176+{
78177+ return 0;
78178+}
78179+
78180+int
78181+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
78182+{
78183+ return 0;
78184+}
78185+
78186+__u32
78187+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
78188+{
78189+ return 1;
78190+}
78191+
78192+__u32
78193+gr_acl_handle_creat(const struct dentry * dentry,
78194+ const struct dentry * p_dentry,
78195+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
78196+ const int imode)
78197+{
78198+ return 1;
78199+}
78200+
78201+void
78202+gr_acl_handle_exit(void)
78203+{
78204+ return;
78205+}
78206+
78207+int
78208+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
78209+{
78210+ return 1;
78211+}
78212+
78213+void
78214+gr_set_role_label(const kuid_t uid, const kgid_t gid)
78215+{
78216+ return;
78217+}
78218+
78219+int
78220+gr_acl_handle_procpidmem(const struct task_struct *task)
78221+{
78222+ return 0;
78223+}
78224+
78225+int
78226+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
78227+{
78228+ return 0;
78229+}
78230+
78231+int
78232+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
78233+{
78234+ return 0;
78235+}
78236+
78237+int
78238+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
78239+{
78240+ return 0;
78241+}
78242+
78243+int
78244+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
78245+{
78246+ return 0;
78247+}
78248+
78249+int gr_acl_enable_at_secure(void)
78250+{
78251+ return 0;
78252+}
78253+
78254+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
78255+{
78256+ return dentry->d_sb->s_dev;
78257+}
78258+
78259+void gr_put_exec_file(struct task_struct *task)
78260+{
78261+ return;
78262+}
78263+
78264+#ifdef CONFIG_SECURITY
78265+EXPORT_SYMBOL_GPL(gr_check_user_change);
78266+EXPORT_SYMBOL_GPL(gr_check_group_change);
78267+#endif
78268diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
78269new file mode 100644
78270index 0000000..14638ff
78271--- /dev/null
78272+++ b/grsecurity/grsec_exec.c
78273@@ -0,0 +1,188 @@
78274+#include <linux/kernel.h>
78275+#include <linux/sched.h>
78276+#include <linux/file.h>
78277+#include <linux/binfmts.h>
78278+#include <linux/fs.h>
78279+#include <linux/types.h>
78280+#include <linux/grdefs.h>
78281+#include <linux/grsecurity.h>
78282+#include <linux/grinternal.h>
78283+#include <linux/capability.h>
78284+#include <linux/module.h>
78285+#include <linux/compat.h>
78286+
78287+#include <asm/uaccess.h>
78288+
78289+#ifdef CONFIG_GRKERNSEC_EXECLOG
78290+static char gr_exec_arg_buf[132];
78291+static DEFINE_MUTEX(gr_exec_arg_mutex);
78292+#endif
78293+
78294+struct user_arg_ptr {
78295+#ifdef CONFIG_COMPAT
78296+ bool is_compat;
78297+#endif
78298+ union {
78299+ const char __user *const __user *native;
78300+#ifdef CONFIG_COMPAT
78301+ const compat_uptr_t __user *compat;
78302+#endif
78303+ } ptr;
78304+};
78305+
78306+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
78307+
78308+void
78309+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
78310+{
78311+#ifdef CONFIG_GRKERNSEC_EXECLOG
78312+ char *grarg = gr_exec_arg_buf;
78313+ unsigned int i, x, execlen = 0;
78314+ char c;
78315+
78316+ if (!((grsec_enable_execlog && grsec_enable_group &&
78317+ in_group_p(grsec_audit_gid))
78318+ || (grsec_enable_execlog && !grsec_enable_group)))
78319+ return;
78320+
78321+ mutex_lock(&gr_exec_arg_mutex);
78322+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
78323+
78324+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
78325+ const char __user *p;
78326+ unsigned int len;
78327+
78328+ p = get_user_arg_ptr(argv, i);
78329+ if (IS_ERR(p))
78330+ goto log;
78331+
78332+ len = strnlen_user(p, 128 - execlen);
78333+ if (len > 128 - execlen)
78334+ len = 128 - execlen;
78335+ else if (len > 0)
78336+ len--;
78337+ if (copy_from_user(grarg + execlen, p, len))
78338+ goto log;
78339+
78340+ /* rewrite unprintable characters */
78341+ for (x = 0; x < len; x++) {
78342+ c = *(grarg + execlen + x);
78343+ if (c < 32 || c > 126)
78344+ *(grarg + execlen + x) = ' ';
78345+ }
78346+
78347+ execlen += len;
78348+ *(grarg + execlen) = ' ';
78349+ *(grarg + execlen + 1) = '\0';
78350+ execlen++;
78351+ }
78352+
78353+ log:
78354+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
78355+ bprm->file->f_path.mnt, grarg);
78356+ mutex_unlock(&gr_exec_arg_mutex);
78357+#endif
78358+ return;
78359+}
78360+
78361+#ifdef CONFIG_GRKERNSEC
78362+extern int gr_acl_is_capable(const int cap);
78363+extern int gr_acl_is_capable_nolog(const int cap);
78364+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
78365+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
78366+extern int gr_chroot_is_capable(const int cap);
78367+extern int gr_chroot_is_capable_nolog(const int cap);
78368+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
78369+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
78370+#endif
78371+
78372+const char *captab_log[] = {
78373+ "CAP_CHOWN",
78374+ "CAP_DAC_OVERRIDE",
78375+ "CAP_DAC_READ_SEARCH",
78376+ "CAP_FOWNER",
78377+ "CAP_FSETID",
78378+ "CAP_KILL",
78379+ "CAP_SETGID",
78380+ "CAP_SETUID",
78381+ "CAP_SETPCAP",
78382+ "CAP_LINUX_IMMUTABLE",
78383+ "CAP_NET_BIND_SERVICE",
78384+ "CAP_NET_BROADCAST",
78385+ "CAP_NET_ADMIN",
78386+ "CAP_NET_RAW",
78387+ "CAP_IPC_LOCK",
78388+ "CAP_IPC_OWNER",
78389+ "CAP_SYS_MODULE",
78390+ "CAP_SYS_RAWIO",
78391+ "CAP_SYS_CHROOT",
78392+ "CAP_SYS_PTRACE",
78393+ "CAP_SYS_PACCT",
78394+ "CAP_SYS_ADMIN",
78395+ "CAP_SYS_BOOT",
78396+ "CAP_SYS_NICE",
78397+ "CAP_SYS_RESOURCE",
78398+ "CAP_SYS_TIME",
78399+ "CAP_SYS_TTY_CONFIG",
78400+ "CAP_MKNOD",
78401+ "CAP_LEASE",
78402+ "CAP_AUDIT_WRITE",
78403+ "CAP_AUDIT_CONTROL",
78404+ "CAP_SETFCAP",
78405+ "CAP_MAC_OVERRIDE",
78406+ "CAP_MAC_ADMIN",
78407+ "CAP_SYSLOG",
78408+ "CAP_WAKE_ALARM",
78409+ "CAP_BLOCK_SUSPEND"
78410+};
78411+
78412+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
78413+
78414+int gr_is_capable(const int cap)
78415+{
78416+#ifdef CONFIG_GRKERNSEC
78417+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
78418+ return 1;
78419+ return 0;
78420+#else
78421+ return 1;
78422+#endif
78423+}
78424+
78425+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
78426+{
78427+#ifdef CONFIG_GRKERNSEC
78428+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
78429+ return 1;
78430+ return 0;
78431+#else
78432+ return 1;
78433+#endif
78434+}
78435+
78436+int gr_is_capable_nolog(const int cap)
78437+{
78438+#ifdef CONFIG_GRKERNSEC
78439+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
78440+ return 1;
78441+ return 0;
78442+#else
78443+ return 1;
78444+#endif
78445+}
78446+
78447+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
78448+{
78449+#ifdef CONFIG_GRKERNSEC
78450+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
78451+ return 1;
78452+ return 0;
78453+#else
78454+ return 1;
78455+#endif
78456+}
78457+
78458+EXPORT_SYMBOL_GPL(gr_is_capable);
78459+EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
78460+EXPORT_SYMBOL_GPL(gr_task_is_capable);
78461+EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
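
The argv-flattening loop in gr_handle_exec_args() above concatenates arguments into a fixed 132-byte buffer, caps the payload at 128 bytes, and rewrites unprintable bytes to spaces so a crafted argv cannot inject terminal escapes into the kernel log. A userspace sketch of that loop, with plain strings standing in for user pointers (no copy_from_user/strnlen_user handling) and flatten_args() as an illustrative helper:

#include <stdio.h>
#include <string.h>

static char arg_buf[132];	/* 128 bytes of payload + separator + NUL */

static const char *flatten_args(int argc, char **argv)
{
	unsigned int i, x, execlen = 0;

	memset(arg_buf, 0, sizeof(arg_buf));
	for (i = 0; i < (unsigned int)argc && execlen < 128; i++) {
		unsigned int len = strlen(argv[i]);

		if (len > 128 - execlen)
			len = 128 - execlen;	/* truncate, as in the patch */
		memcpy(arg_buf + execlen, argv[i], len);
		/* rewrite unprintable characters */
		for (x = 0; x < len; x++)
			if (arg_buf[execlen + x] < 32 || arg_buf[execlen + x] > 126)
				arg_buf[execlen + x] = ' ';
		execlen += len;
		arg_buf[execlen++] = ' ';	/* argument separator */
		arg_buf[execlen] = '\0';
	}
	return arg_buf;
}

int main(void)
{
	char *argv[] = { "prog", "-x", "evil\x1b[2Jarg" };

	printf("[%s]\n", flatten_args(3, argv));	/* the escape byte becomes ' ' */
	return 0;
}
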
78462diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
78463new file mode 100644
78464index 0000000..06cc6ea
78465--- /dev/null
78466+++ b/grsecurity/grsec_fifo.c
78467@@ -0,0 +1,24 @@
78468+#include <linux/kernel.h>
78469+#include <linux/sched.h>
78470+#include <linux/fs.h>
78471+#include <linux/file.h>
78472+#include <linux/grinternal.h>
78473+
78474+int
78475+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
78476+ const struct dentry *dir, const int flag, const int acc_mode)
78477+{
78478+#ifdef CONFIG_GRKERNSEC_FIFO
78479+ const struct cred *cred = current_cred();
78480+
78481+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
78482+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
78483+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
78484+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
78485+ if (!inode_permission(dentry->d_inode, acc_mode))
78486+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
78487+ return -EACCES;
78488+ }
78489+#endif
78490+ return 0;
78491+}
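
The policy tested by gr_handle_fifo() above, as a standalone predicate: opening a FIFO planted in a sticky directory is refused unless O_EXCL is used, the opener owns the FIFO, or the FIFO's owner also owns the directory. Plain uid_t values stand in for the kuid_t/cred lookups, and fifo_denied() is an illustrative helper:

#include <stdio.h>
#include <sys/stat.h>

static int fifo_denied(mode_t fifo_mode, mode_t dir_mode,
		       uid_t fifo_uid, uid_t dir_uid, uid_t fsuid,
		       int o_excl)
{
	return S_ISFIFO(fifo_mode) && !o_excl &&
	       (dir_mode & S_ISVTX) &&
	       fifo_uid != dir_uid &&
	       fsuid != fifo_uid;
}

int main(void)
{
	/* /tmp-style sticky dir owned by root, FIFO planted by uid 1001,
	 * opener is uid 1000: denied */
	printf("%d\n", fifo_denied(S_IFIFO | 0666, S_IFDIR | 01777,
				   1001, 0, 1000, 0));	/* 1 */
	/* opener owns the FIFO: allowed */
	printf("%d\n", fifo_denied(S_IFIFO | 0666, S_IFDIR | 01777,
				   1000, 0, 1000, 0));	/* 0 */
	return 0;
}
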
78492diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
78493new file mode 100644
78494index 0000000..8ca18bf
78495--- /dev/null
78496+++ b/grsecurity/grsec_fork.c
78497@@ -0,0 +1,23 @@
78498+#include <linux/kernel.h>
78499+#include <linux/sched.h>
78500+#include <linux/grsecurity.h>
78501+#include <linux/grinternal.h>
78502+#include <linux/errno.h>
78503+
78504+void
78505+gr_log_forkfail(const int retval)
78506+{
78507+#ifdef CONFIG_GRKERNSEC_FORKFAIL
78508+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
78509+ switch (retval) {
78510+ case -EAGAIN:
78511+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
78512+ break;
78513+ case -ENOMEM:
78514+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
78515+ break;
78516+ }
78517+ }
78518+#endif
78519+ return;
78520+}
78521diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
78522new file mode 100644
78523index 0000000..b7cb191
78524--- /dev/null
78525+++ b/grsecurity/grsec_init.c
78526@@ -0,0 +1,286 @@
78527+#include <linux/kernel.h>
78528+#include <linux/sched.h>
78529+#include <linux/mm.h>
78530+#include <linux/gracl.h>
78531+#include <linux/slab.h>
78532+#include <linux/vmalloc.h>
78533+#include <linux/percpu.h>
78534+#include <linux/module.h>
78535+
78536+int grsec_enable_ptrace_readexec;
78537+int grsec_enable_setxid;
78538+int grsec_enable_symlinkown;
78539+kgid_t grsec_symlinkown_gid;
78540+int grsec_enable_brute;
78541+int grsec_enable_link;
78542+int grsec_enable_dmesg;
78543+int grsec_enable_harden_ptrace;
78544+int grsec_enable_harden_ipc;
78545+int grsec_enable_fifo;
78546+int grsec_enable_execlog;
78547+int grsec_enable_signal;
78548+int grsec_enable_forkfail;
78549+int grsec_enable_audit_ptrace;
78550+int grsec_enable_time;
78551+int grsec_enable_group;
78552+kgid_t grsec_audit_gid;
78553+int grsec_enable_chdir;
78554+int grsec_enable_mount;
78555+int grsec_enable_rofs;
78556+int grsec_deny_new_usb;
78557+int grsec_enable_chroot_findtask;
78558+int grsec_enable_chroot_mount;
78559+int grsec_enable_chroot_shmat;
78560+int grsec_enable_chroot_fchdir;
78561+int grsec_enable_chroot_double;
78562+int grsec_enable_chroot_pivot;
78563+int grsec_enable_chroot_chdir;
78564+int grsec_enable_chroot_chmod;
78565+int grsec_enable_chroot_mknod;
78566+int grsec_enable_chroot_nice;
78567+int grsec_enable_chroot_execlog;
78568+int grsec_enable_chroot_caps;
78569+int grsec_enable_chroot_sysctl;
78570+int grsec_enable_chroot_unix;
78571+int grsec_enable_tpe;
78572+kgid_t grsec_tpe_gid;
78573+int grsec_enable_blackhole;
78574+#ifdef CONFIG_IPV6_MODULE
78575+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
78576+#endif
78577+int grsec_lastack_retries;
78578+int grsec_enable_tpe_all;
78579+int grsec_enable_tpe_invert;
78580+int grsec_enable_socket_all;
78581+kgid_t grsec_socket_all_gid;
78582+int grsec_enable_socket_client;
78583+kgid_t grsec_socket_client_gid;
78584+int grsec_enable_socket_server;
78585+kgid_t grsec_socket_server_gid;
78586+int grsec_resource_logging;
78587+int grsec_disable_privio;
78588+int grsec_enable_log_rwxmaps;
78589+int grsec_lock;
78590+
78591+DEFINE_SPINLOCK(grsec_alert_lock);
78592+unsigned long grsec_alert_wtime = 0;
78593+unsigned long grsec_alert_fyet = 0;
78594+
78595+DEFINE_SPINLOCK(grsec_audit_lock);
78596+
78597+DEFINE_RWLOCK(grsec_exec_file_lock);
78598+
78599+char *gr_shared_page[4];
78600+
78601+char *gr_alert_log_fmt;
78602+char *gr_audit_log_fmt;
78603+char *gr_alert_log_buf;
78604+char *gr_audit_log_buf;
78605+
78606+extern struct gr_arg *gr_usermode;
78607+extern unsigned char *gr_system_salt;
78608+extern unsigned char *gr_system_sum;
78609+
78610+void __init
78611+grsecurity_init(void)
78612+{
78613+ int j;
78614+ /* create the per-cpu shared pages */
78615+
78616+#ifdef CONFIG_X86
78617+	memset((char *)(0x41a + PAGE_OFFSET), 0, 36);	/* wipe the BIOS keyboard buffer at 0x41a-0x43d */
78618+#endif
78619+
78620+ for (j = 0; j < 4; j++) {
78621+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
78622+ if (gr_shared_page[j] == NULL) {
78623+ panic("Unable to allocate grsecurity shared page");
78624+ return;
78625+ }
78626+ }
78627+
78628+ /* allocate log buffers */
78629+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
78630+ if (!gr_alert_log_fmt) {
78631+ panic("Unable to allocate grsecurity alert log format buffer");
78632+ return;
78633+ }
78634+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
78635+ if (!gr_audit_log_fmt) {
78636+ panic("Unable to allocate grsecurity audit log format buffer");
78637+ return;
78638+ }
78639+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
78640+ if (!gr_alert_log_buf) {
78641+ panic("Unable to allocate grsecurity alert log buffer");
78642+ return;
78643+ }
78644+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
78645+ if (!gr_audit_log_buf) {
78646+ panic("Unable to allocate grsecurity audit log buffer");
78647+ return;
78648+ }
78649+
78650+ /* allocate memory for authentication structure */
78651+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
78652+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
78653+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
78654+
78655+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
78656+ panic("Unable to allocate grsecurity authentication structure");
78657+ return;
78658+ }
78659+
78660+#ifdef CONFIG_GRKERNSEC_IO
78661+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
78662+ grsec_disable_privio = 1;
78663+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
78664+ grsec_disable_privio = 1;
78665+#else
78666+ grsec_disable_privio = 0;
78667+#endif
78668+#endif
78669+
78670+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
78671+ /* for backward compatibility, tpe_invert always defaults to on if
78672+ enabled in the kernel
78673+ */
78674+ grsec_enable_tpe_invert = 1;
78675+#endif
78676+
78677+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
78678+#ifndef CONFIG_GRKERNSEC_SYSCTL
78679+ grsec_lock = 1;
78680+#endif
78681+
78682+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78683+ grsec_enable_log_rwxmaps = 1;
78684+#endif
78685+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
78686+ grsec_enable_group = 1;
78687+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
78688+#endif
78689+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
78690+ grsec_enable_ptrace_readexec = 1;
78691+#endif
78692+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
78693+ grsec_enable_chdir = 1;
78694+#endif
78695+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
78696+ grsec_enable_harden_ptrace = 1;
78697+#endif
78698+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
78699+ grsec_enable_harden_ipc = 1;
78700+#endif
78701+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78702+ grsec_enable_mount = 1;
78703+#endif
78704+#ifdef CONFIG_GRKERNSEC_LINK
78705+ grsec_enable_link = 1;
78706+#endif
78707+#ifdef CONFIG_GRKERNSEC_BRUTE
78708+ grsec_enable_brute = 1;
78709+#endif
78710+#ifdef CONFIG_GRKERNSEC_DMESG
78711+ grsec_enable_dmesg = 1;
78712+#endif
78713+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
78714+ grsec_enable_blackhole = 1;
78715+ grsec_lastack_retries = 4;
78716+#endif
78717+#ifdef CONFIG_GRKERNSEC_FIFO
78718+ grsec_enable_fifo = 1;
78719+#endif
78720+#ifdef CONFIG_GRKERNSEC_EXECLOG
78721+ grsec_enable_execlog = 1;
78722+#endif
78723+#ifdef CONFIG_GRKERNSEC_SETXID
78724+ grsec_enable_setxid = 1;
78725+#endif
78726+#ifdef CONFIG_GRKERNSEC_SIGNAL
78727+ grsec_enable_signal = 1;
78728+#endif
78729+#ifdef CONFIG_GRKERNSEC_FORKFAIL
78730+ grsec_enable_forkfail = 1;
78731+#endif
78732+#ifdef CONFIG_GRKERNSEC_TIME
78733+ grsec_enable_time = 1;
78734+#endif
78735+#ifdef CONFIG_GRKERNSEC_RESLOG
78736+ grsec_resource_logging = 1;
78737+#endif
78738+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
78739+ grsec_enable_chroot_findtask = 1;
78740+#endif
78741+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
78742+ grsec_enable_chroot_unix = 1;
78743+#endif
78744+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
78745+ grsec_enable_chroot_mount = 1;
78746+#endif
78747+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
78748+ grsec_enable_chroot_fchdir = 1;
78749+#endif
78750+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
78751+ grsec_enable_chroot_shmat = 1;
78752+#endif
78753+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
78754+ grsec_enable_audit_ptrace = 1;
78755+#endif
78756+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
78757+ grsec_enable_chroot_double = 1;
78758+#endif
78759+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
78760+ grsec_enable_chroot_pivot = 1;
78761+#endif
78762+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
78763+ grsec_enable_chroot_chdir = 1;
78764+#endif
78765+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
78766+ grsec_enable_chroot_chmod = 1;
78767+#endif
78768+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
78769+ grsec_enable_chroot_mknod = 1;
78770+#endif
78771+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
78772+ grsec_enable_chroot_nice = 1;
78773+#endif
78774+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
78775+ grsec_enable_chroot_execlog = 1;
78776+#endif
78777+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
78778+ grsec_enable_chroot_caps = 1;
78779+#endif
78780+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
78781+ grsec_enable_chroot_sysctl = 1;
78782+#endif
78783+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
78784+ grsec_enable_symlinkown = 1;
78785+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
78786+#endif
78787+#ifdef CONFIG_GRKERNSEC_TPE
78788+ grsec_enable_tpe = 1;
78789+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
78790+#ifdef CONFIG_GRKERNSEC_TPE_ALL
78791+ grsec_enable_tpe_all = 1;
78792+#endif
78793+#endif
78794+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
78795+ grsec_enable_socket_all = 1;
78796+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
78797+#endif
78798+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
78799+ grsec_enable_socket_client = 1;
78800+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
78801+#endif
78802+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78803+ grsec_enable_socket_server = 1;
78804+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
78805+#endif
78806+#endif
78807+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
78808+ grsec_deny_new_usb = 1;
78809+#endif
78810+
78811+ return;
78812+}
78813diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
78814new file mode 100644
78815index 0000000..1773300
78816--- /dev/null
78817+++ b/grsecurity/grsec_ipc.c
78818@@ -0,0 +1,48 @@
78819+#include <linux/kernel.h>
78820+#include <linux/mm.h>
78821+#include <linux/sched.h>
78822+#include <linux/file.h>
78823+#include <linux/ipc.h>
78824+#include <linux/ipc_namespace.h>
78825+#include <linux/grsecurity.h>
78826+#include <linux/grinternal.h>
78827+
78828+int
78829+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
78830+{
78831+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
78832+ int write;
78833+ int orig_granted_mode;
78834+ kuid_t euid;
78835+ kgid_t egid;
78836+
78837+ if (!grsec_enable_harden_ipc)
78838+ return 1;
78839+
78840+ euid = current_euid();
78841+ egid = current_egid();
78842+
78843+ write = requested_mode & 00002;
78844+ orig_granted_mode = ipcp->mode;
78845+
78846+ if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
78847+ orig_granted_mode >>= 6;
78848+ else {
78849+ /* if likely wrong permissions, lock to user */
78850+ if (orig_granted_mode & 0007)
78851+ orig_granted_mode = 0;
78852+		/* otherwise do an egid-only check */
78853+ else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
78854+ orig_granted_mode >>= 3;
78855+ /* otherwise, no access */
78856+ else
78857+ orig_granted_mode = 0;
78858+ }
78859+ if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
78860+ !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
78861+ gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
78862+ return 0;
78863+ }
78864+#endif
78865+ return 1;
78866+}
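
A worked sketch of the re-check in gr_ipc_permitted() above. The hardened path recomputes the granted class strictly: owner bits for the owner; group bits only when no world bits are set; otherwise nothing ("likely wrong permissions, lock to user"). Access is then refused only where stock IPC permissions granted a bit the strict view does not. Plain ints stand in for kuid_t/kgid_t, and ipc_permitted() is an illustrative helper:

#include <stdio.h>

static int ipc_permitted(unsigned int requested, unsigned int granted,
			 unsigned int mode, unsigned int euid, unsigned int egid,
			 unsigned int cuid, unsigned int cgid,
			 unsigned int ipc_uid, unsigned int ipc_gid)
{
	unsigned int strict = mode;

	if (euid == cuid || euid == ipc_uid)
		strict >>= 6;		/* owner class */
	else if (strict & 0007)
		strict = 0;		/* world bits present: lock to user */
	else if (egid == cgid || egid == ipc_gid)
		strict >>= 3;		/* group class */
	else
		strict = 0;		/* no access */

	/* deny only what stock permissions granted but the strict view does not */
	if (!(requested & ~granted & 0007) && (requested & ~strict & 0007))
		return 0;
	return 1;
}

int main(void)
{
	/* 0666 segment created by uid 1000; uid 1001 asks to write (0002).
	 * Stock IPC grants it via the world bits; the strict view does not. */
	printf("%d\n", ipc_permitted(0002, 0666, 0666,
				     1001, 1001, 1000, 1000, 1000, 1000)); /* 0 */
	return 0;
}
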
78867diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
78868new file mode 100644
78869index 0000000..5e05e20
78870--- /dev/null
78871+++ b/grsecurity/grsec_link.c
78872@@ -0,0 +1,58 @@
78873+#include <linux/kernel.h>
78874+#include <linux/sched.h>
78875+#include <linux/fs.h>
78876+#include <linux/file.h>
78877+#include <linux/grinternal.h>
78878+
78879+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
78880+{
78881+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
78882+ const struct inode *link_inode = link->dentry->d_inode;
78883+
78884+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
78885+ /* ignore root-owned links, e.g. /proc/self */
78886+ gr_is_global_nonroot(link_inode->i_uid) && target &&
78887+ !uid_eq(link_inode->i_uid, target->i_uid)) {
78888+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
78889+ return 1;
78890+ }
78891+#endif
78892+ return 0;
78893+}
78894+
78895+int
78896+gr_handle_follow_link(const struct inode *parent,
78897+ const struct inode *inode,
78898+ const struct dentry *dentry, const struct vfsmount *mnt)
78899+{
78900+#ifdef CONFIG_GRKERNSEC_LINK
78901+ const struct cred *cred = current_cred();
78902+
78903+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
78904+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
78905+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
78906+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
78907+ return -EACCES;
78908+ }
78909+#endif
78910+ return 0;
78911+}
78912+
78913+int
78914+gr_handle_hardlink(const struct dentry *dentry,
78915+ const struct vfsmount *mnt,
78916+ struct inode *inode, const int mode, const struct filename *to)
78917+{
78918+#ifdef CONFIG_GRKERNSEC_LINK
78919+ const struct cred *cred = current_cred();
78920+
78921+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
78922+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
78923+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
78924+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
78925+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
78926+ return -EPERM;
78927+ }
78928+#endif
78929+ return 0;
78930+}
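
gr_handle_follow_link() above implements the restriction later mainlined as fs.protected_symlinks: following a symlink found in a sticky, world-writable directory is refused unless the follower owns the link or the link's owner also owns the directory. A sketch of the predicate, with plain uid_t values standing in for the inode/cred fields and follow_denied() as an illustrative helper:

#include <stdio.h>
#include <sys/stat.h>

static int follow_denied(mode_t dir_mode, uid_t dir_uid,
			 uid_t link_uid, uid_t fsuid)
{
	return (dir_mode & S_ISVTX) && (dir_mode & S_IWOTH) &&
	       dir_uid != link_uid && fsuid != link_uid;
}

int main(void)
{
	/* attacker (uid 1001) plants a link in a root-owned /tmp;
	 * root follows it: denied */
	printf("%d\n", follow_denied(S_IFDIR | 01777, 0, 1001, 0));	/* 1 */
	/* the link's owner follows their own link: allowed */
	printf("%d\n", follow_denied(S_IFDIR | 01777, 0, 1001, 1001));	/* 0 */
	return 0;
}
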
78931diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
78932new file mode 100644
78933index 0000000..dbe0a6b
78934--- /dev/null
78935+++ b/grsecurity/grsec_log.c
78936@@ -0,0 +1,341 @@
78937+#include <linux/kernel.h>
78938+#include <linux/sched.h>
78939+#include <linux/file.h>
78940+#include <linux/tty.h>
78941+#include <linux/fs.h>
78942+#include <linux/mm.h>
78943+#include <linux/grinternal.h>
78944+
78945+#ifdef CONFIG_TREE_PREEMPT_RCU
78946+#define DISABLE_PREEMPT() preempt_disable()
78947+#define ENABLE_PREEMPT() preempt_enable()
78948+#else
78949+#define DISABLE_PREEMPT()
78950+#define ENABLE_PREEMPT()
78951+#endif
78952+
78953+#define BEGIN_LOCKS(x) \
78954+ DISABLE_PREEMPT(); \
78955+ rcu_read_lock(); \
78956+ read_lock(&tasklist_lock); \
78957+ read_lock(&grsec_exec_file_lock); \
78958+ if (x != GR_DO_AUDIT) \
78959+ spin_lock(&grsec_alert_lock); \
78960+ else \
78961+ spin_lock(&grsec_audit_lock)
78962+
78963+#define END_LOCKS(x) \
78964+ if (x != GR_DO_AUDIT) \
78965+ spin_unlock(&grsec_alert_lock); \
78966+ else \
78967+ spin_unlock(&grsec_audit_lock); \
78968+ read_unlock(&grsec_exec_file_lock); \
78969+ read_unlock(&tasklist_lock); \
78970+ rcu_read_unlock(); \
78971+ ENABLE_PREEMPT(); \
78972+ if (x == GR_DONT_AUDIT) \
78973+ gr_handle_alertkill(current)
78974+
78975+enum {
78976+ FLOODING,
78977+ NO_FLOODING
78978+};
78979+
78980+extern char *gr_alert_log_fmt;
78981+extern char *gr_audit_log_fmt;
78982+extern char *gr_alert_log_buf;
78983+extern char *gr_audit_log_buf;
78984+
78985+static int gr_log_start(int audit)
78986+{
78987+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
78988+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
78989+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
78990+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
78991+ unsigned long curr_secs = get_seconds();
78992+
78993+ if (audit == GR_DO_AUDIT)
78994+ goto set_fmt;
78995+
78996+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
78997+ grsec_alert_wtime = curr_secs;
78998+ grsec_alert_fyet = 0;
78999+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
79000+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
79001+ grsec_alert_fyet++;
79002+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
79003+ grsec_alert_wtime = curr_secs;
79004+ grsec_alert_fyet++;
79005+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
79006+ return FLOODING;
79007+ }
79008+ else return FLOODING;
79009+
79010+set_fmt:
79011+#endif
79012+ memset(buf, 0, PAGE_SIZE);
79013+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
79014+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
79015+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
79016+ } else if (current->signal->curr_ip) {
79017+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
79018+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
79019+ } else if (gr_acl_is_enabled()) {
79020+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
79021+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
79022+ } else {
79023+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
79024+ strcpy(buf, fmt);
79025+ }
79026+
79027+ return NO_FLOODING;
79028+}
79029+
79030+static void gr_log_middle(int audit, const char *msg, va_list ap)
79031+ __attribute__ ((format (printf, 2, 0)));
79032+
79033+static void gr_log_middle(int audit, const char *msg, va_list ap)
79034+{
79035+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
79036+ unsigned int len = strlen(buf);
79037+
79038+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
79039+
79040+ return;
79041+}
79042+
79043+static void gr_log_middle_varargs(int audit, const char *msg, ...)
79044+ __attribute__ ((format (printf, 2, 3)));
79045+
79046+static void gr_log_middle_varargs(int audit, const char *msg, ...)
79047+{
79048+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
79049+ unsigned int len = strlen(buf);
79050+ va_list ap;
79051+
79052+ va_start(ap, msg);
79053+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
79054+ va_end(ap);
79055+
79056+ return;
79057+}
79058+
79059+static void gr_log_end(int audit, int append_default)
79060+{
79061+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
79062+ if (append_default) {
79063+ struct task_struct *task = current;
79064+ struct task_struct *parent = task->real_parent;
79065+ const struct cred *cred = __task_cred(task);
79066+ const struct cred *pcred = __task_cred(parent);
79067+ unsigned int len = strlen(buf);
79068+
79069+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
79070+ }
79071+
79072+ printk("%s\n", buf);
79073+
79074+ return;
79075+}
79076+
79077+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
79078+{
79079+ int logtype;
79080+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
79081+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
79082+ void *voidptr = NULL;
79083+ int num1 = 0, num2 = 0;
79084+ unsigned long ulong1 = 0, ulong2 = 0;
79085+ struct dentry *dentry = NULL;
79086+ struct vfsmount *mnt = NULL;
79087+ struct file *file = NULL;
79088+ struct task_struct *task = NULL;
79089+ struct vm_area_struct *vma = NULL;
79090+ const struct cred *cred, *pcred;
79091+ va_list ap;
79092+
79093+ BEGIN_LOCKS(audit);
79094+ logtype = gr_log_start(audit);
79095+ if (logtype == FLOODING) {
79096+ END_LOCKS(audit);
79097+ return;
79098+ }
79099+ va_start(ap, argtypes);
79100+ switch (argtypes) {
79101+ case GR_TTYSNIFF:
79102+ task = va_arg(ap, struct task_struct *);
79103+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
79104+ break;
79105+ case GR_SYSCTL_HIDDEN:
79106+ str1 = va_arg(ap, char *);
79107+ gr_log_middle_varargs(audit, msg, result, str1);
79108+ break;
79109+ case GR_RBAC:
79110+ dentry = va_arg(ap, struct dentry *);
79111+ mnt = va_arg(ap, struct vfsmount *);
79112+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
79113+ break;
79114+ case GR_RBAC_STR:
79115+ dentry = va_arg(ap, struct dentry *);
79116+ mnt = va_arg(ap, struct vfsmount *);
79117+ str1 = va_arg(ap, char *);
79118+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
79119+ break;
79120+ case GR_STR_RBAC:
79121+ str1 = va_arg(ap, char *);
79122+ dentry = va_arg(ap, struct dentry *);
79123+ mnt = va_arg(ap, struct vfsmount *);
79124+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
79125+ break;
79126+ case GR_RBAC_MODE2:
79127+ dentry = va_arg(ap, struct dentry *);
79128+ mnt = va_arg(ap, struct vfsmount *);
79129+ str1 = va_arg(ap, char *);
79130+ str2 = va_arg(ap, char *);
79131+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
79132+ break;
79133+ case GR_RBAC_MODE3:
79134+ dentry = va_arg(ap, struct dentry *);
79135+ mnt = va_arg(ap, struct vfsmount *);
79136+ str1 = va_arg(ap, char *);
79137+ str2 = va_arg(ap, char *);
79138+ str3 = va_arg(ap, char *);
79139+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
79140+ break;
79141+ case GR_FILENAME:
79142+ dentry = va_arg(ap, struct dentry *);
79143+ mnt = va_arg(ap, struct vfsmount *);
79144+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
79145+ break;
79146+ case GR_STR_FILENAME:
79147+ str1 = va_arg(ap, char *);
79148+ dentry = va_arg(ap, struct dentry *);
79149+ mnt = va_arg(ap, struct vfsmount *);
79150+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
79151+ break;
79152+ case GR_FILENAME_STR:
79153+ dentry = va_arg(ap, struct dentry *);
79154+ mnt = va_arg(ap, struct vfsmount *);
79155+ str1 = va_arg(ap, char *);
79156+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
79157+ break;
79158+ case GR_FILENAME_TWO_INT:
79159+ dentry = va_arg(ap, struct dentry *);
79160+ mnt = va_arg(ap, struct vfsmount *);
79161+ num1 = va_arg(ap, int);
79162+ num2 = va_arg(ap, int);
79163+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
79164+ break;
79165+ case GR_FILENAME_TWO_INT_STR:
79166+ dentry = va_arg(ap, struct dentry *);
79167+ mnt = va_arg(ap, struct vfsmount *);
79168+ num1 = va_arg(ap, int);
79169+ num2 = va_arg(ap, int);
79170+ str1 = va_arg(ap, char *);
79171+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
79172+ break;
79173+ case GR_TEXTREL:
79174+ file = va_arg(ap, struct file *);
79175+ ulong1 = va_arg(ap, unsigned long);
79176+ ulong2 = va_arg(ap, unsigned long);
79177+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
79178+ break;
79179+ case GR_PTRACE:
79180+ task = va_arg(ap, struct task_struct *);
79181+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
79182+ break;
79183+ case GR_RESOURCE:
79184+ task = va_arg(ap, struct task_struct *);
79185+ cred = __task_cred(task);
79186+ pcred = __task_cred(task->real_parent);
79187+ ulong1 = va_arg(ap, unsigned long);
79188+ str1 = va_arg(ap, char *);
79189+ ulong2 = va_arg(ap, unsigned long);
79190+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
79191+ break;
79192+ case GR_CAP:
79193+ task = va_arg(ap, struct task_struct *);
79194+ cred = __task_cred(task);
79195+ pcred = __task_cred(task->real_parent);
79196+ str1 = va_arg(ap, char *);
79197+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
79198+ break;
79199+ case GR_SIG:
79200+ str1 = va_arg(ap, char *);
79201+ voidptr = va_arg(ap, void *);
79202+ gr_log_middle_varargs(audit, msg, str1, voidptr);
79203+ break;
79204+ case GR_SIG2:
79205+ task = va_arg(ap, struct task_struct *);
79206+ cred = __task_cred(task);
79207+ pcred = __task_cred(task->real_parent);
79208+ num1 = va_arg(ap, int);
79209+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
79210+ break;
79211+ case GR_CRASH1:
79212+ task = va_arg(ap, struct task_struct *);
79213+ cred = __task_cred(task);
79214+ pcred = __task_cred(task->real_parent);
79215+ ulong1 = va_arg(ap, unsigned long);
79216+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
79217+ break;
79218+ case GR_CRASH2:
79219+ task = va_arg(ap, struct task_struct *);
79220+ cred = __task_cred(task);
79221+ pcred = __task_cred(task->real_parent);
79222+ ulong1 = va_arg(ap, unsigned long);
79223+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
79224+ break;
79225+ case GR_RWXMAP:
79226+ file = va_arg(ap, struct file *);
79227+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
79228+ break;
79229+ case GR_RWXMAPVMA:
79230+ vma = va_arg(ap, struct vm_area_struct *);
79231+ if (vma->vm_file)
79232+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
79233+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
79234+ str1 = "<stack>";
79235+ else if (vma->vm_start <= current->mm->brk &&
79236+ vma->vm_end >= current->mm->start_brk)
79237+ str1 = "<heap>";
79238+ else
79239+ str1 = "<anonymous mapping>";
79240+ gr_log_middle_varargs(audit, msg, str1);
79241+ break;
79242+ case GR_PSACCT:
79243+ {
79244+ unsigned int wday, cday;
79245+ __u8 whr, chr;
79246+ __u8 wmin, cmin;
79247+ __u8 wsec, csec;
79248+ char cur_tty[64] = { 0 };
79249+ char parent_tty[64] = { 0 };
79250+
79251+ task = va_arg(ap, struct task_struct *);
79252+ wday = va_arg(ap, unsigned int);
79253+ cday = va_arg(ap, unsigned int);
79254+ whr = va_arg(ap, int);
79255+ chr = va_arg(ap, int);
79256+ wmin = va_arg(ap, int);
79257+ cmin = va_arg(ap, int);
79258+ wsec = va_arg(ap, int);
79259+ csec = va_arg(ap, int);
79260+ ulong1 = va_arg(ap, unsigned long);
79261+ cred = __task_cred(task);
79262+ pcred = __task_cred(task->real_parent);
79263+
79264+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
79265+ }
79266+ break;
79267+ default:
79268+ gr_log_middle(audit, msg, ap);
79269+ }
79270+ va_end(ap);
79271+ // these don't need DEFAULTSECARGS printed on the end
79272+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
79273+ gr_log_end(audit, 0);
79274+ else
79275+ gr_log_end(audit, 1);
79276+ END_LOCKS(audit);
79277+}
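
The flood control at the top of gr_log_start() admits one window-opening alert plus CONFIG_GRKERNSEC_FLOODBURST further alerts per CONFIG_GRKERNSEC_FLOODTIME-second window, announces suppression exactly once, then stays quiet until the window rolls over. A userspace sketch with illustrative constants, a caller-supplied clock instead of get_seconds(), and plain comparisons instead of the jiffy-safe time_after()/time_before_eq():

#include <stdio.h>

#define FLOODTIME  10	/* seconds per window (illustrative) */
#define FLOODBURST 6	/* alerts allowed per window (illustrative) */

static unsigned long wtime;	/* window start */
static unsigned long fyet;	/* alerts seen in this window */

static int alert_allowed(unsigned long now_secs)
{
	if (!wtime || now_secs > wtime + FLOODTIME) {
		wtime = now_secs;	/* open a new window */
		fyet = 0;
	} else if (fyet < FLOODBURST) {
		fyet++;
	} else if (fyet == FLOODBURST) {
		wtime = now_secs;	/* restart window, announce once */
		fyet++;
		printf("more alerts, logging disabled for %d seconds\n", FLOODTIME);
		return 0;
	} else {
		return 0;		/* already suppressed */
	}
	return 1;
}

int main(void)
{
	unsigned long t;
	int allowed = 0;

	for (t = 1; t <= 9; t++)	/* 9 alerts in one window */
		allowed += alert_allowed(100);
	/* 7 pass: the window-opening alert plus FLOODBURST */
	printf("allowed %d of 9\n", allowed);
	return 0;
}
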
79278diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
79279new file mode 100644
79280index 0000000..0e39d8c
79281--- /dev/null
79282+++ b/grsecurity/grsec_mem.c
79283@@ -0,0 +1,48 @@
79284+#include <linux/kernel.h>
79285+#include <linux/sched.h>
79286+#include <linux/mm.h>
79287+#include <linux/mman.h>
79288+#include <linux/module.h>
79289+#include <linux/grinternal.h>
79290+
79291+void gr_handle_msr_write(void)
79292+{
79293+ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
79294+ return;
79295+}
79296+EXPORT_SYMBOL_GPL(gr_handle_msr_write);
79297+
79298+void
79299+gr_handle_ioperm(void)
79300+{
79301+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
79302+ return;
79303+}
79304+
79305+void
79306+gr_handle_iopl(void)
79307+{
79308+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
79309+ return;
79310+}
79311+
79312+void
79313+gr_handle_mem_readwrite(u64 from, u64 to)
79314+{
79315+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
79316+ return;
79317+}
79318+
79319+void
79320+gr_handle_vm86(void)
79321+{
79322+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
79323+ return;
79324+}
79325+
79326+void
79327+gr_log_badprocpid(const char *entry)
79328+{
79329+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
79330+ return;
79331+}
79332diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
79333new file mode 100644
79334index 0000000..cd9e124
79335--- /dev/null
79336+++ b/grsecurity/grsec_mount.c
79337@@ -0,0 +1,65 @@
79338+#include <linux/kernel.h>
79339+#include <linux/sched.h>
79340+#include <linux/mount.h>
79341+#include <linux/major.h>
79342+#include <linux/grsecurity.h>
79343+#include <linux/grinternal.h>
79344+
79345+void
79346+gr_log_remount(const char *devname, const int retval)
79347+{
79348+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
79349+ if (grsec_enable_mount && (retval >= 0))
79350+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
79351+#endif
79352+ return;
79353+}
79354+
79355+void
79356+gr_log_unmount(const char *devname, const int retval)
79357+{
79358+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
79359+ if (grsec_enable_mount && (retval >= 0))
79360+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
79361+#endif
79362+ return;
79363+}
79364+
79365+void
79366+gr_log_mount(const char *from, const char *to, const int retval)
79367+{
79368+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
79369+ if (grsec_enable_mount && (retval >= 0))
79370+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
79371+#endif
79372+ return;
79373+}
79374+
79375+int
79376+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
79377+{
79378+#ifdef CONFIG_GRKERNSEC_ROFS
79379+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
79380+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
79381+ return -EPERM;
79382+ } else
79383+ return 0;
79384+#endif
79385+ return 0;
79386+}
79387+
79388+int
79389+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
79390+{
79391+#ifdef CONFIG_GRKERNSEC_ROFS
79392+ struct inode *inode = dentry->d_inode;
79393+
79394+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
79395+ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
79396+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
79397+ return -EPERM;
79398+ } else
79399+ return 0;
79400+#endif
79401+ return 0;
79402+}
79403diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
79404new file mode 100644
79405index 0000000..6ee9d50
79406--- /dev/null
79407+++ b/grsecurity/grsec_pax.c
79408@@ -0,0 +1,45 @@
79409+#include <linux/kernel.h>
79410+#include <linux/sched.h>
79411+#include <linux/mm.h>
79412+#include <linux/file.h>
79413+#include <linux/grinternal.h>
79414+#include <linux/grsecurity.h>
79415+
79416+void
79417+gr_log_textrel(struct vm_area_struct * vma)
79418+{
79419+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
79420+ if (grsec_enable_log_rwxmaps)
79421+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
79422+#endif
79423+ return;
79424+}
79425+
79426+void gr_log_ptgnustack(struct file *file)
79427+{
79428+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
79429+ if (grsec_enable_log_rwxmaps)
79430+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
79431+#endif
79432+ return;
79433+}
79434+
79435+void
79436+gr_log_rwxmmap(struct file *file)
79437+{
79438+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
79439+ if (grsec_enable_log_rwxmaps)
79440+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
79441+#endif
79442+ return;
79443+}
79444+
79445+void
79446+gr_log_rwxmprotect(struct vm_area_struct *vma)
79447+{
79448+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
79449+ if (grsec_enable_log_rwxmaps)
79450+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
79451+#endif
79452+ return;
79453+}
79454diff --git a/grsecurity/grsec_proc.c b/grsecurity/grsec_proc.c
79455new file mode 100644
79456index 0000000..2005a3a
79457--- /dev/null
79458+++ b/grsecurity/grsec_proc.c
79459@@ -0,0 +1,20 @@
79460+#include <linux/kernel.h>
79461+#include <linux/sched.h>
79462+#include <linux/grsecurity.h>
79463+#include <linux/grinternal.h>
79464+
79465+int gr_proc_is_restricted(void)
79466+{
79467+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
79468+ const struct cred *cred = current_cred();
79469+#endif
79470+
79471+#ifdef CONFIG_GRKERNSEC_PROC_USER
79472+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
79473+ return -EACCES;
79474+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
79475+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
79476+ return -EACCES;
79477+#endif
79478+ return 0;
79479+}
79480diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
79481new file mode 100644
79482index 0000000..f7f29aa
79483--- /dev/null
79484+++ b/grsecurity/grsec_ptrace.c
79485@@ -0,0 +1,30 @@
79486+#include <linux/kernel.h>
79487+#include <linux/sched.h>
79488+#include <linux/grinternal.h>
79489+#include <linux/security.h>
79490+
79491+void
79492+gr_audit_ptrace(struct task_struct *task)
79493+{
79494+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
79495+ if (grsec_enable_audit_ptrace)
79496+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
79497+#endif
79498+ return;
79499+}
79500+
79501+int
79502+gr_ptrace_readexec(struct file *file, int unsafe_flags)
79503+{
79504+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
79505+ const struct dentry *dentry = file->f_path.dentry;
79506+ const struct vfsmount *mnt = file->f_path.mnt;
79507+
79508+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
79509+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
79510+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
79511+ return -EACCES;
79512+ }
79513+#endif
79514+ return 0;
79515+}
79516diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
79517new file mode 100644
79518index 0000000..3860c7e
79519--- /dev/null
79520+++ b/grsecurity/grsec_sig.c
79521@@ -0,0 +1,236 @@
79522+#include <linux/kernel.h>
79523+#include <linux/sched.h>
79524+#include <linux/fs.h>
79525+#include <linux/delay.h>
79526+#include <linux/grsecurity.h>
79527+#include <linux/grinternal.h>
79528+#include <linux/hardirq.h>
79529+
79530+char *signames[] = {
79531+ [SIGSEGV] = "Segmentation fault",
79532+ [SIGILL] = "Illegal instruction",
79533+ [SIGABRT] = "Abort",
79534+ [SIGBUS] = "Invalid alignment/Bus error"
79535+};
79536+
79537+void
79538+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
79539+{
79540+#ifdef CONFIG_GRKERNSEC_SIGNAL
79541+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
79542+ (sig == SIGABRT) || (sig == SIGBUS))) {
79543+ if (task_pid_nr(t) == task_pid_nr(current)) {
79544+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
79545+ } else {
79546+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
79547+ }
79548+ }
79549+#endif
79550+ return;
79551+}
79552+
79553+int
79554+gr_handle_signal(const struct task_struct *p, const int sig)
79555+{
79556+#ifdef CONFIG_GRKERNSEC
79557+ /* ignore the 0 signal for protected task checks */
79558+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
79559+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
79560+ return -EPERM;
79561+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
79562+ return -EPERM;
79563+ }
79564+#endif
79565+ return 0;
79566+}
79567+
79568+#ifdef CONFIG_GRKERNSEC
79569+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
79570+
79571+int gr_fake_force_sig(int sig, struct task_struct *t)
79572+{
79573+ unsigned long int flags;
79574+ int ret, blocked, ignored;
79575+ struct k_sigaction *action;
79576+
79577+ spin_lock_irqsave(&t->sighand->siglock, flags);
79578+ action = &t->sighand->action[sig-1];
79579+ ignored = action->sa.sa_handler == SIG_IGN;
79580+ blocked = sigismember(&t->blocked, sig);
79581+ if (blocked || ignored) {
79582+ action->sa.sa_handler = SIG_DFL;
79583+ if (blocked) {
79584+ sigdelset(&t->blocked, sig);
79585+ recalc_sigpending_and_wake(t);
79586+ }
79587+ }
79588+ if (action->sa.sa_handler == SIG_DFL)
79589+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
79590+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
79591+
79592+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
79593+
79594+ return ret;
79595+}
79596+#endif
79597+
79598+#define GR_USER_BAN_TIME (15 * 60)
79599+#define GR_DAEMON_BRUTE_TIME (30 * 60)
79600+
79601+void gr_handle_brute_attach(int dumpable)
79602+{
79603+#ifdef CONFIG_GRKERNSEC_BRUTE
79604+ struct task_struct *p = current;
79605+ kuid_t uid = GLOBAL_ROOT_UID;
79606+ int daemon = 0;
79607+
79608+ if (!grsec_enable_brute)
79609+ return;
79610+
79611+ rcu_read_lock();
79612+ read_lock(&tasklist_lock);
79613+ read_lock(&grsec_exec_file_lock);
79614+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
79615+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
79616+ p->real_parent->brute = 1;
79617+ daemon = 1;
79618+ } else {
79619+ const struct cred *cred = __task_cred(p), *cred2;
79620+ struct task_struct *tsk, *tsk2;
79621+
79622+ if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
79623+ struct user_struct *user;
79624+
79625+ uid = cred->uid;
79626+
79627+			/* this ref is put (free_uid) on the first suid exec after the ban expires */
79628+ user = find_user(uid);
79629+ if (user == NULL)
79630+ goto unlock;
79631+ user->suid_banned = 1;
79632+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
79633+ if (user->suid_ban_expires == ~0UL)
79634+ user->suid_ban_expires--;
79635+
79636+ /* only kill other threads of the same binary, from the same user */
79637+ do_each_thread(tsk2, tsk) {
79638+ cred2 = __task_cred(tsk);
79639+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
79640+ gr_fake_force_sig(SIGKILL, tsk);
79641+ } while_each_thread(tsk2, tsk);
79642+ }
79643+ }
79644+unlock:
79645+ read_unlock(&grsec_exec_file_lock);
79646+ read_unlock(&tasklist_lock);
79647+ rcu_read_unlock();
79648+
79649+ if (gr_is_global_nonroot(uid))
79650+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
79651+ else if (daemon)
79652+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
79653+
79654+#endif
79655+ return;
79656+}
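
Two details of the ban bookkeeping above are easy to miss: the expiry is an absolute get_seconds() timestamp, and ~0UL appears to be reserved as a "never expires" sentinel (suid_ban_expired() further down treats it specially), so a computed expiry that happens to collide with the sentinel is nudged back by one second. A condensed sketch of that arithmetic (hypothetical helper name):

/* sketch of the suid-ban expiry computation used above */
unsigned long compute_suid_ban_expiry(unsigned long now)
{
	unsigned long expires = now + (15 * 60);	/* GR_USER_BAN_TIME */

	if (expires == ~0UL)	/* avoid colliding with the permanent-ban sentinel */
		expires--;
	return expires;
}
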
79657+
79658+void gr_handle_brute_check(void)
79659+{
79660+#ifdef CONFIG_GRKERNSEC_BRUTE
79661+ struct task_struct *p = current;
79662+
79663+ if (unlikely(p->brute)) {
79664+ if (!grsec_enable_brute)
79665+ p->brute = 0;
79666+ else if (time_before(get_seconds(), p->brute_expires))
79667+ msleep(30 * 1000);
79668+ }
79669+#endif
79670+ return;
79671+}
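
The check above throttles a repeatedly-crashing binary by sleeping each new instance for 30 seconds while the brute window is open. time_before() is the kernel's wrap-safe unsigned comparison; a standalone rendering of its semantics (illustrative macro, mirroring the shape of the definitions in include/linux/jiffies.h):

/* wrap-safe "is a earlier than b" comparison, as used above */
#define time_before_demo(a, b)	((long)((b) - (a)) > 0)

int brute_should_throttle(unsigned long now, unsigned long expires)
{
	return time_before_demo(now, expires);	/* 1 while the window is open */
}
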
79672+
79673+void gr_handle_kernel_exploit(void)
79674+{
79675+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
79676+ const struct cred *cred;
79677+ struct task_struct *tsk, *tsk2;
79678+ struct user_struct *user;
79679+ kuid_t uid;
79680+
79681+ if (in_irq() || in_serving_softirq() || in_nmi())
79682+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
79683+
79684+ uid = current_uid();
79685+
79686+ if (gr_is_global_root(uid))
79687+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
79688+ else {
79689+ /* kill all the processes of this user, hold a reference
79690+  * to their creds struct, and prevent them from creating
79691+  * another process until system reset
79692+  */
79693+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
79694+ GR_GLOBAL_UID(uid));
79695+ /* we intentionally leak this ref */
79696+ user = get_uid(current->cred->user);
79697+ if (user)
79698+ user->kernel_banned = 1;
79699+
79700+ /* kill all processes of this user */
79701+ read_lock(&tasklist_lock);
79702+ do_each_thread(tsk2, tsk) {
79703+ cred = __task_cred(tsk);
79704+ if (uid_eq(cred->uid, uid))
79705+ gr_fake_force_sig(SIGKILL, tsk);
79706+ } while_each_thread(tsk2, tsk);
79707+ read_unlock(&tasklist_lock);
79708+ }
79709+#endif
79710+}
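
The "until system restart" part of the lockout works by deliberately leaking a reference to the per-user structure: with the refcount never dropped, the user_struct and its kernel_banned flag survive the death of every process the user owns, so the ban outlives the kill sweep. A toy model of that pinning trick (hypothetical types, not the kernel's uid cache):

/* model of banning via an intentionally leaked reference */
struct user_rec {
	int refcount;
	int kernel_banned;
};

static struct user_rec *get_ref(struct user_rec *u)
{
	u->refcount++;		/* intentionally never paired with a put */
	return u;
}

static void ban_user(struct user_rec *u)
{
	get_ref(u);		/* leak: the struct outlives all its processes */
	u->kernel_banned = 1;
}
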
79711+
79712+#ifdef CONFIG_GRKERNSEC_BRUTE
79713+static bool suid_ban_expired(struct user_struct *user)
79714+{
79715+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
79716+ user->suid_banned = 0;
79717+ user->suid_ban_expires = 0;
79718+ free_uid(user);
79719+ return true;
79720+ }
79721+
79722+ return false;
79723+}
79724+#endif
79725+
79726+int gr_process_kernel_exec_ban(void)
79727+{
79728+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
79729+ if (unlikely(current->cred->user->kernel_banned))
79730+ return -EPERM;
79731+#endif
79732+ return 0;
79733+}
79734+
79735+int gr_process_kernel_setuid_ban(struct user_struct *user)
79736+{
79737+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
79738+ if (unlikely(user->kernel_banned))
79739+ gr_fake_force_sig(SIGKILL, current);
79740+#endif
79741+ return 0;
79742+}
79743+
79744+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
79745+{
79746+#ifdef CONFIG_GRKERNSEC_BRUTE
79747+ struct user_struct *user = current->cred->user;
79748+ if (unlikely(user->suid_banned)) {
79749+ if (suid_ban_expired(user))
79750+ return 0;
79751+ /* disallow execution of suid binaries only */
79752+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
79753+ return -EPERM;
79754+ }
79755+#endif
79756+ return 0;
79757+}
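
The final test above is what scopes the ban to setuid binaries: an exec whose prepared credentials keep euid equal to the caller's uid is still allowed even while banned. Reduced to a predicate (hypothetical plain-integer stand-ins for kuid_t):

/* sketch: an exec is "suid" when the new image's euid differs
 * from the invoking user's uid */
struct cred_ids { unsigned int uid, euid; };

static int is_suid_exec(const struct cred_ids *caller,
			const struct cred_ids *new_image)
{
	/* a banned user may still run binaries that do not change euid */
	return new_image->euid != caller->uid;
}
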
79758diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
79759new file mode 100644
79760index 0000000..c0aef3a
79761--- /dev/null
79762+++ b/grsecurity/grsec_sock.c
79763@@ -0,0 +1,244 @@
79764+#include <linux/kernel.h>
79765+#include <linux/module.h>
79766+#include <linux/sched.h>
79767+#include <linux/file.h>
79768+#include <linux/net.h>
79769+#include <linux/in.h>
79770+#include <linux/ip.h>
79771+#include <net/sock.h>
79772+#include <net/inet_sock.h>
79773+#include <linux/grsecurity.h>
79774+#include <linux/grinternal.h>
79775+#include <linux/gracl.h>
79776+
79777+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
79778+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
79779+
79780+EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
79781+EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
79782+
79783+#ifdef CONFIG_UNIX_MODULE
79784+EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
79785+EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
79786+EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
79787+EXPORT_SYMBOL_GPL(gr_handle_create);
79788+#endif
79789+
79790+#ifdef CONFIG_GRKERNSEC
79791+#define gr_conn_table_size 32749
79792+struct conn_table_entry {
79793+ struct conn_table_entry *next;
79794+ struct signal_struct *sig;
79795+};
79796+
79797+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
79798+DEFINE_SPINLOCK(gr_conn_table_lock);
79799+
79800+extern const char * gr_socktype_to_name(unsigned char type);
79801+extern const char * gr_proto_to_name(unsigned char proto);
79802+extern const char * gr_sockfamily_to_name(unsigned char family);
79803+
79804+static __inline__ int
79805+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
79806+{
79807+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
79808+}
79809+
79810+static __inline__ int
79811+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
79812+ __u16 sport, __u16 dport)
79813+{
79814+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
79815+ sig->gr_sport == sport && sig->gr_dport == dport))
79816+ return 1;
79817+ else
79818+ return 0;
79819+}
79820+
79821+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
79822+{
79823+ struct conn_table_entry **match;
79824+ unsigned int index;
79825+
79826+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
79827+ sig->gr_sport, sig->gr_dport,
79828+ gr_conn_table_size);
79829+
79830+ newent->sig = sig;
79831+
79832+ match = &gr_conn_table[index];
79833+ newent->next = *match;
79834+ *match = newent;
79835+
79836+ return;
79837+}
79838+
79839+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
79840+{
79841+ struct conn_table_entry *match, *last = NULL;
79842+ unsigned int index;
79843+
79844+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
79845+ sig->gr_sport, sig->gr_dport,
79846+ gr_conn_table_size);
79847+
79848+ match = gr_conn_table[index];
79849+ while (match && !conn_match(match->sig,
79850+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
79851+ sig->gr_dport)) {
79852+ last = match;
79853+ match = match->next;
79854+ }
79855+
79856+ if (match) {
79857+ if (last)
79858+ last->next = match->next;
79859+ else
79860+ gr_conn_table[index] = match->next;
79861+ kfree(match);
79862+ }
79863+
79864+ return;
79865+}
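
Deletion above is the classic unlink-from-a-singly-linked-bucket walk; the one subtlety is that removing the head entry must splice in match->next rather than emptying the bucket, or any colliding entries behind it would be lost. The same pattern in standalone form, using a pointer-to-pointer to avoid the separate last variable:

/* unlink one node from a singly linked hash bucket */
struct node { struct node *next; int key; };

static void bucket_remove(struct node **head, int key)
{
	struct node **pp = head;

	while (*pp && (*pp)->key != key)
		pp = &(*pp)->next;
	if (*pp) {
		struct node *victim = *pp;

		*pp = victim->next;	/* preserves any later entries */
		/* caller frees victim */
	}
}
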
79866+
79867+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
79868+ __u16 sport, __u16 dport)
79869+{
79870+ struct conn_table_entry *match;
79871+ unsigned int index;
79872+
79873+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
79874+
79875+ match = gr_conn_table[index];
79876+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
79877+ match = match->next;
79878+
79879+ if (match)
79880+ return match->sig;
79881+ else
79882+ return NULL;
79883+}
79884+
79885+#endif
79886+
79887+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
79888+{
79889+#ifdef CONFIG_GRKERNSEC
79890+ struct signal_struct *sig = task->signal;
79891+ struct conn_table_entry *newent;
79892+
79893+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
79894+ if (newent == NULL)
79895+ return;
79896+ /* no bh lock needed since we are called with bh disabled */
79897+ spin_lock(&gr_conn_table_lock);
79898+ gr_del_task_from_ip_table_nolock(sig);
79899+ sig->gr_saddr = inet->inet_rcv_saddr;
79900+ sig->gr_daddr = inet->inet_daddr;
79901+ sig->gr_sport = inet->inet_sport;
79902+ sig->gr_dport = inet->inet_dport;
79903+ gr_add_to_task_ip_table_nolock(sig, newent);
79904+ spin_unlock(&gr_conn_table_lock);
79905+#endif
79906+ return;
79907+}
79908+
79909+void gr_del_task_from_ip_table(struct task_struct *task)
79910+{
79911+#ifdef CONFIG_GRKERNSEC
79912+ spin_lock_bh(&gr_conn_table_lock);
79913+ gr_del_task_from_ip_table_nolock(task->signal);
79914+ spin_unlock_bh(&gr_conn_table_lock);
79915+#endif
79916+ return;
79917+}
79918+
79919+void
79920+gr_attach_curr_ip(const struct sock *sk)
79921+{
79922+#ifdef CONFIG_GRKERNSEC
79923+ struct signal_struct *p, *set;
79924+ const struct inet_sock *inet = inet_sk(sk);
79925+
79926+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
79927+ return;
79928+
79929+ set = current->signal;
79930+
79931+ spin_lock_bh(&gr_conn_table_lock);
79932+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
79933+ inet->inet_dport, inet->inet_sport);
79934+ if (unlikely(p != NULL)) {
79935+ set->curr_ip = p->curr_ip;
79936+ set->used_accept = 1;
79937+ gr_del_task_from_ip_table_nolock(p);
79938+ spin_unlock_bh(&gr_conn_table_lock);
79939+ return;
79940+ }
79941+ spin_unlock_bh(&gr_conn_table_lock);
79942+
79943+ set->curr_ip = inet->inet_daddr;
79944+ set->used_accept = 1;
79945+#endif
79946+ return;
79947+}
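
The argument order in the lookup above is deliberate: the accepting side passes (daddr, rcv_saddr, dport, sport), re-expressing its socket's 4-tuple in the connecting task's orientation, so it matches the tuple that gr_update_task_in_ip_table() stored at connect time. When both endpoints are local, the accepting task's signal_struct therefore inherits the connecting task's recorded origin IP for logging. A sketch of that re-orientation (hypothetical helper):

/* re-express an accepted socket's 4-tuple from the client's viewpoint */
struct tuple { unsigned int saddr, daddr; unsigned short sport, dport; };

static struct tuple client_view_of_server_sock(unsigned int rcv_saddr,
					       unsigned int daddr,
					       unsigned short sport,
					       unsigned short dport)
{
	/* the server's daddr/dport are the client's saddr/sport,
	 * and vice versa */
	struct tuple t = { daddr, rcv_saddr, dport, sport };
	return t;
}
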
79948+
79949+int
79950+gr_handle_sock_all(const int family, const int type, const int protocol)
79951+{
79952+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
79953+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
79954+ (family != AF_UNIX)) {
79955+ if (family == AF_INET)
79956+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
79957+ else
79958+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
79959+ return -EACCES;
79960+ }
79961+#endif
79962+ return 0;
79963+}
79964+
79965+int
79966+gr_handle_sock_server(const struct sockaddr *sck)
79967+{
79968+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
79969+ if (grsec_enable_socket_server &&
79970+ in_group_p(grsec_socket_server_gid) &&
79971+ sck && (sck->sa_family != AF_UNIX) &&
79972+ (sck->sa_family != AF_LOCAL)) {
79973+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
79974+ return -EACCES;
79975+ }
79976+#endif
79977+ return 0;
79978+}
79979+
79980+int
79981+gr_handle_sock_server_other(const struct sock *sck)
79982+{
79983+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
79984+ if (grsec_enable_socket_server &&
79985+ in_group_p(grsec_socket_server_gid) &&
79986+ sck && (sck->sk_family != AF_UNIX) &&
79987+ (sck->sk_family != AF_LOCAL)) {
79988+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
79989+ return -EACCES;
79990+ }
79991+#endif
79992+ return 0;
79993+}
79994+
79995+int
79996+gr_handle_sock_client(const struct sockaddr *sck)
79997+{
79998+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
79999+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
80000+ sck && (sck->sa_family != AF_UNIX) &&
80001+ (sck->sa_family != AF_LOCAL)) {
80002+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
80003+ return -EACCES;
80004+ }
80005+#endif
80006+ return 0;
80007+}
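
All three socket hooks above share one shape: a sysctl toggle plus a configured gid select whom the policy applies to, and AF_UNIX/AF_LOCAL traffic is always exempt so local IPC keeps working. (On Linux AF_LOCAL is defined as AF_UNIX, so the second family test above is defensive rather than functional.) Condensed to its skeleton (hypothetical helper, userspace headers for illustration):

#include <errno.h>
#include <sys/socket.h>	/* AF_UNIX and AF_LOCAL share a value on Linux */

static int sock_policy_denies(int enabled, int caller_in_gid, int family)
{
	if (!enabled || !caller_in_gid)
		return 0;
	if (family == AF_UNIX)		/* local IPC is always exempt */
		return 0;
	return -EACCES;
}
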
80008diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
80009new file mode 100644
80010index 0000000..8159888
80011--- /dev/null
80012+++ b/grsecurity/grsec_sysctl.c
80013@@ -0,0 +1,479 @@
80014+#include <linux/kernel.h>
80015+#include <linux/sched.h>
80016+#include <linux/sysctl.h>
80017+#include <linux/grsecurity.h>
80018+#include <linux/grinternal.h>
80019+
80020+int
80021+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
80022+{
80023+#ifdef CONFIG_GRKERNSEC_SYSCTL
80024+ if (dirname == NULL || name == NULL)
80025+ return 0;
80026+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
80027+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
80028+ return -EACCES;
80029+ }
80030+#endif
80031+ return 0;
80032+}
80033+
80034+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
80035+static int __maybe_unused __read_only one = 1;
80036+#endif
80037+
80038+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
80039+ defined(CONFIG_GRKERNSEC_DENYUSB)
80040+struct ctl_table grsecurity_table[] = {
80041+#ifdef CONFIG_GRKERNSEC_SYSCTL
80042+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
80043+#ifdef CONFIG_GRKERNSEC_IO
80044+ {
80045+ .procname = "disable_priv_io",
80046+ .data = &grsec_disable_privio,
80047+ .maxlen = sizeof(int),
80048+ .mode = 0600,
80049+ .proc_handler = &proc_dointvec,
80050+ },
80051+#endif
80052+#endif
80053+#ifdef CONFIG_GRKERNSEC_LINK
80054+ {
80055+ .procname = "linking_restrictions",
80056+ .data = &grsec_enable_link,
80057+ .maxlen = sizeof(int),
80058+ .mode = 0600,
80059+ .proc_handler = &proc_dointvec,
80060+ },
80061+#endif
80062+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
80063+ {
80064+ .procname = "enforce_symlinksifowner",
80065+ .data = &grsec_enable_symlinkown,
80066+ .maxlen = sizeof(int),
80067+ .mode = 0600,
80068+ .proc_handler = &proc_dointvec,
80069+ },
80070+ {
80071+ .procname = "symlinkown_gid",
80072+ .data = &grsec_symlinkown_gid,
80073+ .maxlen = sizeof(int),
80074+ .mode = 0600,
80075+ .proc_handler = &proc_dointvec,
80076+ },
80077+#endif
80078+#ifdef CONFIG_GRKERNSEC_BRUTE
80079+ {
80080+ .procname = "deter_bruteforce",
80081+ .data = &grsec_enable_brute,
80082+ .maxlen = sizeof(int),
80083+ .mode = 0600,
80084+ .proc_handler = &proc_dointvec,
80085+ },
80086+#endif
80087+#ifdef CONFIG_GRKERNSEC_FIFO
80088+ {
80089+ .procname = "fifo_restrictions",
80090+ .data = &grsec_enable_fifo,
80091+ .maxlen = sizeof(int),
80092+ .mode = 0600,
80093+ .proc_handler = &proc_dointvec,
80094+ },
80095+#endif
80096+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
80097+ {
80098+ .procname = "ptrace_readexec",
80099+ .data = &grsec_enable_ptrace_readexec,
80100+ .maxlen = sizeof(int),
80101+ .mode = 0600,
80102+ .proc_handler = &proc_dointvec,
80103+ },
80104+#endif
80105+#ifdef CONFIG_GRKERNSEC_SETXID
80106+ {
80107+ .procname = "consistent_setxid",
80108+ .data = &grsec_enable_setxid,
80109+ .maxlen = sizeof(int),
80110+ .mode = 0600,
80111+ .proc_handler = &proc_dointvec,
80112+ },
80113+#endif
80114+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80115+ {
80116+ .procname = "ip_blackhole",
80117+ .data = &grsec_enable_blackhole,
80118+ .maxlen = sizeof(int),
80119+ .mode = 0600,
80120+ .proc_handler = &proc_dointvec,
80121+ },
80122+ {
80123+ .procname = "lastack_retries",
80124+ .data = &grsec_lastack_retries,
80125+ .maxlen = sizeof(int),
80126+ .mode = 0600,
80127+ .proc_handler = &proc_dointvec,
80128+ },
80129+#endif
80130+#ifdef CONFIG_GRKERNSEC_EXECLOG
80131+ {
80132+ .procname = "exec_logging",
80133+ .data = &grsec_enable_execlog,
80134+ .maxlen = sizeof(int),
80135+ .mode = 0600,
80136+ .proc_handler = &proc_dointvec,
80137+ },
80138+#endif
80139+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
80140+ {
80141+ .procname = "rwxmap_logging",
80142+ .data = &grsec_enable_log_rwxmaps,
80143+ .maxlen = sizeof(int),
80144+ .mode = 0600,
80145+ .proc_handler = &proc_dointvec,
80146+ },
80147+#endif
80148+#ifdef CONFIG_GRKERNSEC_SIGNAL
80149+ {
80150+ .procname = "signal_logging",
80151+ .data = &grsec_enable_signal,
80152+ .maxlen = sizeof(int),
80153+ .mode = 0600,
80154+ .proc_handler = &proc_dointvec,
80155+ },
80156+#endif
80157+#ifdef CONFIG_GRKERNSEC_FORKFAIL
80158+ {
80159+ .procname = "forkfail_logging",
80160+ .data = &grsec_enable_forkfail,
80161+ .maxlen = sizeof(int),
80162+ .mode = 0600,
80163+ .proc_handler = &proc_dointvec,
80164+ },
80165+#endif
80166+#ifdef CONFIG_GRKERNSEC_TIME
80167+ {
80168+ .procname = "timechange_logging",
80169+ .data = &grsec_enable_time,
80170+ .maxlen = sizeof(int),
80171+ .mode = 0600,
80172+ .proc_handler = &proc_dointvec,
80173+ },
80174+#endif
80175+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
80176+ {
80177+ .procname = "chroot_deny_shmat",
80178+ .data = &grsec_enable_chroot_shmat,
80179+ .maxlen = sizeof(int),
80180+ .mode = 0600,
80181+ .proc_handler = &proc_dointvec,
80182+ },
80183+#endif
80184+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
80185+ {
80186+ .procname = "chroot_deny_unix",
80187+ .data = &grsec_enable_chroot_unix,
80188+ .maxlen = sizeof(int),
80189+ .mode = 0600,
80190+ .proc_handler = &proc_dointvec,
80191+ },
80192+#endif
80193+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
80194+ {
80195+ .procname = "chroot_deny_mount",
80196+ .data = &grsec_enable_chroot_mount,
80197+ .maxlen = sizeof(int),
80198+ .mode = 0600,
80199+ .proc_handler = &proc_dointvec,
80200+ },
80201+#endif
80202+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
80203+ {
80204+ .procname = "chroot_deny_fchdir",
80205+ .data = &grsec_enable_chroot_fchdir,
80206+ .maxlen = sizeof(int),
80207+ .mode = 0600,
80208+ .proc_handler = &proc_dointvec,
80209+ },
80210+#endif
80211+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
80212+ {
80213+ .procname = "chroot_deny_chroot",
80214+ .data = &grsec_enable_chroot_double,
80215+ .maxlen = sizeof(int),
80216+ .mode = 0600,
80217+ .proc_handler = &proc_dointvec,
80218+ },
80219+#endif
80220+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
80221+ {
80222+ .procname = "chroot_deny_pivot",
80223+ .data = &grsec_enable_chroot_pivot,
80224+ .maxlen = sizeof(int),
80225+ .mode = 0600,
80226+ .proc_handler = &proc_dointvec,
80227+ },
80228+#endif
80229+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
80230+ {
80231+ .procname = "chroot_enforce_chdir",
80232+ .data = &grsec_enable_chroot_chdir,
80233+ .maxlen = sizeof(int),
80234+ .mode = 0600,
80235+ .proc_handler = &proc_dointvec,
80236+ },
80237+#endif
80238+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
80239+ {
80240+ .procname = "chroot_deny_chmod",
80241+ .data = &grsec_enable_chroot_chmod,
80242+ .maxlen = sizeof(int),
80243+ .mode = 0600,
80244+ .proc_handler = &proc_dointvec,
80245+ },
80246+#endif
80247+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
80248+ {
80249+ .procname = "chroot_deny_mknod",
80250+ .data = &grsec_enable_chroot_mknod,
80251+ .maxlen = sizeof(int),
80252+ .mode = 0600,
80253+ .proc_handler = &proc_dointvec,
80254+ },
80255+#endif
80256+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
80257+ {
80258+ .procname = "chroot_restrict_nice",
80259+ .data = &grsec_enable_chroot_nice,
80260+ .maxlen = sizeof(int),
80261+ .mode = 0600,
80262+ .proc_handler = &proc_dointvec,
80263+ },
80264+#endif
80265+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
80266+ {
80267+ .procname = "chroot_execlog",
80268+ .data = &grsec_enable_chroot_execlog,
80269+ .maxlen = sizeof(int),
80270+ .mode = 0600,
80271+ .proc_handler = &proc_dointvec,
80272+ },
80273+#endif
80274+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
80275+ {
80276+ .procname = "chroot_caps",
80277+ .data = &grsec_enable_chroot_caps,
80278+ .maxlen = sizeof(int),
80279+ .mode = 0600,
80280+ .proc_handler = &proc_dointvec,
80281+ },
80282+#endif
80283+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
80284+ {
80285+ .procname = "chroot_deny_sysctl",
80286+ .data = &grsec_enable_chroot_sysctl,
80287+ .maxlen = sizeof(int),
80288+ .mode = 0600,
80289+ .proc_handler = &proc_dointvec,
80290+ },
80291+#endif
80292+#ifdef CONFIG_GRKERNSEC_TPE
80293+ {
80294+ .procname = "tpe",
80295+ .data = &grsec_enable_tpe,
80296+ .maxlen = sizeof(int),
80297+ .mode = 0600,
80298+ .proc_handler = &proc_dointvec,
80299+ },
80300+ {
80301+ .procname = "tpe_gid",
80302+ .data = &grsec_tpe_gid,
80303+ .maxlen = sizeof(int),
80304+ .mode = 0600,
80305+ .proc_handler = &proc_dointvec,
80306+ },
80307+#endif
80308+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
80309+ {
80310+ .procname = "tpe_invert",
80311+ .data = &grsec_enable_tpe_invert,
80312+ .maxlen = sizeof(int),
80313+ .mode = 0600,
80314+ .proc_handler = &proc_dointvec,
80315+ },
80316+#endif
80317+#ifdef CONFIG_GRKERNSEC_TPE_ALL
80318+ {
80319+ .procname = "tpe_restrict_all",
80320+ .data = &grsec_enable_tpe_all,
80321+ .maxlen = sizeof(int),
80322+ .mode = 0600,
80323+ .proc_handler = &proc_dointvec,
80324+ },
80325+#endif
80326+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
80327+ {
80328+ .procname = "socket_all",
80329+ .data = &grsec_enable_socket_all,
80330+ .maxlen = sizeof(int),
80331+ .mode = 0600,
80332+ .proc_handler = &proc_dointvec,
80333+ },
80334+ {
80335+ .procname = "socket_all_gid",
80336+ .data = &grsec_socket_all_gid,
80337+ .maxlen = sizeof(int),
80338+ .mode = 0600,
80339+ .proc_handler = &proc_dointvec,
80340+ },
80341+#endif
80342+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
80343+ {
80344+ .procname = "socket_client",
80345+ .data = &grsec_enable_socket_client,
80346+ .maxlen = sizeof(int),
80347+ .mode = 0600,
80348+ .proc_handler = &proc_dointvec,
80349+ },
80350+ {
80351+ .procname = "socket_client_gid",
80352+ .data = &grsec_socket_client_gid,
80353+ .maxlen = sizeof(int),
80354+ .mode = 0600,
80355+ .proc_handler = &proc_dointvec,
80356+ },
80357+#endif
80358+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
80359+ {
80360+ .procname = "socket_server",
80361+ .data = &grsec_enable_socket_server,
80362+ .maxlen = sizeof(int),
80363+ .mode = 0600,
80364+ .proc_handler = &proc_dointvec,
80365+ },
80366+ {
80367+ .procname = "socket_server_gid",
80368+ .data = &grsec_socket_server_gid,
80369+ .maxlen = sizeof(int),
80370+ .mode = 0600,
80371+ .proc_handler = &proc_dointvec,
80372+ },
80373+#endif
80374+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
80375+ {
80376+ .procname = "audit_group",
80377+ .data = &grsec_enable_group,
80378+ .maxlen = sizeof(int),
80379+ .mode = 0600,
80380+ .proc_handler = &proc_dointvec,
80381+ },
80382+ {
80383+ .procname = "audit_gid",
80384+ .data = &grsec_audit_gid,
80385+ .maxlen = sizeof(int),
80386+ .mode = 0600,
80387+ .proc_handler = &proc_dointvec,
80388+ },
80389+#endif
80390+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
80391+ {
80392+ .procname = "audit_chdir",
80393+ .data = &grsec_enable_chdir,
80394+ .maxlen = sizeof(int),
80395+ .mode = 0600,
80396+ .proc_handler = &proc_dointvec,
80397+ },
80398+#endif
80399+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
80400+ {
80401+ .procname = "audit_mount",
80402+ .data = &grsec_enable_mount,
80403+ .maxlen = sizeof(int),
80404+ .mode = 0600,
80405+ .proc_handler = &proc_dointvec,
80406+ },
80407+#endif
80408+#ifdef CONFIG_GRKERNSEC_DMESG
80409+ {
80410+ .procname = "dmesg",
80411+ .data = &grsec_enable_dmesg,
80412+ .maxlen = sizeof(int),
80413+ .mode = 0600,
80414+ .proc_handler = &proc_dointvec,
80415+ },
80416+#endif
80417+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
80418+ {
80419+ .procname = "chroot_findtask",
80420+ .data = &grsec_enable_chroot_findtask,
80421+ .maxlen = sizeof(int),
80422+ .mode = 0600,
80423+ .proc_handler = &proc_dointvec,
80424+ },
80425+#endif
80426+#ifdef CONFIG_GRKERNSEC_RESLOG
80427+ {
80428+ .procname = "resource_logging",
80429+ .data = &grsec_resource_logging,
80430+ .maxlen = sizeof(int),
80431+ .mode = 0600,
80432+ .proc_handler = &proc_dointvec,
80433+ },
80434+#endif
80435+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
80436+ {
80437+ .procname = "audit_ptrace",
80438+ .data = &grsec_enable_audit_ptrace,
80439+ .maxlen = sizeof(int),
80440+ .mode = 0600,
80441+ .proc_handler = &proc_dointvec,
80442+ },
80443+#endif
80444+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
80445+ {
80446+ .procname = "harden_ptrace",
80447+ .data = &grsec_enable_harden_ptrace,
80448+ .maxlen = sizeof(int),
80449+ .mode = 0600,
80450+ .proc_handler = &proc_dointvec,
80451+ },
80452+#endif
80453+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
80454+ {
80455+ .procname = "harden_ipc",
80456+ .data = &grsec_enable_harden_ipc,
80457+ .maxlen = sizeof(int),
80458+ .mode = 0600,
80459+ .proc_handler = &proc_dointvec,
80460+ },
80461+#endif
80462+ {
80463+ .procname = "grsec_lock",
80464+ .data = &grsec_lock,
80465+ .maxlen = sizeof(int),
80466+ .mode = 0600,
80467+ .proc_handler = &proc_dointvec,
80468+ },
80469+#endif
80470+#ifdef CONFIG_GRKERNSEC_ROFS
80471+ {
80472+ .procname = "romount_protect",
80473+ .data = &grsec_enable_rofs,
80474+ .maxlen = sizeof(int),
80475+ .mode = 0600,
80476+ .proc_handler = &proc_dointvec_minmax,
80477+ .extra1 = &one,
80478+ .extra2 = &one,
80479+ },
80480+#endif
80481+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
80482+ {
80483+ .procname = "deny_new_usb",
80484+ .data = &grsec_deny_new_usb,
80485+ .maxlen = sizeof(int),
80486+ .mode = 0600,
80487+ .proc_handler = &proc_dointvec,
80488+ },
80489+#endif
80490+ { }
80491+};
80492+#endif
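
Two notes on the table above. First, romount_protect is the only entry using proc_dointvec_minmax, and with .extra1 == .extra2 == &one the only accepted write is 1: once enabled it cannot be switched back off at runtime. Second, the table itself is wired into /proc/sys/kernel/grsecurity by changes to kernel/sysctl.c elsewhere in this patch; a standalone registration would look roughly like the sketch below (illustrative only, assuming the 3.16-era register_sysctl() semantics):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/sysctl.h>

extern struct ctl_table grsecurity_table[];

static struct ctl_table_header *grsec_hdr;

static int __init grsec_sysctl_sketch_init(void)
{
	/* entries then appear under /proc/sys/kernel/grsecurity/ */
	grsec_hdr = register_sysctl("kernel/grsecurity", grsecurity_table);
	return grsec_hdr ? 0 : -ENOMEM;
}
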
80493diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
80494new file mode 100644
80495index 0000000..61b514e
80496--- /dev/null
80497+++ b/grsecurity/grsec_time.c
80498@@ -0,0 +1,16 @@
80499+#include <linux/kernel.h>
80500+#include <linux/sched.h>
80501+#include <linux/grinternal.h>
80502+#include <linux/module.h>
80503+
80504+void
80505+gr_log_timechange(void)
80506+{
80507+#ifdef CONFIG_GRKERNSEC_TIME
80508+ if (grsec_enable_time)
80509+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
80510+#endif
80511+ return;
80512+}
80513+
80514+EXPORT_SYMBOL_GPL(gr_log_timechange);
80515diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
80516new file mode 100644
80517index 0000000..d1953de
80518--- /dev/null
80519+++ b/grsecurity/grsec_tpe.c
80520@@ -0,0 +1,78 @@
80521+#include <linux/kernel.h>
80522+#include <linux/sched.h>
80523+#include <linux/file.h>
80524+#include <linux/fs.h>
80525+#include <linux/grinternal.h>
80526+
80527+extern int gr_acl_tpe_check(void);
80528+
80529+int
80530+gr_tpe_allow(const struct file *file)
80531+{
80532+#ifdef CONFIG_GRKERNSEC
80533+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
80534+ struct inode *file_inode = file->f_path.dentry->d_inode;
80535+ const struct cred *cred = current_cred();
80536+ char *msg = NULL;
80537+ char *msg2 = NULL;
80538+
80539+ // never restrict root
80540+ if (gr_is_global_root(cred->uid))
80541+ return 1;
80542+
80543+ if (grsec_enable_tpe) {
80544+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
80545+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
80546+ msg = "not being in trusted group";
80547+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
80548+ msg = "being in untrusted group";
80549+#else
80550+ if (in_group_p(grsec_tpe_gid))
80551+ msg = "being in untrusted group";
80552+#endif
80553+ }
80554+ if (!msg && gr_acl_tpe_check())
80555+ msg = "being in untrusted role";
80556+
80557+ // not in any affected group/role
80558+ if (!msg)
80559+ goto next_check;
80560+
80561+ if (gr_is_global_nonroot(inode->i_uid))
80562+ msg2 = "file in non-root-owned directory";
80563+ else if (inode->i_mode & S_IWOTH)
80564+ msg2 = "file in world-writable directory";
80565+ else if (inode->i_mode & S_IWGRP)
80566+ msg2 = "file in group-writable directory";
80567+ else if (file_inode->i_mode & S_IWOTH)
80568+ msg2 = "file is world-writable";
80569+
80570+ if (msg && msg2) {
80571+ char fullmsg[70] = {0};
80572+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
80573+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
80574+ return 0;
80575+ }
80576+ msg = NULL;
80577+next_check:
80578+#ifdef CONFIG_GRKERNSEC_TPE_ALL
80579+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
80580+ return 1;
80581+
80582+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
80583+ msg = "directory not owned by user";
80584+ else if (inode->i_mode & S_IWOTH)
80585+ msg = "file in world-writable directory";
80586+ else if (inode->i_mode & S_IWGRP)
80587+ msg = "file in group-writable directory";
80588+ else if (file_inode->i_mode & S_IWOTH)
80589+ msg = "file is world-writable";
80590+
80591+ if (msg) {
80592+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
80593+ return 0;
80594+ }
80595+#endif
80596+#endif
80597+ return 1;
80598+}
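
The first half of gr_tpe_allow() decides whether the caller is covered by TPE at all: normally membership in tpe_gid marks a user untrusted, while TPE_INVERT flips that so tpe_gid becomes the trusted group and everyone outside it is covered. Reduced to a predicate (hypothetical helper):

/* truth table of the group/invert logic above */
static int tpe_user_is_covered(int tpe_on, int invert, int in_tpe_gid)
{
	if (!tpe_on)
		return 0;
	return invert ? !in_tpe_gid : in_tpe_gid;
}
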
80599diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
80600new file mode 100644
80601index 0000000..ae02d8e
80602--- /dev/null
80603+++ b/grsecurity/grsec_usb.c
80604@@ -0,0 +1,15 @@
80605+#include <linux/kernel.h>
80606+#include <linux/grinternal.h>
80607+#include <linux/module.h>
80608+
80609+int gr_handle_new_usb(void)
80610+{
80611+#ifdef CONFIG_GRKERNSEC_DENYUSB
80612+ if (grsec_deny_new_usb) {
80613+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
80614+ return 1;
80615+ }
80616+#endif
80617+ return 0;
80618+}
80619+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
80620diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
80621new file mode 100644
80622index 0000000..158b330
80623--- /dev/null
80624+++ b/grsecurity/grsum.c
80625@@ -0,0 +1,64 @@
80626+#include <linux/err.h>
80627+#include <linux/kernel.h>
80628+#include <linux/sched.h>
80629+#include <linux/mm.h>
80630+#include <linux/scatterlist.h>
80631+#include <linux/crypto.h>
80632+#include <linux/gracl.h>
80633+
80634+
80635+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
80636+#error "crypto and sha256 must be built into the kernel"
80637+#endif
80638+
80639+int
80640+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
80641+{
80642+ struct crypto_hash *tfm;
80643+ struct hash_desc desc;
80644+ struct scatterlist sg[2];
80645+ unsigned char temp_sum[GR_SHA_LEN] __attribute__((aligned(__alignof__(unsigned long))));
80646+ unsigned long *tmpsumptr = (unsigned long *)temp_sum;
80647+ unsigned long *sumptr = (unsigned long *)sum;
80648+ int cryptres;
80649+ int retval = 1;
80650+ volatile int mismatched = 0;
80651+ volatile int dummy = 0;
80652+ unsigned int i;
80653+
80654+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
80655+ if (IS_ERR(tfm)) {
80656+ /* should never happen, since sha256 should be built in */
80657+ memset(entry->pw, 0, GR_PW_LEN);
80658+ return 1;
80659+ }
80660+
80661+ sg_init_table(sg, 2);
80662+ sg_set_buf(&sg[0], salt, GR_SALT_LEN);
80663+ sg_set_buf(&sg[1], entry->pw, strlen(entry->pw));
80664+
80665+ desc.tfm = tfm;
80666+ desc.flags = 0;
80667+
80668+ cryptres = crypto_hash_digest(&desc, sg, GR_SALT_LEN + strlen(entry->pw),
80669+ temp_sum);
80670+
80671+ memset(entry->pw, 0, GR_PW_LEN);
80672+
80673+ if (cryptres)
80674+ goto out;
80675+
80676+ for (i = 0; i < GR_SHA_LEN/sizeof(tmpsumptr[0]); i++)
80677+ if (sumptr[i] != tmpsumptr[i])
80678+ mismatched = 1;
80679+ else
80680+ dummy = 1; // waste a cycle
80681+
80682+ if (!mismatched)
80683+ retval = dummy - 1;
80684+
80685+out:
80686+ crypto_free_hash(tfm);
80687+
80688+ return retval;
80689+}
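
The volatile mismatched/dummy loop above is a hand-rolled attempt at a timing-balanced comparison: every word is inspected whether or not an earlier one already differed, so response time does not leak the position of the first mismatch. The same idea in a minimal standalone form (illustrative only; the kernel has offered crypto_memneq() for this purpose since 3.13):

#include <stddef.h>

/* constant-time comparison: no early exit on mismatch */
static int ct_memcmp(const unsigned char *a, const unsigned char *b, size_t n)
{
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < n; i++)
		diff |= a[i] ^ b[i];
	return diff != 0;	/* 0 == equal */
}
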
80690diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
80691index 77ff547..181834f 100644
80692--- a/include/asm-generic/4level-fixup.h
80693+++ b/include/asm-generic/4level-fixup.h
80694@@ -13,8 +13,10 @@
80695 #define pmd_alloc(mm, pud, address) \
80696 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
80697 NULL: pmd_offset(pud, address))
80698+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
80699
80700 #define pud_alloc(mm, pgd, address) (pgd)
80701+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
80702 #define pud_offset(pgd, start) (pgd)
80703 #define pud_none(pud) 0
80704 #define pud_bad(pud) 0
80705diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
80706index b7babf0..97f4c4f 100644
80707--- a/include/asm-generic/atomic-long.h
80708+++ b/include/asm-generic/atomic-long.h
80709@@ -22,6 +22,12 @@
80710
80711 typedef atomic64_t atomic_long_t;
80712
80713+#ifdef CONFIG_PAX_REFCOUNT
80714+typedef atomic64_unchecked_t atomic_long_unchecked_t;
80715+#else
80716+typedef atomic64_t atomic_long_unchecked_t;
80717+#endif
80718+
80719 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
80720
80721 static inline long atomic_long_read(atomic_long_t *l)
80722@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
80723 return (long)atomic64_read(v);
80724 }
80725
80726+#ifdef CONFIG_PAX_REFCOUNT
80727+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
80728+{
80729+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80730+
80731+ return (long)atomic64_read_unchecked(v);
80732+}
80733+#endif
80734+
80735 static inline void atomic_long_set(atomic_long_t *l, long i)
80736 {
80737 atomic64_t *v = (atomic64_t *)l;
80738@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
80739 atomic64_set(v, i);
80740 }
80741
80742+#ifdef CONFIG_PAX_REFCOUNT
80743+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
80744+{
80745+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80746+
80747+ atomic64_set_unchecked(v, i);
80748+}
80749+#endif
80750+
80751 static inline void atomic_long_inc(atomic_long_t *l)
80752 {
80753 atomic64_t *v = (atomic64_t *)l;
80754@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
80755 atomic64_inc(v);
80756 }
80757
80758+#ifdef CONFIG_PAX_REFCOUNT
80759+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
80760+{
80761+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80762+
80763+ atomic64_inc_unchecked(v);
80764+}
80765+#endif
80766+
80767 static inline void atomic_long_dec(atomic_long_t *l)
80768 {
80769 atomic64_t *v = (atomic64_t *)l;
80770@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
80771 atomic64_dec(v);
80772 }
80773
80774+#ifdef CONFIG_PAX_REFCOUNT
80775+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
80776+{
80777+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80778+
80779+ atomic64_dec_unchecked(v);
80780+}
80781+#endif
80782+
80783 static inline void atomic_long_add(long i, atomic_long_t *l)
80784 {
80785 atomic64_t *v = (atomic64_t *)l;
80786@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
80787 atomic64_add(i, v);
80788 }
80789
80790+#ifdef CONFIG_PAX_REFCOUNT
80791+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
80792+{
80793+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80794+
80795+ atomic64_add_unchecked(i, v);
80796+}
80797+#endif
80798+
80799 static inline void atomic_long_sub(long i, atomic_long_t *l)
80800 {
80801 atomic64_t *v = (atomic64_t *)l;
80802@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
80803 atomic64_sub(i, v);
80804 }
80805
80806+#ifdef CONFIG_PAX_REFCOUNT
80807+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
80808+{
80809+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80810+
80811+ atomic64_sub_unchecked(i, v);
80812+}
80813+#endif
80814+
80815 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
80816 {
80817 atomic64_t *v = (atomic64_t *)l;
80818@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
80819 return atomic64_add_negative(i, v);
80820 }
80821
80822-static inline long atomic_long_add_return(long i, atomic_long_t *l)
80823+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
80824 {
80825 atomic64_t *v = (atomic64_t *)l;
80826
80827 return (long)atomic64_add_return(i, v);
80828 }
80829
80830+#ifdef CONFIG_PAX_REFCOUNT
80831+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
80832+{
80833+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80834+
80835+ return (long)atomic64_add_return_unchecked(i, v);
80836+}
80837+#endif
80838+
80839 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
80840 {
80841 atomic64_t *v = (atomic64_t *)l;
80842@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
80843 return (long)atomic64_inc_return(v);
80844 }
80845
80846+#ifdef CONFIG_PAX_REFCOUNT
80847+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
80848+{
80849+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80850+
80851+ return (long)atomic64_inc_return_unchecked(v);
80852+}
80853+#endif
80854+
80855 static inline long atomic_long_dec_return(atomic_long_t *l)
80856 {
80857 atomic64_t *v = (atomic64_t *)l;
80858@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
80859
80860 typedef atomic_t atomic_long_t;
80861
80862+#ifdef CONFIG_PAX_REFCOUNT
80863+typedef atomic_unchecked_t atomic_long_unchecked_t;
80864+#else
80865+typedef atomic_t atomic_long_unchecked_t;
80866+#endif
80867+
80868 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
80869 static inline long atomic_long_read(atomic_long_t *l)
80870 {
80871@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
80872 return (long)atomic_read(v);
80873 }
80874
80875+#ifdef CONFIG_PAX_REFCOUNT
80876+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
80877+{
80878+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80879+
80880+ return (long)atomic_read_unchecked(v);
80881+}
80882+#endif
80883+
80884 static inline void atomic_long_set(atomic_long_t *l, long i)
80885 {
80886 atomic_t *v = (atomic_t *)l;
80887@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
80888 atomic_set(v, i);
80889 }
80890
80891+#ifdef CONFIG_PAX_REFCOUNT
80892+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
80893+{
80894+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80895+
80896+ atomic_set_unchecked(v, i);
80897+}
80898+#endif
80899+
80900 static inline void atomic_long_inc(atomic_long_t *l)
80901 {
80902 atomic_t *v = (atomic_t *)l;
80903@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
80904 atomic_inc(v);
80905 }
80906
80907+#ifdef CONFIG_PAX_REFCOUNT
80908+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
80909+{
80910+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80911+
80912+ atomic_inc_unchecked(v);
80913+}
80914+#endif
80915+
80916 static inline void atomic_long_dec(atomic_long_t *l)
80917 {
80918 atomic_t *v = (atomic_t *)l;
80919@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
80920 atomic_dec(v);
80921 }
80922
80923+#ifdef CONFIG_PAX_REFCOUNT
80924+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
80925+{
80926+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80927+
80928+ atomic_dec_unchecked(v);
80929+}
80930+#endif
80931+
80932 static inline void atomic_long_add(long i, atomic_long_t *l)
80933 {
80934 atomic_t *v = (atomic_t *)l;
80935@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
80936 atomic_add(i, v);
80937 }
80938
80939+#ifdef CONFIG_PAX_REFCOUNT
80940+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
80941+{
80942+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80943+
80944+ atomic_add_unchecked(i, v);
80945+}
80946+#endif
80947+
80948 static inline void atomic_long_sub(long i, atomic_long_t *l)
80949 {
80950 atomic_t *v = (atomic_t *)l;
80951@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
80952 atomic_sub(i, v);
80953 }
80954
80955+#ifdef CONFIG_PAX_REFCOUNT
80956+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
80957+{
80958+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80959+
80960+ atomic_sub_unchecked(i, v);
80961+}
80962+#endif
80963+
80964 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
80965 {
80966 atomic_t *v = (atomic_t *)l;
80967@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
80968 return (long)atomic_add_return(i, v);
80969 }
80970
80971+#ifdef CONFIG_PAX_REFCOUNT
80972+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
80973+{
80974+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80975+
80976+ return (long)atomic_add_return_unchecked(i, v);
80977+}
80978+
80979+#endif
80980+
80981 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
80982 {
80983 atomic_t *v = (atomic_t *)l;
80984@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
80985 return (long)atomic_inc_return(v);
80986 }
80987
80988+#ifdef CONFIG_PAX_REFCOUNT
80989+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
80990+{
80991+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80992+
80993+ return (long)atomic_inc_return_unchecked(v);
80994+}
80995+#endif
80996+
80997 static inline long atomic_long_dec_return(atomic_long_t *l)
80998 {
80999 atomic_t *v = (atomic_t *)l;
81000@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
81001
81002 #endif /* BITS_PER_LONG == 64 */
81003
81004+#ifdef CONFIG_PAX_REFCOUNT
81005+static inline void pax_refcount_needs_these_functions(void)
81006+{
81007+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
81008+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
81009+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
81010+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
81011+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
81012+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
81013+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
81014+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
81015+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
81016+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
81017+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
81018+#ifdef CONFIG_X86
81019+ atomic_clear_mask_unchecked(0, NULL);
81020+ atomic_set_mask_unchecked(0, NULL);
81021+#endif
81022+
81023+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
81024+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
81025+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
81026+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
81027+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
81028+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
81029+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
81030+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
81031+}
81032+#else
81033+#define atomic_read_unchecked(v) atomic_read(v)
81034+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
81035+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
81036+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
81037+#define atomic_inc_unchecked(v) atomic_inc(v)
81038+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
81039+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
81040+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
81041+#define atomic_dec_unchecked(v) atomic_dec(v)
81042+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
81043+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
81044+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
81045+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
81046+
81047+#define atomic_long_read_unchecked(v) atomic_long_read(v)
81048+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
81049+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
81050+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
81051+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
81052+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
81053+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
81054+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
81055+#endif
81056+
81057 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
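
The point of the parallel _unchecked API defined above: under PAX_REFCOUNT the ordinary atomic operations are instrumented to trap on overflow, which is exactly right for reference counts but wrong for counters that may legitimately wrap, such as statistics. Those opt out by using the unchecked types. A kernel-context usage sketch (hypothetical counters, assumes this header):

static atomic_long_t obj_refs = ATOMIC_LONG_INIT(1);		  /* overflow = bug  */
static atomic_long_unchecked_t rx_packets = ATOMIC_LONG_INIT(0); /* may wrap freely */

static void on_rx(void)
{
	atomic_long_inc(&obj_refs);		/* checked: traps on overflow */
	atomic_long_inc_unchecked(&rx_packets);	/* never instrumented */
}
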
81058diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
81059index 9c79e76..9f7827d 100644
81060--- a/include/asm-generic/atomic.h
81061+++ b/include/asm-generic/atomic.h
81062@@ -154,7 +154,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
81063 * Atomically clears the bits set in @mask from @v
81064 */
81065 #ifndef atomic_clear_mask
81066-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
81067+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
81068 {
81069 unsigned long flags;
81070
81071diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
81072index b18ce4f..2ee2843 100644
81073--- a/include/asm-generic/atomic64.h
81074+++ b/include/asm-generic/atomic64.h
81075@@ -16,6 +16,8 @@ typedef struct {
81076 long long counter;
81077 } atomic64_t;
81078
81079+typedef atomic64_t atomic64_unchecked_t;
81080+
81081 #define ATOMIC64_INIT(i) { (i) }
81082
81083 extern long long atomic64_read(const atomic64_t *v);
81084@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
81085 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
81086 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
81087
81088+#define atomic64_read_unchecked(v) atomic64_read(v)
81089+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
81090+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
81091+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
81092+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
81093+#define atomic64_inc_unchecked(v) atomic64_inc(v)
81094+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
81095+#define atomic64_dec_unchecked(v) atomic64_dec(v)
81096+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
81097+
81098 #endif /* _ASM_GENERIC_ATOMIC64_H */
81099diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
81100index 1402fa8..025a736 100644
81101--- a/include/asm-generic/barrier.h
81102+++ b/include/asm-generic/barrier.h
81103@@ -74,7 +74,7 @@
81104 do { \
81105 compiletime_assert_atomic_type(*p); \
81106 smp_mb(); \
81107- ACCESS_ONCE(*p) = (v); \
81108+ ACCESS_ONCE_RW(*p) = (v); \
81109 } while (0)
81110
81111 #define smp_load_acquire(p) \
81112diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
81113index a60a7cc..0fe12f2 100644
81114--- a/include/asm-generic/bitops/__fls.h
81115+++ b/include/asm-generic/bitops/__fls.h
81116@@ -9,7 +9,7 @@
81117 *
81118 * Undefined if no set bit exists, so code should check against 0 first.
81119 */
81120-static __always_inline unsigned long __fls(unsigned long word)
81121+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
81122 {
81123 int num = BITS_PER_LONG - 1;
81124
81125diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
81126index 0576d1f..dad6c71 100644
81127--- a/include/asm-generic/bitops/fls.h
81128+++ b/include/asm-generic/bitops/fls.h
81129@@ -9,7 +9,7 @@
81130 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
81131 */
81132
81133-static __always_inline int fls(int x)
81134+static __always_inline int __intentional_overflow(-1) fls(int x)
81135 {
81136 int r = 32;
81137
81138diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
81139index b097cf8..3d40e14 100644
81140--- a/include/asm-generic/bitops/fls64.h
81141+++ b/include/asm-generic/bitops/fls64.h
81142@@ -15,7 +15,7 @@
81143 * at position 64.
81144 */
81145 #if BITS_PER_LONG == 32
81146-static __always_inline int fls64(__u64 x)
81147+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
81148 {
81149 __u32 h = x >> 32;
81150 if (h)
81151@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
81152 return fls(x);
81153 }
81154 #elif BITS_PER_LONG == 64
81155-static __always_inline int fls64(__u64 x)
81156+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
81157 {
81158 if (x == 0)
81159 return 0;
81160diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
81161index 1bfcfe5..e04c5c9 100644
81162--- a/include/asm-generic/cache.h
81163+++ b/include/asm-generic/cache.h
81164@@ -6,7 +6,7 @@
81165 * cache lines need to provide their own cache.h.
81166 */
81167
81168-#define L1_CACHE_SHIFT 5
81169-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
81170+#define L1_CACHE_SHIFT 5UL
81171+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
81172
81173 #endif /* __ASM_GENERIC_CACHE_H */
81174diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
81175index 0d68a1e..b74a761 100644
81176--- a/include/asm-generic/emergency-restart.h
81177+++ b/include/asm-generic/emergency-restart.h
81178@@ -1,7 +1,7 @@
81179 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
81180 #define _ASM_GENERIC_EMERGENCY_RESTART_H
81181
81182-static inline void machine_emergency_restart(void)
81183+static inline __noreturn void machine_emergency_restart(void)
81184 {
81185 machine_restart(NULL);
81186 }
81187diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
81188index 975e1cc..0b8a083 100644
81189--- a/include/asm-generic/io.h
81190+++ b/include/asm-generic/io.h
81191@@ -289,7 +289,7 @@ static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
81192 * These are pretty trivial
81193 */
81194 #ifndef virt_to_phys
81195-static inline unsigned long virt_to_phys(volatile void *address)
81196+static inline unsigned long __intentional_overflow(-1) virt_to_phys(volatile void *address)
81197 {
81198 return __pa((unsigned long)address);
81199 }
81200diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
81201index 90f99c7..00ce236 100644
81202--- a/include/asm-generic/kmap_types.h
81203+++ b/include/asm-generic/kmap_types.h
81204@@ -2,9 +2,9 @@
81205 #define _ASM_GENERIC_KMAP_TYPES_H
81206
81207 #ifdef __WITH_KM_FENCE
81208-# define KM_TYPE_NR 41
81209+# define KM_TYPE_NR 42
81210 #else
81211-# define KM_TYPE_NR 20
81212+# define KM_TYPE_NR 21
81213 #endif
81214
81215 #endif
81216diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
81217index 9ceb03b..62b0b8f 100644
81218--- a/include/asm-generic/local.h
81219+++ b/include/asm-generic/local.h
81220@@ -23,24 +23,37 @@ typedef struct
81221 atomic_long_t a;
81222 } local_t;
81223
81224+typedef struct {
81225+ atomic_long_unchecked_t a;
81226+} local_unchecked_t;
81227+
81228 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
81229
81230 #define local_read(l) atomic_long_read(&(l)->a)
81231+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
81232 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
81233+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
81234 #define local_inc(l) atomic_long_inc(&(l)->a)
81235+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
81236 #define local_dec(l) atomic_long_dec(&(l)->a)
81237+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
81238 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
81239+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
81240 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
81241+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
81242
81243 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
81244 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
81245 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
81246 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
81247 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
81248+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
81249 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
81250 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
81251+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
81252
81253 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
81254+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
81255 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
81256 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
81257 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
81258diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
81259index 725612b..9cc513a 100644
81260--- a/include/asm-generic/pgtable-nopmd.h
81261+++ b/include/asm-generic/pgtable-nopmd.h
81262@@ -1,14 +1,19 @@
81263 #ifndef _PGTABLE_NOPMD_H
81264 #define _PGTABLE_NOPMD_H
81265
81266-#ifndef __ASSEMBLY__
81267-
81268 #include <asm-generic/pgtable-nopud.h>
81269
81270-struct mm_struct;
81271-
81272 #define __PAGETABLE_PMD_FOLDED
81273
81274+#define PMD_SHIFT PUD_SHIFT
81275+#define PTRS_PER_PMD 1
81276+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
81277+#define PMD_MASK (~(PMD_SIZE-1))
81278+
81279+#ifndef __ASSEMBLY__
81280+
81281+struct mm_struct;
81282+
81283 /*
81284 * Having the pmd type consist of a pud gets the size right, and allows
81285 * us to conceptually access the pud entry that this pmd is folded into
81286@@ -16,11 +21,6 @@ struct mm_struct;
81287 */
81288 typedef struct { pud_t pud; } pmd_t;
81289
81290-#define PMD_SHIFT PUD_SHIFT
81291-#define PTRS_PER_PMD 1
81292-#define PMD_SIZE (1UL << PMD_SHIFT)
81293-#define PMD_MASK (~(PMD_SIZE-1))
81294-
81295 /*
81296 * The "pud_xxx()" functions here are trivial for a folded two-level
81297 * setup: the pmd is never bad, and a pmd always exists (as it's folded
81298diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
81299index 810431d..0ec4804f 100644
81300--- a/include/asm-generic/pgtable-nopud.h
81301+++ b/include/asm-generic/pgtable-nopud.h
81302@@ -1,10 +1,15 @@
81303 #ifndef _PGTABLE_NOPUD_H
81304 #define _PGTABLE_NOPUD_H
81305
81306-#ifndef __ASSEMBLY__
81307-
81308 #define __PAGETABLE_PUD_FOLDED
81309
81310+#define PUD_SHIFT PGDIR_SHIFT
81311+#define PTRS_PER_PUD 1
81312+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
81313+#define PUD_MASK (~(PUD_SIZE-1))
81314+
81315+#ifndef __ASSEMBLY__
81316+
81317 /*
81318 * Having the pud type consist of a pgd gets the size right, and allows
81319 * us to conceptually access the pgd entry that this pud is folded into
81320@@ -12,11 +17,6 @@
81321 */
81322 typedef struct { pgd_t pgd; } pud_t;
81323
81324-#define PUD_SHIFT PGDIR_SHIFT
81325-#define PTRS_PER_PUD 1
81326-#define PUD_SIZE (1UL << PUD_SHIFT)
81327-#define PUD_MASK (~(PUD_SIZE-1))
81328-
81329 /*
81330 * The "pgd_xxx()" functions here are trivial for a folded two-level
81331 * setup: the pud is never bad, and a pud always exists (as it's folded
81332@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
81333 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
81334
81335 #define pgd_populate(mm, pgd, pud) do { } while (0)
81336+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
81337 /*
81338 * (puds are folded into pgds so this doesn't get actually called,
81339 * but the define is needed for a generic inline function.)
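
Moving these constants in front of the #ifndef __ASSEMBLY__ guard is what lets assembly sources use PMD_SIZE/PUD_SIZE, and the switch from 1UL to _AC(1,UL) is what keeps that legal: _AC() pastes the UL suffix on in C but expands to the bare number under __ASSEMBLY__, where suffixes would not parse. The shape of the macro, with underscored names to mark this as a re-derivation rather than the uapi header itself:

#ifdef __ASSEMBLY__
#define _AC_(X, Y)	X		/* assembly: plain constant */
#else
#define __AC_(X, Y)	(X##Y)
#define _AC_(X, Y)	__AC_(X, Y)	/* C: 1,UL -> 1UL */
#endif

#define PUD_SIZE_DEMO	(_AC_(1, UL) << 30)	/* example: typed UL in C */
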
81340diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
81341index 53b2acc..f4568e7 100644
81342--- a/include/asm-generic/pgtable.h
81343+++ b/include/asm-generic/pgtable.h
81344@@ -819,6 +819,22 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
81345 }
81346 #endif /* CONFIG_NUMA_BALANCING */
81347
81348+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
81349+#ifdef CONFIG_PAX_KERNEXEC
81350+#error KERNEXEC requires pax_open_kernel
81351+#else
81352+static inline unsigned long pax_open_kernel(void) { return 0; }
81353+#endif
81354+#endif
81355+
81356+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
81357+#ifdef CONFIG_PAX_KERNEXEC
81358+#error KERNEXEC requires pax_close_kernel
81359+#else
81360+static inline unsigned long pax_close_kernel(void) { return 0; }
81361+#endif
81362+#endif
81363+
81364 #endif /* CONFIG_MMU */
81365
81366 #endif /* !__ASSEMBLY__ */
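
The two guards above follow a stub-or-fail pattern worth naming: the generic header supplies a do-nothing inline only while the hardening feature is off, so enabling KERNEXEC (or, in the uaccess header below, UDEREF) on an architecture that never implemented the hook becomes a compile-time #error instead of a silently inert option. The pattern in the abstract (hypothetical names):

#ifndef __HAVE_ARCH_FROB
# ifdef CONFIG_NEEDS_FROB
#  error CONFIG_NEEDS_FROB requires an architecture frob() implementation
# else
/* feature off: harmless no-op keeps generic callers compiling */
static inline unsigned long frob(void) { return 0; }
# endif
#endif
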
81367diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
81368index 72d8803..cb9749c 100644
81369--- a/include/asm-generic/uaccess.h
81370+++ b/include/asm-generic/uaccess.h
81371@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
81372 return __clear_user(to, n);
81373 }
81374
81375+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
81376+#ifdef CONFIG_PAX_MEMORY_UDEREF
81377+#error UDEREF requires pax_open_userland
81378+#else
81379+static inline unsigned long pax_open_userland(void) { return 0; }
81380+#endif
81381+#endif
81382+
81383+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
81384+#ifdef CONFIG_PAX_MEMORY_UDEREF
81385+#error UDEREF requires pax_close_userland
81386+#else
81387+static inline unsigned long pax_close_userland(void) { return 0; }
81388+#endif
81389+#endif
81390+
81391 #endif /* __ASM_GENERIC_UACCESS_H */
81392diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
81393index c1c0b0c..05c9588 100644
81394--- a/include/asm-generic/vmlinux.lds.h
81395+++ b/include/asm-generic/vmlinux.lds.h
81396@@ -231,6 +231,7 @@
81397 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
81398 VMLINUX_SYMBOL(__start_rodata) = .; \
81399 *(.rodata) *(.rodata.*) \
81400+ *(.data..read_only) \
81401 *(__vermagic) /* Kernel version magic */ \
81402 . = ALIGN(8); \
81403 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
81404@@ -719,17 +720,18 @@
81405 * section in the linker script will go there too. @phdr should have
81406 * a leading colon.
81407 *
81408- * Note that this macros defines __per_cpu_load as an absolute symbol.
81409+ * Note that this macro defines per_cpu_load as an absolute symbol.
81410 * If there is no need to put the percpu section at a predetermined
81411 * address, use PERCPU_SECTION.
81412 */
81413 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
81414- VMLINUX_SYMBOL(__per_cpu_load) = .; \
81415- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
81416+ per_cpu_load = .; \
81417+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
81418 - LOAD_OFFSET) { \
81419+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
81420 PERCPU_INPUT(cacheline) \
81421 } phdr \
81422- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
81423+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
81424
81425 /**
81426 * PERCPU_SECTION - define output section for percpu area, simple version
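Two independent hardening tweaks sit in this hunk. First, objects placed in the .data..read_only input section are now collected into .rodata, so KERNEXEC can write-protect them wholesale; the patch's __read_only marker (defined per-arch, with a fallback added later in include/linux/cache.h) is a thin wrapper over that section. Second, PERCPU_VADDR stops defining __per_cpu_load as an absolute symbol and computes it inside the section instead. A sketch of the first mechanism (the exact attribute spelling is per-arch and may differ):

    /* Illustrative definition of the marker the section rule exists for: */
    #define __read_only __attribute__((__section__(".data..read_only")))

    /* Set once during early boot, read-only for the rest of runtime;
     * any later write needs the pax_open_kernel()/pax_close_kernel()
     * bracket shown earlier. */
    static unsigned long boot_tunable __read_only = 42;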
81427diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
81428index 016c2f1..c4baa98 100644
81429--- a/include/crypto/algapi.h
81430+++ b/include/crypto/algapi.h
81431@@ -34,7 +34,7 @@ struct crypto_type {
81432 unsigned int maskclear;
81433 unsigned int maskset;
81434 unsigned int tfmsize;
81435-};
81436+} __do_const;
81437
81438 struct crypto_instance {
81439 struct crypto_alg alg;
81440diff --git a/include/drm/drmP.h b/include/drm/drmP.h
81441index 8af71a8..7fe6c19 100644
81442--- a/include/drm/drmP.h
81443+++ b/include/drm/drmP.h
81444@@ -68,6 +68,7 @@
81445 #include <linux/workqueue.h>
81446 #include <linux/poll.h>
81447 #include <asm/pgalloc.h>
81448+#include <asm/local.h>
81449 #include <drm/drm.h>
81450 #include <drm/drm_sarea.h>
81451 #include <drm/drm_vma_manager.h>
81452@@ -261,10 +262,12 @@ do { \
81453 * \param cmd command.
81454 * \param arg argument.
81455 */
81456-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
81457+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
81458+ struct drm_file *file_priv);
81459+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
81460 struct drm_file *file_priv);
81461
81462-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
81463+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
81464 unsigned long arg);
81465
81466 #define DRM_IOCTL_NR(n) _IOC_NR(n)
81467@@ -280,10 +283,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
81468 struct drm_ioctl_desc {
81469 unsigned int cmd;
81470 int flags;
81471- drm_ioctl_t *func;
81472+ drm_ioctl_t func;
81473 unsigned int cmd_drv;
81474 const char *name;
81475-};
81476+} __do_const;
81477
81478 /**
81479 * Creates a driver or general drm_ioctl_desc array entry for the given
81480@@ -983,7 +986,8 @@ struct drm_info_list {
81481 int (*show)(struct seq_file*, void*); /** show callback */
81482 u32 driver_features; /**< Required driver features for this entry */
81483 void *data;
81484-};
81485+} __do_const;
81486+typedef struct drm_info_list __no_const drm_info_list_no_const;
81487
81488 /**
81489 * debugfs node structure. This structure represents a debugfs file.
81490@@ -1067,7 +1071,7 @@ struct drm_device {
81491
81492 /** \name Usage Counters */
81493 /*@{ */
81494- int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
81495+ local_t open_count; /**< Outstanding files open, protected by drm_global_mutex. */
81496 spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
81497 int buf_use; /**< Buffers in use -- cannot alloc */
81498 atomic_t buf_alloc; /**< Buffer allocation in progress */
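This file shows the constify idiom the patch applies tree-wide: function-pointer typedefs gain const so tables of them can live in read-only memory, operations structs are tagged __do_const, and a parallel *_no_const typedef (here drm_ioctl_no_const_t and drm_info_list_no_const) remains for the few call sites that genuinely build such objects at runtime. The open_count switch from int to local_t is separate — it makes the counter update safe without the global lock. A compact sketch of the idiom (example_ops and friends are illustrative; __do_const/__no_const only take effect when the constify gcc plugin is active, as defined later in compiler-gcc4.h):

    struct example_ops {
        int  (*start)(void *ctx);
        void (*stop)(void *ctx);
    } __do_const;                               /* plugin forces instances const */
    typedef struct example_ops __no_const example_ops_no_const;

    static int  my_start(void *ctx) { return 0; }
    static void my_stop(void *ctx)  { }

    static struct example_ops global_ops = {    /* ends up write-protected */
        .start = my_start,
        .stop  = my_stop,
    };

    static void build_runtime_ops(example_ops_no_const *ops)
    {
        ops->start = my_start;                  /* legal on the no_const variant */
        ops->stop  = my_stop;
    }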
81499diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
81500index a3d75fe..6802f9c 100644
81501--- a/include/drm/drm_crtc_helper.h
81502+++ b/include/drm/drm_crtc_helper.h
81503@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
81504 struct drm_connector *connector);
81505 /* disable encoder when not in use - more explicit than dpms off */
81506 void (*disable)(struct drm_encoder *encoder);
81507-};
81508+} __no_const;
81509
81510 /**
81511 * drm_connector_helper_funcs - helper operations for connectors
81512diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
81513index a70d456..6ea07cd 100644
81514--- a/include/drm/i915_pciids.h
81515+++ b/include/drm/i915_pciids.h
81516@@ -37,7 +37,7 @@
81517 */
81518 #define INTEL_VGA_DEVICE(id, info) { \
81519 0x8086, id, \
81520- ~0, ~0, \
81521+ PCI_ANY_ID, PCI_ANY_ID, \
81522 0x030000, 0xff0000, \
81523 (unsigned long) info }
81524
81525diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
81526index 72dcbe8..8db58d7 100644
81527--- a/include/drm/ttm/ttm_memory.h
81528+++ b/include/drm/ttm/ttm_memory.h
81529@@ -48,7 +48,7 @@
81530
81531 struct ttm_mem_shrink {
81532 int (*do_shrink) (struct ttm_mem_shrink *);
81533-};
81534+} __no_const;
81535
81536 /**
81537 * struct ttm_mem_global - Global memory accounting structure.
81538diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
81539index 49a8284..9643967 100644
81540--- a/include/drm/ttm/ttm_page_alloc.h
81541+++ b/include/drm/ttm/ttm_page_alloc.h
81542@@ -80,6 +80,7 @@ void ttm_dma_page_alloc_fini(void);
81543 */
81544 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
81545
81546+struct device;
81547 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
81548 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
81549
81550diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
81551index 4b840e8..155d235 100644
81552--- a/include/keys/asymmetric-subtype.h
81553+++ b/include/keys/asymmetric-subtype.h
81554@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
81555 /* Verify the signature on a key of this subtype (optional) */
81556 int (*verify_signature)(const struct key *key,
81557 const struct public_key_signature *sig);
81558-};
81559+} __do_const;
81560
81561 /**
81562 * asymmetric_key_subtype - Get the subtype from an asymmetric key
81563diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
81564index c1da539..1dcec55 100644
81565--- a/include/linux/atmdev.h
81566+++ b/include/linux/atmdev.h
81567@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
81568 #endif
81569
81570 struct k_atm_aal_stats {
81571-#define __HANDLE_ITEM(i) atomic_t i
81572+#define __HANDLE_ITEM(i) atomic_unchecked_t i
81573 __AAL_STAT_ITEMS
81574 #undef __HANDLE_ITEM
81575 };
81576@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
81577 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
81578 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
81579 struct module *owner;
81580-};
81581+} __do_const;
81582
81583 struct atmphy_ops {
81584 int (*start)(struct atm_dev *dev);
81585diff --git a/include/linux/audit.h b/include/linux/audit.h
81586index 22cfddb..ab759e8 100644
81587--- a/include/linux/audit.h
81588+++ b/include/linux/audit.h
81589@@ -210,7 +210,7 @@ static inline void audit_ptrace(struct task_struct *t)
81590 extern unsigned int audit_serial(void);
81591 extern int auditsc_get_stamp(struct audit_context *ctx,
81592 struct timespec *t, unsigned int *serial);
81593-extern int audit_set_loginuid(kuid_t loginuid);
81594+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
81595
81596 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
81597 {
81598diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
81599index 61f29e5..e67c658 100644
81600--- a/include/linux/binfmts.h
81601+++ b/include/linux/binfmts.h
81602@@ -44,7 +44,7 @@ struct linux_binprm {
81603 unsigned interp_flags;
81604 unsigned interp_data;
81605 unsigned long loader, exec;
81606-};
81607+} __randomize_layout;
81608
81609 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
81610 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
81611@@ -73,8 +73,10 @@ struct linux_binfmt {
81612 int (*load_binary)(struct linux_binprm *);
81613 int (*load_shlib)(struct file *);
81614 int (*core_dump)(struct coredump_params *cprm);
81615+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
81616+ void (*handle_mmap)(struct file *);
81617 unsigned long min_coredump; /* minimal dump size */
81618-};
81619+} __do_const __randomize_layout;
81620
81621 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
81622
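The two new hooks let a binary-format handler observe mprotect() and mmap() on files it loaded — this is how the patch's ELF handler enforces PaX flags carried in the binary. A heavily hedged sketch of wiring one up (every name below is illustrative; the real registration happens in fs/binfmt_elf.c):

    #include <linux/binfmts.h>
    #include <linux/module.h>

    static int example_load_binary(struct linux_binprm *bprm); /* defined elsewhere */

    static void example_handle_mmap(struct file *file)
    {
        /* e.g. audit or restrict new executable mappings of this image */
    }

    static struct linux_binfmt example_format = {
        .module      = THIS_MODULE,
        .load_binary = example_load_binary,
        .handle_mmap = example_handle_mmap,
    };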
81623diff --git a/include/linux/bitops.h b/include/linux/bitops.h
81624index cbc5833..8123ebc 100644
81625--- a/include/linux/bitops.h
81626+++ b/include/linux/bitops.h
81627@@ -122,7 +122,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
81628 * @word: value to rotate
81629 * @shift: bits to roll
81630 */
81631-static inline __u32 rol32(__u32 word, unsigned int shift)
81632+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
81633 {
81634 return (word << shift) | (word >> (32 - shift));
81635 }
81636@@ -132,7 +132,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
81637 * @word: value to rotate
81638 * @shift: bits to roll
81639 */
81640-static inline __u32 ror32(__u32 word, unsigned int shift)
81641+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
81642 {
81643 return (word >> shift) | (word << (32 - shift));
81644 }
81645@@ -188,7 +188,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
81646 return (__s32)(value << shift) >> shift;
81647 }
81648
81649-static inline unsigned fls_long(unsigned long l)
81650+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
81651 {
81652 if (sizeof(l) == 4)
81653 return fls(l);
81654diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
81655index 8699bcf..279485d 100644
81656--- a/include/linux/blkdev.h
81657+++ b/include/linux/blkdev.h
81658@@ -1625,7 +1625,7 @@ struct block_device_operations {
81659 /* this callback is with swap_lock and sometimes page table lock held */
81660 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
81661 struct module *owner;
81662-};
81663+} __do_const;
81664
81665 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
81666 unsigned long);
81667diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
81668index afc1343..9735539 100644
81669--- a/include/linux/blktrace_api.h
81670+++ b/include/linux/blktrace_api.h
81671@@ -25,7 +25,7 @@ struct blk_trace {
81672 struct dentry *dropped_file;
81673 struct dentry *msg_file;
81674 struct list_head running_list;
81675- atomic_t dropped;
81676+ atomic_unchecked_t dropped;
81677 };
81678
81679 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
81680diff --git a/include/linux/cache.h b/include/linux/cache.h
81681index 17e7e82..1d7da26 100644
81682--- a/include/linux/cache.h
81683+++ b/include/linux/cache.h
81684@@ -16,6 +16,14 @@
81685 #define __read_mostly
81686 #endif
81687
81688+#ifndef __read_only
81689+#ifdef CONFIG_PAX_KERNEXEC
81690+#error KERNEXEC requires __read_only
81691+#else
81692+#define __read_only __read_mostly
81693+#endif
81694+#endif
81695+
81696 #ifndef ____cacheline_aligned
81697 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
81698 #endif
81699diff --git a/include/linux/capability.h b/include/linux/capability.h
81700index 84b13ad..172cdee 100644
81701--- a/include/linux/capability.h
81702+++ b/include/linux/capability.h
81703@@ -211,9 +211,14 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
81704 extern bool capable(int cap);
81705 extern bool ns_capable(struct user_namespace *ns, int cap);
81706 extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
81707+extern bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap);
81708 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
81709+extern bool capable_nolog(int cap);
81710+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
81711
81712 /* audit system wants to get cap info from files as well */
81713 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
81714
81715+extern int is_privileged_binary(const struct dentry *dentry);
81716+
81717 #endif /* !_LINUX_CAPABILITY_H */
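The _nolog variants perform exactly the same privilege checks as capable()/ns_capable() but skip the security-log entry on denial; they are meant for probing paths where "no" is an expected, benign answer. A one-line usage sketch (grsecurity-only API; may_tune_device is illustrative):

    static bool may_tune_device(void)
    {
        /* same semantics as capable(CAP_SYS_ADMIN), minus denial logging */
        return capable_nolog(CAP_SYS_ADMIN);
    }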
81718diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
81719index 8609d57..86e4d79 100644
81720--- a/include/linux/cdrom.h
81721+++ b/include/linux/cdrom.h
81722@@ -87,7 +87,6 @@ struct cdrom_device_ops {
81723
81724 /* driver specifications */
81725 const int capability; /* capability flags */
81726- int n_minors; /* number of active minor devices */
81727 /* handle uniform packets for scsi type devices (scsi,atapi) */
81728 int (*generic_packet) (struct cdrom_device_info *,
81729 struct packet_command *);
81730diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
81731index 4ce9056..86caac6 100644
81732--- a/include/linux/cleancache.h
81733+++ b/include/linux/cleancache.h
81734@@ -31,7 +31,7 @@ struct cleancache_ops {
81735 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
81736 void (*invalidate_inode)(int, struct cleancache_filekey);
81737 void (*invalidate_fs)(int);
81738-};
81739+} __no_const;
81740
81741 extern struct cleancache_ops *
81742 cleancache_register_ops(struct cleancache_ops *ops);
81743diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
81744index 0c287db..5efa775 100644
81745--- a/include/linux/clk-provider.h
81746+++ b/include/linux/clk-provider.h
81747@@ -180,6 +180,7 @@ struct clk_ops {
81748 void (*init)(struct clk_hw *hw);
81749 int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
81750 };
81751+typedef struct clk_ops __no_const clk_ops_no_const;
81752
81753 /**
81754 * struct clk_init_data - holds init data that's common to all clocks and is
81755diff --git a/include/linux/compat.h b/include/linux/compat.h
81756index e649426..a74047b 100644
81757--- a/include/linux/compat.h
81758+++ b/include/linux/compat.h
81759@@ -316,7 +316,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
81760 compat_size_t __user *len_ptr);
81761
81762 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
81763-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
81764+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
81765 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
81766 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
81767 compat_ssize_t msgsz, int msgflg);
81768@@ -436,7 +436,7 @@ extern int compat_ptrace_request(struct task_struct *child,
81769 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
81770 compat_ulong_t addr, compat_ulong_t data);
81771 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
81772- compat_long_t addr, compat_long_t data);
81773+ compat_ulong_t addr, compat_ulong_t data);
81774
81775 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
81776 /*
81777diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
81778index 2507fd2..55203f8 100644
81779--- a/include/linux/compiler-gcc4.h
81780+++ b/include/linux/compiler-gcc4.h
81781@@ -39,9 +39,34 @@
81782 # define __compiletime_warning(message) __attribute__((warning(message)))
81783 # define __compiletime_error(message) __attribute__((error(message)))
81784 #endif /* __CHECKER__ */
81785+
81786+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
81787+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
81788+#define __bos0(ptr) __bos((ptr), 0)
81789+#define __bos1(ptr) __bos((ptr), 1)
81790 #endif /* GCC_VERSION >= 40300 */
81791
81792 #if GCC_VERSION >= 40500
81793+
81794+#ifdef RANDSTRUCT_PLUGIN
81795+#define __randomize_layout __attribute__((randomize_layout))
81796+#define __no_randomize_layout __attribute__((no_randomize_layout))
81797+#endif
81798+
81799+#ifdef CONSTIFY_PLUGIN
81800+#define __no_const __attribute__((no_const))
81801+#define __do_const __attribute__((do_const))
81802+#endif
81803+
81804+#ifdef SIZE_OVERFLOW_PLUGIN
81805+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
81806+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
81807+#endif
81808+
81809+#ifdef LATENT_ENTROPY_PLUGIN
81810+#define __latent_entropy __attribute__((latent_entropy))
81811+#endif
81812+
81813 /*
81814 * Mark a position in code as unreachable. This can be used to
81815 * suppress control flow warnings after asm blocks that transfer
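Each attribute above is only defined when its gcc plugin is enabled (RANDSTRUCT_PLUGIN, CONSTIFY_PLUGIN, SIZE_OVERFLOW_PLUGIN, LATENT_ENTROPY_PLUGIN); the compiler.h hunk below supplies empty fallbacks so annotated code still builds without the plugins. A sketch of how the overflow annotations are used, mirroring the rol32 change later in this patch (example names are illustrative):

    #include <linux/types.h>

    /* size_overflow plugin: instrument arithmetic feeding parameter 1 */
    void *example_alloc(size_t n) __alloc_size(1) __size_overflow(1);

    /* deliberate wraparound (rotates, hashes) is whitelisted instead */
    static inline u32 __intentional_overflow(-1) example_rotl(u32 w,
                                                              unsigned int s)
    {
        return (w << s) | (w >> (32 - s));
    }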
81816diff --git a/include/linux/compiler.h b/include/linux/compiler.h
81817index d5ad7b1..3b74638 100644
81818--- a/include/linux/compiler.h
81819+++ b/include/linux/compiler.h
81820@@ -5,11 +5,14 @@
81821
81822 #ifdef __CHECKER__
81823 # define __user __attribute__((noderef, address_space(1)))
81824+# define __force_user __force __user
81825 # define __kernel __attribute__((address_space(0)))
81826+# define __force_kernel __force __kernel
81827 # define __safe __attribute__((safe))
81828 # define __force __attribute__((force))
81829 # define __nocast __attribute__((nocast))
81830 # define __iomem __attribute__((noderef, address_space(2)))
81831+# define __force_iomem __force __iomem
81832 # define __must_hold(x) __attribute__((context(x,1,1)))
81833 # define __acquires(x) __attribute__((context(x,0,1)))
81834 # define __releases(x) __attribute__((context(x,1,0)))
81835@@ -17,20 +20,37 @@
81836 # define __release(x) __context__(x,-1)
81837 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
81838 # define __percpu __attribute__((noderef, address_space(3)))
81839+# define __force_percpu __force __percpu
81840 #ifdef CONFIG_SPARSE_RCU_POINTER
81841 # define __rcu __attribute__((noderef, address_space(4)))
81842+# define __force_rcu __force __rcu
81843 #else
81844 # define __rcu
81845+# define __force_rcu
81846 #endif
81847 extern void __chk_user_ptr(const volatile void __user *);
81848 extern void __chk_io_ptr(const volatile void __iomem *);
81849 #else
81850-# define __user
81851-# define __kernel
81852+# ifdef CHECKER_PLUGIN
81853+//# define __user
81854+//# define __force_user
81855+//# define __kernel
81856+//# define __force_kernel
81857+# else
81858+# ifdef STRUCTLEAK_PLUGIN
81859+# define __user __attribute__((user))
81860+# else
81861+# define __user
81862+# endif
81863+# define __force_user
81864+# define __kernel
81865+# define __force_kernel
81866+# endif
81867 # define __safe
81868 # define __force
81869 # define __nocast
81870 # define __iomem
81871+# define __force_iomem
81872 # define __chk_user_ptr(x) (void)0
81873 # define __chk_io_ptr(x) (void)0
81874 # define __builtin_warning(x, y...) (1)
81875@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
81876 # define __release(x) (void)0
81877 # define __cond_lock(x,c) (c)
81878 # define __percpu
81879+# define __force_percpu
81880 # define __rcu
81881+# define __force_rcu
81882 #endif
81883
81884 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
81885@@ -286,6 +308,34 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
81886 # define __attribute_const__ /* unimplemented */
81887 #endif
81888
81889+#ifndef __randomize_layout
81890+# define __randomize_layout
81891+#endif
81892+
81893+#ifndef __no_randomize_layout
81894+# define __no_randomize_layout
81895+#endif
81896+
81897+#ifndef __no_const
81898+# define __no_const
81899+#endif
81900+
81901+#ifndef __do_const
81902+# define __do_const
81903+#endif
81904+
81905+#ifndef __size_overflow
81906+# define __size_overflow(...)
81907+#endif
81908+
81909+#ifndef __intentional_overflow
81910+# define __intentional_overflow(...)
81911+#endif
81912+
81913+#ifndef __latent_entropy
81914+# define __latent_entropy
81915+#endif
81916+
81917 /*
81918 * Tell gcc if a function is cold. The compiler will assume any path
81919 * directly leading to the call is unlikely.
81920@@ -295,6 +345,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
81921 #define __cold
81922 #endif
81923
81924+#ifndef __alloc_size
81925+#define __alloc_size(...)
81926+#endif
81927+
81928+#ifndef __bos
81929+#define __bos(ptr, arg)
81930+#endif
81931+
81932+#ifndef __bos0
81933+#define __bos0(ptr)
81934+#endif
81935+
81936+#ifndef __bos1
81937+#define __bos1(ptr)
81938+#endif
81939+
81940 /* Simple shorthand for a section definition */
81941 #ifndef __section
81942 # define __section(S) __attribute__ ((__section__(#S)))
81943@@ -378,7 +444,8 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
81944 * use is to mediate communication between process-level code and irq/NMI
81945 * handlers, all running on the same CPU.
81946 */
81947-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
81948+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
81949+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
81950
81951 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
81952 #ifdef CONFIG_KPROBES
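The ACCESS_ONCE() change is subtle but load-bearing: casting through a const-qualified volatile pointer keeps every existing read working while turning any write through the macro into a compile error, forcing writers onto the explicit ACCESS_ONCE_RW() variant and surfacing accidental stores to intended-read-only data at build time. A short illustration:

    static int shared_flag;

    static void access_once_demo(void)
    {
        int v = ACCESS_ONCE(shared_flag);       /* reads still compile */

        /* ACCESS_ONCE(shared_flag) = 1;           now a build error:
         *                                         assignment to const lvalue */
        ACCESS_ONCE_RW(shared_flag) = v + 1;    /* explicit writable form */
    }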
81953diff --git a/include/linux/completion.h b/include/linux/completion.h
81954index 5d5aaae..0ea9b84 100644
81955--- a/include/linux/completion.h
81956+++ b/include/linux/completion.h
81957@@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x)
81958
81959 extern void wait_for_completion(struct completion *);
81960 extern void wait_for_completion_io(struct completion *);
81961-extern int wait_for_completion_interruptible(struct completion *x);
81962-extern int wait_for_completion_killable(struct completion *x);
81963+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
81964+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
81965 extern unsigned long wait_for_completion_timeout(struct completion *x,
81966- unsigned long timeout);
81967+ unsigned long timeout) __intentional_overflow(-1);
81968 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
81969- unsigned long timeout);
81970+ unsigned long timeout) __intentional_overflow(-1);
81971 extern long wait_for_completion_interruptible_timeout(
81972- struct completion *x, unsigned long timeout);
81973+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
81974 extern long wait_for_completion_killable_timeout(
81975- struct completion *x, unsigned long timeout);
81976+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
81977 extern bool try_wait_for_completion(struct completion *x);
81978 extern bool completion_done(struct completion *x);
81979
81980diff --git a/include/linux/configfs.h b/include/linux/configfs.h
81981index 34025df..d94bbbc 100644
81982--- a/include/linux/configfs.h
81983+++ b/include/linux/configfs.h
81984@@ -125,7 +125,7 @@ struct configfs_attribute {
81985 const char *ca_name;
81986 struct module *ca_owner;
81987 umode_t ca_mode;
81988-};
81989+} __do_const;
81990
81991 /*
81992 * Users often need to create attribute structures for their configurable
81993diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
81994index 8f8ae95..b9b0e6d 100644
81995--- a/include/linux/cpufreq.h
81996+++ b/include/linux/cpufreq.h
81997@@ -202,6 +202,7 @@ struct global_attr {
81998 ssize_t (*store)(struct kobject *a, struct attribute *b,
81999 const char *c, size_t count);
82000 };
82001+typedef struct global_attr __no_const global_attr_no_const;
82002
82003 #define define_one_global_ro(_name) \
82004 static struct global_attr _name = \
82005@@ -268,7 +269,7 @@ struct cpufreq_driver {
82006 bool boost_supported;
82007 bool boost_enabled;
82008 int (*set_boost) (int state);
82009-};
82010+} __do_const;
82011
82012 /* flags */
82013 #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
82014diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
82015index 25e0df6..952dffd 100644
82016--- a/include/linux/cpuidle.h
82017+++ b/include/linux/cpuidle.h
82018@@ -50,7 +50,8 @@ struct cpuidle_state {
82019 int index);
82020
82021 int (*enter_dead) (struct cpuidle_device *dev, int index);
82022-};
82023+} __do_const;
82024+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
82025
82026 /* Idle State Flags */
82027 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
82028@@ -209,7 +210,7 @@ struct cpuidle_governor {
82029 void (*reflect) (struct cpuidle_device *dev, int index);
82030
82031 struct module *owner;
82032-};
82033+} __do_const;
82034
82035 #ifdef CONFIG_CPU_IDLE
82036 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
82037diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
82038index 2997af6..424ddc1 100644
82039--- a/include/linux/cpumask.h
82040+++ b/include/linux/cpumask.h
82041@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
82042 }
82043
82044 /* Valid inputs for n are -1 and 0. */
82045-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
82046+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
82047 {
82048 return n+1;
82049 }
82050
82051-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
82052+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
82053 {
82054 return n+1;
82055 }
82056
82057-static inline unsigned int cpumask_next_and(int n,
82058+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
82059 const struct cpumask *srcp,
82060 const struct cpumask *andp)
82061 {
82062@@ -174,7 +174,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
82063 *
82064 * Returns >= nr_cpu_ids if no further cpus set.
82065 */
82066-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
82067+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
82068 {
82069 /* -1 is a legal arg here. */
82070 if (n != -1)
82071@@ -189,7 +189,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
82072 *
82073 * Returns >= nr_cpu_ids if no further cpus unset.
82074 */
82075-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
82076+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
82077 {
82078 /* -1 is a legal arg here. */
82079 if (n != -1)
82080@@ -197,7 +197,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
82081 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
82082 }
82083
82084-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
82085+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
82086 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
82087 int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
82088
82089diff --git a/include/linux/cred.h b/include/linux/cred.h
82090index f61d6c8..d372d95 100644
82091--- a/include/linux/cred.h
82092+++ b/include/linux/cred.h
82093@@ -35,7 +35,7 @@ struct group_info {
82094 int nblocks;
82095 kgid_t small_block[NGROUPS_SMALL];
82096 kgid_t *blocks[0];
82097-};
82098+} __randomize_layout;
82099
82100 /**
82101 * get_group_info - Get a reference to a group info structure
82102@@ -136,7 +136,7 @@ struct cred {
82103 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
82104 struct group_info *group_info; /* supplementary groups for euid/fsgid */
82105 struct rcu_head rcu; /* RCU deletion hook */
82106-};
82107+} __randomize_layout;
82108
82109 extern void __put_cred(struct cred *);
82110 extern void exit_creds(struct task_struct *);
82111@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
82112 static inline void validate_process_creds(void)
82113 {
82114 }
82115+static inline void validate_task_creds(struct task_struct *task)
82116+{
82117+}
82118 #endif
82119
82120 /**
82121@@ -322,6 +325,7 @@ static inline void put_cred(const struct cred *_cred)
82122
82123 #define task_uid(task) (task_cred_xxx((task), uid))
82124 #define task_euid(task) (task_cred_xxx((task), euid))
82125+#define task_securebits(task) (task_cred_xxx((task), securebits))
82126
82127 #define current_cred_xxx(xxx) \
82128 ({ \
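group_info and cred joining __randomize_layout means the RANDSTRUCT plugin shuffles their member order per build, so an exploit can no longer rely on fixed field offsets into credential structures. Opting a structure in is just the attribute (sketch; session_info is illustrative, and code must never assume member offsets afterwards):

    struct session_info {
        kuid_t        owner;
        unsigned long flags;
        void         *private_data;
    } __randomize_layout;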
82129diff --git a/include/linux/crypto.h b/include/linux/crypto.h
82130index b92eadf..b4ecdc1 100644
82131--- a/include/linux/crypto.h
82132+++ b/include/linux/crypto.h
82133@@ -373,7 +373,7 @@ struct cipher_tfm {
82134 const u8 *key, unsigned int keylen);
82135 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
82136 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
82137-};
82138+} __no_const;
82139
82140 struct hash_tfm {
82141 int (*init)(struct hash_desc *desc);
82142@@ -394,13 +394,13 @@ struct compress_tfm {
82143 int (*cot_decompress)(struct crypto_tfm *tfm,
82144 const u8 *src, unsigned int slen,
82145 u8 *dst, unsigned int *dlen);
82146-};
82147+} __no_const;
82148
82149 struct rng_tfm {
82150 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
82151 unsigned int dlen);
82152 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
82153-};
82154+} __no_const;
82155
82156 #define crt_ablkcipher crt_u.ablkcipher
82157 #define crt_aead crt_u.aead
82158diff --git a/include/linux/ctype.h b/include/linux/ctype.h
82159index 653589e..4ef254a 100644
82160--- a/include/linux/ctype.h
82161+++ b/include/linux/ctype.h
82162@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
82163 * Fast implementation of tolower() for internal usage. Do not use in your
82164 * code.
82165 */
82166-static inline char _tolower(const char c)
82167+static inline unsigned char _tolower(const unsigned char c)
82168 {
82169 return c | 0x20;
82170 }
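The _tolower() signature change matters on targets where plain char is signed: bytes >= 0x80 are negative there, and c | 0x20 sign-extends before the OR, yielding a value far outside the 0-255 range callers expect. A small demonstration of the difference:

    static void tolower_signedness_demo(void)
    {
        char raw = (char)0xC4;          /* -60 where plain char is signed */
        int bad  = raw | 0x20;          /* sign-extends: 0xFFFFFFE4 */

        unsigned char u = 0xC4;
        int good = u | 0x20;            /* 0xE4, as intended */

        (void)bad; (void)good;
    }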
82171diff --git a/include/linux/dcache.h b/include/linux/dcache.h
82172index 3c7ec32..4ca97cc 100644
82173--- a/include/linux/dcache.h
82174+++ b/include/linux/dcache.h
82175@@ -133,7 +133,7 @@ struct dentry {
82176 } d_u;
82177 struct list_head d_subdirs; /* our children */
82178 struct hlist_node d_alias; /* inode alias list */
82179-};
82180+} __randomize_layout;
82181
82182 /*
82183 * dentry->d_lock spinlock nesting subclasses:
82184diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
82185index 7925bf0..d5143d2 100644
82186--- a/include/linux/decompress/mm.h
82187+++ b/include/linux/decompress/mm.h
82188@@ -77,7 +77,7 @@ static void free(void *where)
82189 * warnings when not needed (indeed large_malloc / large_free are not
82190 * needed by inflate */
82191
82192-#define malloc(a) kmalloc(a, GFP_KERNEL)
82193+#define malloc(a) kmalloc((a), GFP_KERNEL)
82194 #define free(a) kfree(a)
82195
82196 #define large_malloc(a) vmalloc(a)
82197diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
82198index f1863dc..5c26074 100644
82199--- a/include/linux/devfreq.h
82200+++ b/include/linux/devfreq.h
82201@@ -114,7 +114,7 @@ struct devfreq_governor {
82202 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
82203 int (*event_handler)(struct devfreq *devfreq,
82204 unsigned int event, void *data);
82205-};
82206+} __do_const;
82207
82208 /**
82209 * struct devfreq - Device devfreq structure
82210diff --git a/include/linux/device.h b/include/linux/device.h
82211index af424ac..fd46ddf 100644
82212--- a/include/linux/device.h
82213+++ b/include/linux/device.h
82214@@ -310,7 +310,7 @@ struct subsys_interface {
82215 struct list_head node;
82216 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
82217 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
82218-};
82219+} __do_const;
82220
82221 int subsys_interface_register(struct subsys_interface *sif);
82222 void subsys_interface_unregister(struct subsys_interface *sif);
82223@@ -506,7 +506,7 @@ struct device_type {
82224 void (*release)(struct device *dev);
82225
82226 const struct dev_pm_ops *pm;
82227-};
82228+} __do_const;
82229
82230 /* interface for exporting device attributes */
82231 struct device_attribute {
82232@@ -516,11 +516,12 @@ struct device_attribute {
82233 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
82234 const char *buf, size_t count);
82235 };
82236+typedef struct device_attribute __no_const device_attribute_no_const;
82237
82238 struct dev_ext_attribute {
82239 struct device_attribute attr;
82240 void *var;
82241-};
82242+} __do_const;
82243
82244 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
82245 char *buf);
82246diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
82247index 931b709..89b2d89 100644
82248--- a/include/linux/dma-mapping.h
82249+++ b/include/linux/dma-mapping.h
82250@@ -60,7 +60,7 @@ struct dma_map_ops {
82251 u64 (*get_required_mask)(struct device *dev);
82252 #endif
82253 int is_phys;
82254-};
82255+} __do_const;
82256
82257 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
82258
82259diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
82260index d2c5cc7..d193394 100644
82261--- a/include/linux/dmaengine.h
82262+++ b/include/linux/dmaengine.h
82263@@ -1147,9 +1147,9 @@ struct dma_pinned_list {
82264 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
82265 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
82266
82267-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
82268+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
82269 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
82270-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
82271+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
82272 struct dma_pinned_list *pinned_list, struct page *page,
82273 unsigned int offset, size_t len);
82274
82275diff --git a/include/linux/efi.h b/include/linux/efi.h
82276index 41bbf8b..bd3a718 100644
82277--- a/include/linux/efi.h
82278+++ b/include/linux/efi.h
82279@@ -1027,6 +1027,7 @@ struct efivar_operations {
82280 efi_set_variable_t *set_variable;
82281 efi_query_variable_store_t *query_variable_store;
82282 };
82283+typedef struct efivar_operations __no_const efivar_operations_no_const;
82284
82285 struct efivars {
82286 /*
82287diff --git a/include/linux/elf.h b/include/linux/elf.h
82288index 67a5fa7..b817372 100644
82289--- a/include/linux/elf.h
82290+++ b/include/linux/elf.h
82291@@ -24,6 +24,7 @@ extern Elf32_Dyn _DYNAMIC [];
82292 #define elf_note elf32_note
82293 #define elf_addr_t Elf32_Off
82294 #define Elf_Half Elf32_Half
82295+#define elf_dyn Elf32_Dyn
82296
82297 #else
82298
82299@@ -34,6 +35,7 @@ extern Elf64_Dyn _DYNAMIC [];
82300 #define elf_note elf64_note
82301 #define elf_addr_t Elf64_Off
82302 #define Elf_Half Elf64_Half
82303+#define elf_dyn Elf64_Dyn
82304
82305 #endif
82306
82307diff --git a/include/linux/err.h b/include/linux/err.h
82308index a729120..6ede2c9 100644
82309--- a/include/linux/err.h
82310+++ b/include/linux/err.h
82311@@ -20,12 +20,12 @@
82312
82313 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
82314
82315-static inline void * __must_check ERR_PTR(long error)
82316+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
82317 {
82318 return (void *) error;
82319 }
82320
82321-static inline long __must_check PTR_ERR(__force const void *ptr)
82322+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
82323 {
82324 return (long) ptr;
82325 }
82326diff --git a/include/linux/extcon.h b/include/linux/extcon.h
82327index 36f49c4..a2a1f4c 100644
82328--- a/include/linux/extcon.h
82329+++ b/include/linux/extcon.h
82330@@ -135,7 +135,7 @@ struct extcon_dev {
82331 /* /sys/class/extcon/.../mutually_exclusive/... */
82332 struct attribute_group attr_g_muex;
82333 struct attribute **attrs_muex;
82334- struct device_attribute *d_attrs_muex;
82335+ device_attribute_no_const *d_attrs_muex;
82336 };
82337
82338 /**
82339diff --git a/include/linux/fb.h b/include/linux/fb.h
82340index b6bfda9..1f13487 100644
82341--- a/include/linux/fb.h
82342+++ b/include/linux/fb.h
82343@@ -305,7 +305,7 @@ struct fb_ops {
82344 /* called at KDB enter and leave time to prepare the console */
82345 int (*fb_debug_enter)(struct fb_info *info);
82346 int (*fb_debug_leave)(struct fb_info *info);
82347-};
82348+} __do_const;
82349
82350 #ifdef CONFIG_FB_TILEBLITTING
82351 #define FB_TILE_CURSOR_NONE 0
82352diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
82353index 230f87b..1fd0485 100644
82354--- a/include/linux/fdtable.h
82355+++ b/include/linux/fdtable.h
82356@@ -100,7 +100,7 @@ struct files_struct *get_files_struct(struct task_struct *);
82357 void put_files_struct(struct files_struct *fs);
82358 void reset_files_struct(struct files_struct *);
82359 int unshare_files(struct files_struct **);
82360-struct files_struct *dup_fd(struct files_struct *, int *);
82361+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
82362 void do_close_on_exec(struct files_struct *);
82363 int iterate_fd(struct files_struct *, unsigned,
82364 int (*)(const void *, struct file *, unsigned),
82365diff --git a/include/linux/filter.h b/include/linux/filter.h
82366index a7e3c48..e568c8e 100644
82367--- a/include/linux/filter.h
82368+++ b/include/linux/filter.h
82369@@ -9,330 +9,28 @@
82370 #include <linux/workqueue.h>
82371 #include <uapi/linux/filter.h>
82372
82373-/* Internally used and optimized filter representation with extended
82374- * instruction set based on top of classic BPF.
82375- */
82376-
82377-/* instruction classes */
82378-#define BPF_ALU64 0x07 /* alu mode in double word width */
82379-
82380-/* ld/ldx fields */
82381-#define BPF_DW 0x18 /* double word */
82382-#define BPF_XADD 0xc0 /* exclusive add */
82383-
82384-/* alu/jmp fields */
82385-#define BPF_MOV 0xb0 /* mov reg to reg */
82386-#define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */
82387-
82388-/* change endianness of a register */
82389-#define BPF_END 0xd0 /* flags for endianness conversion: */
82390-#define BPF_TO_LE 0x00 /* convert to little-endian */
82391-#define BPF_TO_BE 0x08 /* convert to big-endian */
82392-#define BPF_FROM_LE BPF_TO_LE
82393-#define BPF_FROM_BE BPF_TO_BE
82394-
82395-#define BPF_JNE 0x50 /* jump != */
82396-#define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */
82397-#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
82398-#define BPF_CALL 0x80 /* function call */
82399-#define BPF_EXIT 0x90 /* function return */
82400-
82401-/* Register numbers */
82402-enum {
82403- BPF_REG_0 = 0,
82404- BPF_REG_1,
82405- BPF_REG_2,
82406- BPF_REG_3,
82407- BPF_REG_4,
82408- BPF_REG_5,
82409- BPF_REG_6,
82410- BPF_REG_7,
82411- BPF_REG_8,
82412- BPF_REG_9,
82413- BPF_REG_10,
82414- __MAX_BPF_REG,
82415-};
82416-
82417-/* BPF has 10 general purpose 64-bit registers and stack frame. */
82418-#define MAX_BPF_REG __MAX_BPF_REG
82419-
82420-/* ArgX, context and stack frame pointer register positions. Note,
82421- * Arg1, Arg2, Arg3, etc are used as argument mappings of function
82422- * calls in BPF_CALL instruction.
82423- */
82424-#define BPF_REG_ARG1 BPF_REG_1
82425-#define BPF_REG_ARG2 BPF_REG_2
82426-#define BPF_REG_ARG3 BPF_REG_3
82427-#define BPF_REG_ARG4 BPF_REG_4
82428-#define BPF_REG_ARG5 BPF_REG_5
82429-#define BPF_REG_CTX BPF_REG_6
82430-#define BPF_REG_FP BPF_REG_10
82431-
82432-/* Additional register mappings for converted user programs. */
82433-#define BPF_REG_A BPF_REG_0
82434-#define BPF_REG_X BPF_REG_7
82435-#define BPF_REG_TMP BPF_REG_8
82436-
82437-/* BPF program can access up to 512 bytes of stack space. */
82438-#define MAX_BPF_STACK 512
82439-
82440-/* Helper macros for filter block array initializers. */
82441-
82442-/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
82443-
82444-#define BPF_ALU64_REG(OP, DST, SRC) \
82445- ((struct sock_filter_int) { \
82446- .code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
82447- .dst_reg = DST, \
82448- .src_reg = SRC, \
82449- .off = 0, \
82450- .imm = 0 })
82451-
82452-#define BPF_ALU32_REG(OP, DST, SRC) \
82453- ((struct sock_filter_int) { \
82454- .code = BPF_ALU | BPF_OP(OP) | BPF_X, \
82455- .dst_reg = DST, \
82456- .src_reg = SRC, \
82457- .off = 0, \
82458- .imm = 0 })
82459-
82460-/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
82461-
82462-#define BPF_ALU64_IMM(OP, DST, IMM) \
82463- ((struct sock_filter_int) { \
82464- .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
82465- .dst_reg = DST, \
82466- .src_reg = 0, \
82467- .off = 0, \
82468- .imm = IMM })
82469-
82470-#define BPF_ALU32_IMM(OP, DST, IMM) \
82471- ((struct sock_filter_int) { \
82472- .code = BPF_ALU | BPF_OP(OP) | BPF_K, \
82473- .dst_reg = DST, \
82474- .src_reg = 0, \
82475- .off = 0, \
82476- .imm = IMM })
82477-
82478-/* Endianess conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
82479-
82480-#define BPF_ENDIAN(TYPE, DST, LEN) \
82481- ((struct sock_filter_int) { \
82482- .code = BPF_ALU | BPF_END | BPF_SRC(TYPE), \
82483- .dst_reg = DST, \
82484- .src_reg = 0, \
82485- .off = 0, \
82486- .imm = LEN })
82487-
82488-/* Short form of mov, dst_reg = src_reg */
82489-
82490-#define BPF_MOV64_REG(DST, SRC) \
82491- ((struct sock_filter_int) { \
82492- .code = BPF_ALU64 | BPF_MOV | BPF_X, \
82493- .dst_reg = DST, \
82494- .src_reg = SRC, \
82495- .off = 0, \
82496- .imm = 0 })
82497-
82498-#define BPF_MOV32_REG(DST, SRC) \
82499- ((struct sock_filter_int) { \
82500- .code = BPF_ALU | BPF_MOV | BPF_X, \
82501- .dst_reg = DST, \
82502- .src_reg = SRC, \
82503- .off = 0, \
82504- .imm = 0 })
82505-
82506-/* Short form of mov, dst_reg = imm32 */
82507-
82508-#define BPF_MOV64_IMM(DST, IMM) \
82509- ((struct sock_filter_int) { \
82510- .code = BPF_ALU64 | BPF_MOV | BPF_K, \
82511- .dst_reg = DST, \
82512- .src_reg = 0, \
82513- .off = 0, \
82514- .imm = IMM })
82515-
82516-#define BPF_MOV32_IMM(DST, IMM) \
82517- ((struct sock_filter_int) { \
82518- .code = BPF_ALU | BPF_MOV | BPF_K, \
82519- .dst_reg = DST, \
82520- .src_reg = 0, \
82521- .off = 0, \
82522- .imm = IMM })
82523-
82524-/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */
82525-
82526-#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \
82527- ((struct sock_filter_int) { \
82528- .code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE), \
82529- .dst_reg = DST, \
82530- .src_reg = SRC, \
82531- .off = 0, \
82532- .imm = IMM })
82533-
82534-#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM) \
82535- ((struct sock_filter_int) { \
82536- .code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \
82537- .dst_reg = DST, \
82538- .src_reg = SRC, \
82539- .off = 0, \
82540- .imm = IMM })
82541-
82542-/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */
82543-
82544-#define BPF_LD_ABS(SIZE, IMM) \
82545- ((struct sock_filter_int) { \
82546- .code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \
82547- .dst_reg = 0, \
82548- .src_reg = 0, \
82549- .off = 0, \
82550- .imm = IMM })
82551-
82552-/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */
82553-
82554-#define BPF_LD_IND(SIZE, SRC, IMM) \
82555- ((struct sock_filter_int) { \
82556- .code = BPF_LD | BPF_SIZE(SIZE) | BPF_IND, \
82557- .dst_reg = 0, \
82558- .src_reg = SRC, \
82559- .off = 0, \
82560- .imm = IMM })
82561-
82562-/* Memory load, dst_reg = *(uint *) (src_reg + off16) */
82563-
82564-#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
82565- ((struct sock_filter_int) { \
82566- .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
82567- .dst_reg = DST, \
82568- .src_reg = SRC, \
82569- .off = OFF, \
82570- .imm = 0 })
82571-
82572-/* Memory store, *(uint *) (dst_reg + off16) = src_reg */
82573-
82574-#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
82575- ((struct sock_filter_int) { \
82576- .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
82577- .dst_reg = DST, \
82578- .src_reg = SRC, \
82579- .off = OFF, \
82580- .imm = 0 })
82581-
82582-/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
82583-
82584-#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
82585- ((struct sock_filter_int) { \
82586- .code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
82587- .dst_reg = DST, \
82588- .src_reg = 0, \
82589- .off = OFF, \
82590- .imm = IMM })
82591-
82592-/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */
82593-
82594-#define BPF_JMP_REG(OP, DST, SRC, OFF) \
82595- ((struct sock_filter_int) { \
82596- .code = BPF_JMP | BPF_OP(OP) | BPF_X, \
82597- .dst_reg = DST, \
82598- .src_reg = SRC, \
82599- .off = OFF, \
82600- .imm = 0 })
82601-
82602-/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
82603-
82604-#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
82605- ((struct sock_filter_int) { \
82606- .code = BPF_JMP | BPF_OP(OP) | BPF_K, \
82607- .dst_reg = DST, \
82608- .src_reg = 0, \
82609- .off = OFF, \
82610- .imm = IMM })
82611-
82612-/* Function call */
82613-
82614-#define BPF_EMIT_CALL(FUNC) \
82615- ((struct sock_filter_int) { \
82616- .code = BPF_JMP | BPF_CALL, \
82617- .dst_reg = 0, \
82618- .src_reg = 0, \
82619- .off = 0, \
82620- .imm = ((FUNC) - __bpf_call_base) })
82621-
82622-/* Raw code statement block */
82623-
82624-#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
82625- ((struct sock_filter_int) { \
82626- .code = CODE, \
82627- .dst_reg = DST, \
82628- .src_reg = SRC, \
82629- .off = OFF, \
82630- .imm = IMM })
82631-
82632-/* Program exit */
82633-
82634-#define BPF_EXIT_INSN() \
82635- ((struct sock_filter_int) { \
82636- .code = BPF_JMP | BPF_EXIT, \
82637- .dst_reg = 0, \
82638- .src_reg = 0, \
82639- .off = 0, \
82640- .imm = 0 })
82641-
82642-#define bytes_to_bpf_size(bytes) \
82643-({ \
82644- int bpf_size = -EINVAL; \
82645- \
82646- if (bytes == sizeof(u8)) \
82647- bpf_size = BPF_B; \
82648- else if (bytes == sizeof(u16)) \
82649- bpf_size = BPF_H; \
82650- else if (bytes == sizeof(u32)) \
82651- bpf_size = BPF_W; \
82652- else if (bytes == sizeof(u64)) \
82653- bpf_size = BPF_DW; \
82654- \
82655- bpf_size; \
82656-})
82657-
82658-/* Macro to invoke filter function. */
82659-#define SK_RUN_FILTER(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi)
82660-
82661-struct sock_filter_int {
82662- __u8 code; /* opcode */
82663- __u8 dst_reg:4; /* dest register */
82664- __u8 src_reg:4; /* source register */
82665- __s16 off; /* signed offset */
82666- __s32 imm; /* signed immediate constant */
82667-};
82668-
82669 #ifdef CONFIG_COMPAT
82670-/* A struct sock_filter is architecture independent. */
82671+/*
82672+ * A struct sock_filter is architecture independent.
82673+ */
82674 struct compat_sock_fprog {
82675 u16 len;
82676- compat_uptr_t filter; /* struct sock_filter * */
82677+ compat_uptr_t filter; /* struct sock_filter * */
82678 };
82679 #endif
82680
82681-struct sock_fprog_kern {
82682- u16 len;
82683- struct sock_filter *filter;
82684-};
82685-
82686 struct sk_buff;
82687 struct sock;
82688-struct seccomp_data;
82689
82690-struct sk_filter {
82691+struct sk_filter
82692+{
82693 atomic_t refcnt;
82694- u32 jited:1, /* Is our filter JIT'ed? */
82695- len:31; /* Number of filter blocks */
82696- struct sock_fprog_kern *orig_prog; /* Original BPF program */
82697+ unsigned int len; /* Number of filter blocks */
82698 struct rcu_head rcu;
82699 unsigned int (*bpf_func)(const struct sk_buff *skb,
82700- const struct sock_filter_int *filter);
82701+ const struct sock_filter *filter);
82702 union {
82703- struct sock_filter insns[0];
82704- struct sock_filter_int insnsi[0];
82705+ struct sock_filter insns[0];
82706 struct work_struct work;
82707 };
82708 };
82709@@ -343,76 +41,25 @@ static inline unsigned int sk_filter_size(unsigned int proglen)
82710 offsetof(struct sk_filter, insns[proglen]));
82711 }
82712
82713-#define sk_filter_proglen(fprog) \
82714- (fprog->len * sizeof(fprog->filter[0]))
82715-
82716-int sk_filter(struct sock *sk, struct sk_buff *skb);
82717-
82718-void sk_filter_select_runtime(struct sk_filter *fp);
82719-void sk_filter_free(struct sk_filter *fp);
82720-
82721-int sk_convert_filter(struct sock_filter *prog, int len,
82722- struct sock_filter_int *new_prog, int *new_len);
82723-
82724-int sk_unattached_filter_create(struct sk_filter **pfp,
82725- struct sock_fprog_kern *fprog);
82726-void sk_unattached_filter_destroy(struct sk_filter *fp);
82727-
82728-int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
82729-int sk_detach_filter(struct sock *sk);
82730-
82731-int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
82732-int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
82733- unsigned int len);
82734-
82735-void sk_filter_charge(struct sock *sk, struct sk_filter *fp);
82736-void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
82737-
82738-u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
82739-void bpf_int_jit_compile(struct sk_filter *fp);
82740-
82741-#define BPF_ANC BIT(15)
82742-
82743-static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
82744-{
82745- BUG_ON(ftest->code & BPF_ANC);
82746-
82747- switch (ftest->code) {
82748- case BPF_LD | BPF_W | BPF_ABS:
82749- case BPF_LD | BPF_H | BPF_ABS:
82750- case BPF_LD | BPF_B | BPF_ABS:
82751-#define BPF_ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \
82752- return BPF_ANC | SKF_AD_##CODE
82753- switch (ftest->k) {
82754- BPF_ANCILLARY(PROTOCOL);
82755- BPF_ANCILLARY(PKTTYPE);
82756- BPF_ANCILLARY(IFINDEX);
82757- BPF_ANCILLARY(NLATTR);
82758- BPF_ANCILLARY(NLATTR_NEST);
82759- BPF_ANCILLARY(MARK);
82760- BPF_ANCILLARY(QUEUE);
82761- BPF_ANCILLARY(HATYPE);
82762- BPF_ANCILLARY(RXHASH);
82763- BPF_ANCILLARY(CPU);
82764- BPF_ANCILLARY(ALU_XOR_X);
82765- BPF_ANCILLARY(VLAN_TAG);
82766- BPF_ANCILLARY(VLAN_TAG_PRESENT);
82767- BPF_ANCILLARY(PAY_OFFSET);
82768- BPF_ANCILLARY(RANDOM);
82769- }
82770- /* Fallthrough. */
82771- default:
82772- return ftest->code;
82773- }
82774-}
82775+extern int sk_filter(struct sock *sk, struct sk_buff *skb);
82776+extern unsigned int sk_run_filter(const struct sk_buff *skb,
82777+ const struct sock_filter *filter);
82778+extern int sk_unattached_filter_create(struct sk_filter **pfp,
82779+ struct sock_fprog *fprog);
82780+extern void sk_unattached_filter_destroy(struct sk_filter *fp);
82781+extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
82782+extern int sk_detach_filter(struct sock *sk);
82783+extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
82784+extern int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned len);
82785+extern void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to);
82786
82787 #ifdef CONFIG_BPF_JIT
82788 #include <stdarg.h>
82789 #include <linux/linkage.h>
82790 #include <linux/printk.h>
82791
82792-void bpf_jit_compile(struct sk_filter *fp);
82793-void bpf_jit_free(struct sk_filter *fp);
82794+extern void bpf_jit_compile(struct sk_filter *fp);
82795+extern void bpf_jit_free(struct sk_filter *fp);
82796
82797 static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
82798 u32 pass, void *image)
82799@@ -423,22 +70,90 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
82800 print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
82801 16, 1, image, proglen, false);
82802 }
82803+#define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns)
82804 #else
82805 #include <linux/slab.h>
82806-
82807 static inline void bpf_jit_compile(struct sk_filter *fp)
82808 {
82809 }
82810-
82811 static inline void bpf_jit_free(struct sk_filter *fp)
82812 {
82813 kfree(fp);
82814 }
82815-#endif /* CONFIG_BPF_JIT */
82816+#define SK_RUN_FILTER(FILTER, SKB) sk_run_filter(SKB, FILTER->insns)
82817+#endif
82818
82819 static inline int bpf_tell_extensions(void)
82820 {
82821 return SKF_AD_MAX;
82822 }
82823
82824+enum {
82825+ BPF_S_RET_K = 1,
82826+ BPF_S_RET_A,
82827+ BPF_S_ALU_ADD_K,
82828+ BPF_S_ALU_ADD_X,
82829+ BPF_S_ALU_SUB_K,
82830+ BPF_S_ALU_SUB_X,
82831+ BPF_S_ALU_MUL_K,
82832+ BPF_S_ALU_MUL_X,
82833+ BPF_S_ALU_DIV_X,
82834+ BPF_S_ALU_MOD_K,
82835+ BPF_S_ALU_MOD_X,
82836+ BPF_S_ALU_AND_K,
82837+ BPF_S_ALU_AND_X,
82838+ BPF_S_ALU_OR_K,
82839+ BPF_S_ALU_OR_X,
82840+ BPF_S_ALU_XOR_K,
82841+ BPF_S_ALU_XOR_X,
82842+ BPF_S_ALU_LSH_K,
82843+ BPF_S_ALU_LSH_X,
82844+ BPF_S_ALU_RSH_K,
82845+ BPF_S_ALU_RSH_X,
82846+ BPF_S_ALU_NEG,
82847+ BPF_S_LD_W_ABS,
82848+ BPF_S_LD_H_ABS,
82849+ BPF_S_LD_B_ABS,
82850+ BPF_S_LD_W_LEN,
82851+ BPF_S_LD_W_IND,
82852+ BPF_S_LD_H_IND,
82853+ BPF_S_LD_B_IND,
82854+ BPF_S_LD_IMM,
82855+ BPF_S_LDX_W_LEN,
82856+ BPF_S_LDX_B_MSH,
82857+ BPF_S_LDX_IMM,
82858+ BPF_S_MISC_TAX,
82859+ BPF_S_MISC_TXA,
82860+ BPF_S_ALU_DIV_K,
82861+ BPF_S_LD_MEM,
82862+ BPF_S_LDX_MEM,
82863+ BPF_S_ST,
82864+ BPF_S_STX,
82865+ BPF_S_JMP_JA,
82866+ BPF_S_JMP_JEQ_K,
82867+ BPF_S_JMP_JEQ_X,
82868+ BPF_S_JMP_JGE_K,
82869+ BPF_S_JMP_JGE_X,
82870+ BPF_S_JMP_JGT_K,
82871+ BPF_S_JMP_JGT_X,
82872+ BPF_S_JMP_JSET_K,
82873+ BPF_S_JMP_JSET_X,
82874+ /* Ancillary data */
82875+ BPF_S_ANC_PROTOCOL,
82876+ BPF_S_ANC_PKTTYPE,
82877+ BPF_S_ANC_IFINDEX,
82878+ BPF_S_ANC_NLATTR,
82879+ BPF_S_ANC_NLATTR_NEST,
82880+ BPF_S_ANC_MARK,
82881+ BPF_S_ANC_QUEUE,
82882+ BPF_S_ANC_HATYPE,
82883+ BPF_S_ANC_RXHASH,
82884+ BPF_S_ANC_CPU,
82885+ BPF_S_ANC_ALU_XOR_X,
82886+ BPF_S_ANC_SECCOMP_LD_W,
82887+ BPF_S_ANC_VLAN_TAG,
82888+ BPF_S_ANC_VLAN_TAG_PRESENT,
82889+ BPF_S_ANC_PAY_OFFSET,
82890+};
82891+
82892 #endif /* __LINUX_FILTER_H__ */
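This hunk reverts linux/filter.h to the pre-eBPF, classic-BPF-only API: the internal sock_filter_int instruction set and its builder macros are dropped, the interpreter entry point sk_run_filter() returns, SK_RUN_FILTER dispatches over the original insns array, and the BPF_S_* enum is the interpreter's decoded opcode space. A minimal example against the restored API ("accept everything" is the simplest possible classic program):

    #include <linux/filter.h>
    #include <linux/skbuff.h>

    static const struct sock_filter accept_all[] = {
        /* unconditionally return the maximum snap length */
        BPF_STMT(BPF_RET | BPF_K, 0xffffffffU),
    };

    static unsigned int run_example(const struct sk_buff *skb)
    {
        /* 0 means drop; nonzero is the number of bytes to accept */
        return sk_run_filter(skb, accept_all);
    }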
82893diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
82894index 8293262..2b3b8bd 100644
82895--- a/include/linux/frontswap.h
82896+++ b/include/linux/frontswap.h
82897@@ -11,7 +11,7 @@ struct frontswap_ops {
82898 int (*load)(unsigned, pgoff_t, struct page *);
82899 void (*invalidate_page)(unsigned, pgoff_t);
82900 void (*invalidate_area)(unsigned);
82901-};
82902+} __no_const;
82903
82904 extern bool frontswap_enabled;
82905 extern struct frontswap_ops *
82906diff --git a/include/linux/fs.h b/include/linux/fs.h
82907index e11d60c..901317a 100644
82908--- a/include/linux/fs.h
82909+++ b/include/linux/fs.h
82910@@ -401,7 +401,7 @@ struct address_space {
82911 spinlock_t private_lock; /* for use by the address_space */
82912 struct list_head private_list; /* ditto */
82913 void *private_data; /* ditto */
82914-} __attribute__((aligned(sizeof(long))));
82915+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
82916 /*
82917 * On most architectures that alignment is already the case; but
82918 * must be enforced here for CRIS, to let the least significant bit
82919@@ -444,7 +444,7 @@ struct block_device {
82920 int bd_fsfreeze_count;
82921 /* Mutex for freeze */
82922 struct mutex bd_fsfreeze_mutex;
82923-};
82924+} __randomize_layout;
82925
82926 /*
82927 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
82928@@ -588,7 +588,7 @@ struct inode {
82929 #endif
82930
82931 void *i_private; /* fs or device private pointer */
82932-};
82933+} __randomize_layout;
82934
82935 static inline int inode_unhashed(struct inode *inode)
82936 {
82937@@ -781,7 +781,7 @@ struct file {
82938 struct list_head f_tfile_llink;
82939 #endif /* #ifdef CONFIG_EPOLL */
82940 struct address_space *f_mapping;
82941-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
82942+} __attribute__((aligned(4))) __randomize_layout; /* lest something weird decides that 2 is OK */
82943
82944 struct file_handle {
82945 __u32 handle_bytes;
82946@@ -909,7 +909,7 @@ struct file_lock {
82947 int state; /* state of grant or error if -ve */
82948 } afs;
82949 } fl_u;
82950-};
82951+} __randomize_layout;
82952
82953 /* The following constant reflects the upper bound of the file/locking space */
82954 #ifndef OFFSET_MAX
82955@@ -1258,7 +1258,7 @@ struct super_block {
82956 struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
82957 struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
82958 struct rcu_head rcu;
82959-};
82960+} __randomize_layout;
82961
82962 extern struct timespec current_fs_time(struct super_block *sb);
82963
82964@@ -1484,7 +1484,8 @@ struct file_operations {
82965 long (*fallocate)(struct file *file, int mode, loff_t offset,
82966 loff_t len);
82967 int (*show_fdinfo)(struct seq_file *m, struct file *f);
82968-};
82969+} __do_const __randomize_layout;
82970+typedef struct file_operations __no_const file_operations_no_const;
82971
82972 struct inode_operations {
82973 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
82974@@ -2769,4 +2770,14 @@ static inline bool dir_relax(struct inode *inode)
82975 return !IS_DEADDIR(inode);
82976 }
82977
82978+static inline bool is_sidechannel_device(const struct inode *inode)
82979+{
82980+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
82981+ umode_t mode = inode->i_mode;
82982+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
82983+#else
82984+ return false;
82985+#endif
82986+}
82987+
82988 #endif /* _LINUX_FS_H */
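The new is_sidechannel_device() helper classifies any world-readable or world-writable character or block device as a potential timing side channel. A sketch of a caller (hypothetical function, in the style of the fsnotify hooks this patch adds a little further on):

static void example_account_access(struct file *file)
{
	struct inode *inode = file_inode(file);

	/* never emit events whose timing could leak another
	 * user's I/O on a shared device node */
	if (is_sidechannel_device(inode))
		return;
	/* ... notify/account as usual ... */
}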
82989diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
82990index 0efc3e6..fd23610 100644
82991--- a/include/linux/fs_struct.h
82992+++ b/include/linux/fs_struct.h
82993@@ -6,13 +6,13 @@
82994 #include <linux/seqlock.h>
82995
82996 struct fs_struct {
82997- int users;
82998+ atomic_t users;
82999 spinlock_t lock;
83000 seqcount_t seq;
83001 int umask;
83002 int in_exec;
83003 struct path root, pwd;
83004-};
83005+} __randomize_layout;
83006
83007 extern struct kmem_cache *fs_cachep;
83008
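The fs_struct conversion above turns the plain int reference count into an atomic_t, so call sites that did fs->users++ under fs->lock switch to atomic ops and fall under PaX REFCOUNT overflow detection. A minimal sketch of the adjusted usage (hypothetical helpers, mirroring what this patch does elsewhere in kernel/fork.c and fs/fs_struct.c):

static void example_get_fs(struct fs_struct *fs)
{
	atomic_inc(&fs->users);			/* was: fs->users++ */
}

static bool example_put_fs(struct fs_struct *fs)
{
	return atomic_dec_and_test(&fs->users);	/* last reference gone */
}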
83009diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
83010index 7714849..a4a5c7a 100644
83011--- a/include/linux/fscache-cache.h
83012+++ b/include/linux/fscache-cache.h
83013@@ -113,7 +113,7 @@ struct fscache_operation {
83014 fscache_operation_release_t release;
83015 };
83016
83017-extern atomic_t fscache_op_debug_id;
83018+extern atomic_unchecked_t fscache_op_debug_id;
83019 extern void fscache_op_work_func(struct work_struct *work);
83020
83021 extern void fscache_enqueue_operation(struct fscache_operation *);
83022@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
83023 INIT_WORK(&op->work, fscache_op_work_func);
83024 atomic_set(&op->usage, 1);
83025 op->state = FSCACHE_OP_ST_INITIALISED;
83026- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
83027+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
83028 op->processor = processor;
83029 op->release = release;
83030 INIT_LIST_HEAD(&op->pend_link);
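atomic_unchecked_t is the escape hatch for PaX REFCOUNT: ordinary atomic_t operations gain overflow detection, while counters whose wraparound is harmless, like this debug ID, move to the _unchecked variants so they never trip the detector. A sketch of the pattern, assuming PAX_REFCOUNT semantics (with the feature off, the _unchecked API behaves exactly like atomic_t):

static atomic_unchecked_t example_op_id = ATOMIC_INIT(0);

static void example_init_op(struct fscache_operation *op)
{
	/* an ID generator may wrap; that is not a refcount bug */
	op->debug_id = atomic_inc_return_unchecked(&example_op_id);
}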
83031diff --git a/include/linux/fscache.h b/include/linux/fscache.h
83032index 115bb81..e7b812b 100644
83033--- a/include/linux/fscache.h
83034+++ b/include/linux/fscache.h
83035@@ -152,7 +152,7 @@ struct fscache_cookie_def {
83036 * - this is mandatory for any object that may have data
83037 */
83038 void (*now_uncached)(void *cookie_netfs_data);
83039-};
83040+} __do_const;
83041
83042 /*
83043 * fscache cached network filesystem type
83044diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
83045index 1c804b0..1432c2b 100644
83046--- a/include/linux/fsnotify.h
83047+++ b/include/linux/fsnotify.h
83048@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
83049 struct inode *inode = file_inode(file);
83050 __u32 mask = FS_ACCESS;
83051
83052+ if (is_sidechannel_device(inode))
83053+ return;
83054+
83055 if (S_ISDIR(inode->i_mode))
83056 mask |= FS_ISDIR;
83057
83058@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
83059 struct inode *inode = file_inode(file);
83060 __u32 mask = FS_MODIFY;
83061
83062+ if (is_sidechannel_device(inode))
83063+ return;
83064+
83065 if (S_ISDIR(inode->i_mode))
83066 mask |= FS_ISDIR;
83067
83068@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
83069 */
83070 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
83071 {
83072- return kstrdup(name, GFP_KERNEL);
83073+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
83074 }
83075
83076 /*
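The two hooks above are where GRKERNSEC_DEVICE_SIDECHANNEL actually bites: reads and writes on world-accessible device nodes simply never queue FS_ACCESS/FS_MODIFY events, so an unprivileged watcher cannot time other users' I/O on shared devices. A userspace sketch of the kind of probe this defeats (illustrative only):

#include <sys/inotify.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = inotify_init1(IN_CLOEXEC);

	inotify_add_watch(fd, "/dev/ptmx", IN_ACCESS | IN_MODIFY);
	/* on a patched kernel this read() never returns an event */
	read(fd, buf, sizeof(buf));
	return 0;
}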
83077diff --git a/include/linux/genhd.h b/include/linux/genhd.h
83078index ec274e0..e678159 100644
83079--- a/include/linux/genhd.h
83080+++ b/include/linux/genhd.h
83081@@ -194,7 +194,7 @@ struct gendisk {
83082 struct kobject *slave_dir;
83083
83084 struct timer_rand_state *random;
83085- atomic_t sync_io; /* RAID */
83086+ atomic_unchecked_t sync_io; /* RAID */
83087 struct disk_events *ev;
83088 #ifdef CONFIG_BLK_DEV_INTEGRITY
83089 struct blk_integrity *integrity;
83090@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
83091 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
83092
83093 /* drivers/char/random.c */
83094-extern void add_disk_randomness(struct gendisk *disk);
83095+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
83096 extern void rand_initialize_disk(struct gendisk *disk);
83097
83098 static inline sector_t get_start_sect(struct block_device *bdev)
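The __latent_entropy marking on add_disk_randomness() is consumed by the PaX latent_entropy gcc plugin, which, as far as this patch is concerned, instruments marked functions so that a compile-time random constant, permuted along the executed control-flow path, is folded into a kernel entropy pool as a side effect. A sketch of how the attribute is applied, under that assumption; without the plugin it expands to nothing:

/* no hand-written body changes are needed: the plugin injects the
 * entropy accumulation into the basic blocks of a marked function */
static void __latent_entropy example_event_handler(void)
{
	/* ordinary work here; its control flow perturbs the pool */
}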
83099diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
83100index c0894dd..2fbf10c 100644
83101--- a/include/linux/genl_magic_func.h
83102+++ b/include/linux/genl_magic_func.h
83103@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
83104 },
83105
83106 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
83107-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
83108+static struct genl_ops ZZZ_genl_ops[] = {
83109 #include GENL_MAGIC_INCLUDE_FILE
83110 };
83111
83112diff --git a/include/linux/gfp.h b/include/linux/gfp.h
83113index 6eb1fb3..30fe7e4 100644
83114--- a/include/linux/gfp.h
83115+++ b/include/linux/gfp.h
83116@@ -34,6 +34,13 @@ struct vm_area_struct;
83117 #define ___GFP_NO_KSWAPD 0x400000u
83118 #define ___GFP_OTHER_NODE 0x800000u
83119 #define ___GFP_WRITE 0x1000000u
83120+
83121+#ifdef CONFIG_PAX_USERCOPY_SLABS
83122+#define ___GFP_USERCOPY 0x2000000u
83123+#else
83124+#define ___GFP_USERCOPY 0
83125+#endif
83126+
83127 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
83128
83129 /*
83130@@ -90,6 +97,7 @@ struct vm_area_struct;
83131 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
83132 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
83133 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
83134+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
83135
83136 /*
83137 * This may seem redundant, but it's a way of annotating false positives vs.
83138@@ -97,7 +105,7 @@ struct vm_area_struct;
83139 */
83140 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
83141
83142-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
83143+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
83144 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
83145
83146 /* This equals 0, but use constants in case they ever change */
83147@@ -155,6 +163,8 @@ struct vm_area_struct;
83148 /* 4GB DMA on some platforms */
83149 #define GFP_DMA32 __GFP_DMA32
83150
83151+#define GFP_USERCOPY __GFP_USERCOPY
83152+
83153 /* Convert GFP flags to their corresponding migrate type */
83154 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
83155 {
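The new ___GFP_USERCOPY bit tags allocations whose contents are meant to cross the user/kernel boundary; under PAX_USERCOPY_SLABS such requests are served from dedicated slabs that the copy_to_user()/copy_from_user() hardening can whitelist. A sketch of a tagged allocation (hypothetical function; note GFP_USERCOPY degrades to 0 when the option is off, leaving an ordinary GFP_KERNEL allocation):

static long example_read_blob(void __user *ubuf, size_t len)
{
	void *buf;
	long err = 0;

	buf = kmalloc(len, GFP_KERNEL | GFP_USERCOPY);
	if (!buf)
		return -ENOMEM;
	/* ... fill buf ... */
	if (copy_to_user(ubuf, buf, len))
		err = -EFAULT;
	kfree(buf);
	return err;
}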
83156diff --git a/include/linux/gracl.h b/include/linux/gracl.h
83157new file mode 100644
83158index 0000000..edb2cb6
83159--- /dev/null
83160+++ b/include/linux/gracl.h
83161@@ -0,0 +1,340 @@
83162+#ifndef GR_ACL_H
83163+#define GR_ACL_H
83164+
83165+#include <linux/grdefs.h>
83166+#include <linux/resource.h>
83167+#include <linux/capability.h>
83168+#include <linux/dcache.h>
83169+#include <asm/resource.h>
83170+
83171+/* Major status information */
83172+
83173+#define GR_VERSION "grsecurity 3.0"
83174+#define GRSECURITY_VERSION 0x3000
83175+
83176+enum {
83177+ GR_SHUTDOWN = 0,
83178+ GR_ENABLE = 1,
83179+ GR_SPROLE = 2,
83180+ GR_OLDRELOAD = 3,
83181+ GR_SEGVMOD = 4,
83182+ GR_STATUS = 5,
83183+ GR_UNSPROLE = 6,
83184+ GR_PASSSET = 7,
83185+ GR_SPROLEPAM = 8,
83186+ GR_RELOAD = 9,
83187+};
83188+
83189+/* Password setup definitions
83190+ * kernel/grhash.c */
83191+enum {
83192+ GR_PW_LEN = 128,
83193+ GR_SALT_LEN = 16,
83194+ GR_SHA_LEN = 32,
83195+};
83196+
83197+enum {
83198+ GR_SPROLE_LEN = 64,
83199+};
83200+
83201+enum {
83202+ GR_NO_GLOB = 0,
83203+ GR_REG_GLOB,
83204+ GR_CREATE_GLOB
83205+};
83206+
83207+#define GR_NLIMITS 32
83208+
83209+/* Begin Data Structures */
83210+
83211+struct sprole_pw {
83212+ unsigned char *rolename;
83213+ unsigned char salt[GR_SALT_LEN];
83214+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
83215+};
83216+
83217+struct name_entry {
83218+ __u32 key;
83219+ ino_t inode;
83220+ dev_t device;
83221+ char *name;
83222+ __u16 len;
83223+ __u8 deleted;
83224+ struct name_entry *prev;
83225+ struct name_entry *next;
83226+};
83227+
83228+struct inodev_entry {
83229+ struct name_entry *nentry;
83230+ struct inodev_entry *prev;
83231+ struct inodev_entry *next;
83232+};
83233+
83234+struct acl_role_db {
83235+ struct acl_role_label **r_hash;
83236+ __u32 r_size;
83237+};
83238+
83239+struct inodev_db {
83240+ struct inodev_entry **i_hash;
83241+ __u32 i_size;
83242+};
83243+
83244+struct name_db {
83245+ struct name_entry **n_hash;
83246+ __u32 n_size;
83247+};
83248+
83249+struct crash_uid {
83250+ uid_t uid;
83251+ unsigned long expires;
83252+};
83253+
83254+struct gr_hash_struct {
83255+ void **table;
83256+ void **nametable;
83257+ void *first;
83258+ __u32 table_size;
83259+ __u32 used_size;
83260+ int type;
83261+};
83262+
83263+/* Userspace Grsecurity ACL data structures */
83264+
83265+struct acl_subject_label {
83266+ char *filename;
83267+ ino_t inode;
83268+ dev_t device;
83269+ __u32 mode;
83270+ kernel_cap_t cap_mask;
83271+ kernel_cap_t cap_lower;
83272+ kernel_cap_t cap_invert_audit;
83273+
83274+ struct rlimit res[GR_NLIMITS];
83275+ __u32 resmask;
83276+
83277+ __u8 user_trans_type;
83278+ __u8 group_trans_type;
83279+ uid_t *user_transitions;
83280+ gid_t *group_transitions;
83281+ __u16 user_trans_num;
83282+ __u16 group_trans_num;
83283+
83284+ __u32 sock_families[2];
83285+ __u32 ip_proto[8];
83286+ __u32 ip_type;
83287+ struct acl_ip_label **ips;
83288+ __u32 ip_num;
83289+ __u32 inaddr_any_override;
83290+
83291+ __u32 crashes;
83292+ unsigned long expires;
83293+
83294+ struct acl_subject_label *parent_subject;
83295+ struct gr_hash_struct *hash;
83296+ struct acl_subject_label *prev;
83297+ struct acl_subject_label *next;
83298+
83299+ struct acl_object_label **obj_hash;
83300+ __u32 obj_hash_size;
83301+ __u16 pax_flags;
83302+};
83303+
83304+struct role_allowed_ip {
83305+ __u32 addr;
83306+ __u32 netmask;
83307+
83308+ struct role_allowed_ip *prev;
83309+ struct role_allowed_ip *next;
83310+};
83311+
83312+struct role_transition {
83313+ char *rolename;
83314+
83315+ struct role_transition *prev;
83316+ struct role_transition *next;
83317+};
83318+
83319+struct acl_role_label {
83320+ char *rolename;
83321+ uid_t uidgid;
83322+ __u16 roletype;
83323+
83324+ __u16 auth_attempts;
83325+ unsigned long expires;
83326+
83327+ struct acl_subject_label *root_label;
83328+ struct gr_hash_struct *hash;
83329+
83330+ struct acl_role_label *prev;
83331+ struct acl_role_label *next;
83332+
83333+ struct role_transition *transitions;
83334+ struct role_allowed_ip *allowed_ips;
83335+ uid_t *domain_children;
83336+ __u16 domain_child_num;
83337+
83338+ umode_t umask;
83339+
83340+ struct acl_subject_label **subj_hash;
83341+ __u32 subj_hash_size;
83342+};
83343+
83344+struct user_acl_role_db {
83345+ struct acl_role_label **r_table;
83346+ __u32 num_pointers; /* Number of allocations to track */
83347+ __u32 num_roles; /* Number of roles */
83348+ __u32 num_domain_children; /* Number of domain children */
83349+ __u32 num_subjects; /* Number of subjects */
83350+ __u32 num_objects; /* Number of objects */
83351+};
83352+
83353+struct acl_object_label {
83354+ char *filename;
83355+ ino_t inode;
83356+ dev_t device;
83357+ __u32 mode;
83358+
83359+ struct acl_subject_label *nested;
83360+ struct acl_object_label *globbed;
83361+
83362+ /* next two structures not used */
83363+
83364+ struct acl_object_label *prev;
83365+ struct acl_object_label *next;
83366+};
83367+
83368+struct acl_ip_label {
83369+ char *iface;
83370+ __u32 addr;
83371+ __u32 netmask;
83372+ __u16 low, high;
83373+ __u8 mode;
83374+ __u32 type;
83375+ __u32 proto[8];
83376+
83377+ /* next two structures not used */
83378+
83379+ struct acl_ip_label *prev;
83380+ struct acl_ip_label *next;
83381+};
83382+
83383+struct gr_arg {
83384+ struct user_acl_role_db role_db;
83385+ unsigned char pw[GR_PW_LEN];
83386+ unsigned char salt[GR_SALT_LEN];
83387+ unsigned char sum[GR_SHA_LEN];
83388+ unsigned char sp_role[GR_SPROLE_LEN];
83389+ struct sprole_pw *sprole_pws;
83390+ dev_t segv_device;
83391+ ino_t segv_inode;
83392+ uid_t segv_uid;
83393+ __u16 num_sprole_pws;
83394+ __u16 mode;
83395+};
83396+
83397+struct gr_arg_wrapper {
83398+ struct gr_arg *arg;
83399+ __u32 version;
83400+ __u32 size;
83401+};
83402+
83403+struct subject_map {
83404+ struct acl_subject_label *user;
83405+ struct acl_subject_label *kernel;
83406+ struct subject_map *prev;
83407+ struct subject_map *next;
83408+};
83409+
83410+struct acl_subj_map_db {
83411+ struct subject_map **s_hash;
83412+ __u32 s_size;
83413+};
83414+
83415+struct gr_policy_state {
83416+ struct sprole_pw **acl_special_roles;
83417+ __u16 num_sprole_pws;
83418+ struct acl_role_label *kernel_role;
83419+ struct acl_role_label *role_list;
83420+ struct acl_role_label *default_role;
83421+ struct acl_role_db acl_role_set;
83422+ struct acl_subj_map_db subj_map_set;
83423+ struct name_db name_set;
83424+ struct inodev_db inodev_set;
83425+};
83426+
83427+struct gr_alloc_state {
83428+ unsigned long alloc_stack_next;
83429+ unsigned long alloc_stack_size;
83430+ void **alloc_stack;
83431+};
83432+
83433+struct gr_reload_state {
83434+ struct gr_policy_state oldpolicy;
83435+ struct gr_alloc_state oldalloc;
83436+ struct gr_policy_state newpolicy;
83437+ struct gr_alloc_state newalloc;
83438+ struct gr_policy_state *oldpolicy_ptr;
83439+ struct gr_alloc_state *oldalloc_ptr;
83440+ unsigned char oldmode;
83441+};
83442+
83443+/* End Data Structures Section */
83444+
83445+/* Hash functions generated by empirical testing by Brad Spengler.
83446+   Makes good use of the low bits of the inode.  Generally 0-1 iterations
83447+   of the loop for a successful match, 0-3 for an unsuccessful match.
83448+   Shift/add algorithm with modulus of table size and an XOR. */
83449+
83450+static __inline__ unsigned int
83451+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
83452+{
83453+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
83454+}
83455+
83456+static __inline__ unsigned int
83457+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
83458+{
83459+ return ((const unsigned long)userp % sz);
83460+}
83461+
83462+static __inline__ unsigned int
83463+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
83464+{
83465+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
83466+}
83467+
83468+static __inline__ unsigned int
83469+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
83470+{
83471+ return full_name_hash((const unsigned char *)name, len) % sz;
83472+}
83473+
83474+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
83475+ subj = NULL; \
83476+ iter = 0; \
83477+ while (iter < role->subj_hash_size) { \
83478+ if (subj == NULL) \
83479+ subj = role->subj_hash[iter]; \
83480+ if (subj == NULL) { \
83481+ iter++; \
83482+ continue; \
83483+ }
83484+
83485+#define FOR_EACH_SUBJECT_END(subj,iter) \
83486+ subj = subj->next; \
83487+ if (subj == NULL) \
83488+ iter++; \
83489+ }
83490+
83491+
83492+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
83493+ subj = role->hash->first; \
83494+ while (subj != NULL) {
83495+
83496+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
83497+ subj = subj->next; \
83498+ }
83499+
83500+#endif
83501+
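The FOR_EACH_SUBJECT_START/END pair above expands to an open while loop plus its closing brace, so the two macros must always be used together and in order. A sketch of walking every subject hashed under a role (hypothetical function):

static void example_visit_role(struct acl_role_label *role)
{
	struct acl_subject_label *subj;
	unsigned int iter;

	FOR_EACH_SUBJECT_START(role, subj, iter)
		printk(KERN_INFO "subject: %s\n", subj->filename);
	FOR_EACH_SUBJECT_END(subj, iter)
}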
83502diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
83503new file mode 100644
83504index 0000000..33ebd1f
83505--- /dev/null
83506+++ b/include/linux/gracl_compat.h
83507@@ -0,0 +1,156 @@
83508+#ifndef GR_ACL_COMPAT_H
83509+#define GR_ACL_COMPAT_H
83510+
83511+#include <linux/resource.h>
83512+#include <asm/resource.h>
83513+
83514+struct sprole_pw_compat {
83515+ compat_uptr_t rolename;
83516+ unsigned char salt[GR_SALT_LEN];
83517+ unsigned char sum[GR_SHA_LEN];
83518+};
83519+
83520+struct gr_hash_struct_compat {
83521+ compat_uptr_t table;
83522+ compat_uptr_t nametable;
83523+ compat_uptr_t first;
83524+ __u32 table_size;
83525+ __u32 used_size;
83526+ int type;
83527+};
83528+
83529+struct acl_subject_label_compat {
83530+ compat_uptr_t filename;
83531+ compat_ino_t inode;
83532+ __u32 device;
83533+ __u32 mode;
83534+ kernel_cap_t cap_mask;
83535+ kernel_cap_t cap_lower;
83536+ kernel_cap_t cap_invert_audit;
83537+
83538+ struct compat_rlimit res[GR_NLIMITS];
83539+ __u32 resmask;
83540+
83541+ __u8 user_trans_type;
83542+ __u8 group_trans_type;
83543+ compat_uptr_t user_transitions;
83544+ compat_uptr_t group_transitions;
83545+ __u16 user_trans_num;
83546+ __u16 group_trans_num;
83547+
83548+ __u32 sock_families[2];
83549+ __u32 ip_proto[8];
83550+ __u32 ip_type;
83551+ compat_uptr_t ips;
83552+ __u32 ip_num;
83553+ __u32 inaddr_any_override;
83554+
83555+ __u32 crashes;
83556+ compat_ulong_t expires;
83557+
83558+ compat_uptr_t parent_subject;
83559+ compat_uptr_t hash;
83560+ compat_uptr_t prev;
83561+ compat_uptr_t next;
83562+
83563+ compat_uptr_t obj_hash;
83564+ __u32 obj_hash_size;
83565+ __u16 pax_flags;
83566+};
83567+
83568+struct role_allowed_ip_compat {
83569+ __u32 addr;
83570+ __u32 netmask;
83571+
83572+ compat_uptr_t prev;
83573+ compat_uptr_t next;
83574+};
83575+
83576+struct role_transition_compat {
83577+ compat_uptr_t rolename;
83578+
83579+ compat_uptr_t prev;
83580+ compat_uptr_t next;
83581+};
83582+
83583+struct acl_role_label_compat {
83584+ compat_uptr_t rolename;
83585+ uid_t uidgid;
83586+ __u16 roletype;
83587+
83588+ __u16 auth_attempts;
83589+ compat_ulong_t expires;
83590+
83591+ compat_uptr_t root_label;
83592+ compat_uptr_t hash;
83593+
83594+ compat_uptr_t prev;
83595+ compat_uptr_t next;
83596+
83597+ compat_uptr_t transitions;
83598+ compat_uptr_t allowed_ips;
83599+ compat_uptr_t domain_children;
83600+ __u16 domain_child_num;
83601+
83602+ umode_t umask;
83603+
83604+ compat_uptr_t subj_hash;
83605+ __u32 subj_hash_size;
83606+};
83607+
83608+struct user_acl_role_db_compat {
83609+ compat_uptr_t r_table;
83610+ __u32 num_pointers;
83611+ __u32 num_roles;
83612+ __u32 num_domain_children;
83613+ __u32 num_subjects;
83614+ __u32 num_objects;
83615+};
83616+
83617+struct acl_object_label_compat {
83618+ compat_uptr_t filename;
83619+ compat_ino_t inode;
83620+ __u32 device;
83621+ __u32 mode;
83622+
83623+ compat_uptr_t nested;
83624+ compat_uptr_t globbed;
83625+
83626+ compat_uptr_t prev;
83627+ compat_uptr_t next;
83628+};
83629+
83630+struct acl_ip_label_compat {
83631+ compat_uptr_t iface;
83632+ __u32 addr;
83633+ __u32 netmask;
83634+ __u16 low, high;
83635+ __u8 mode;
83636+ __u32 type;
83637+ __u32 proto[8];
83638+
83639+ compat_uptr_t prev;
83640+ compat_uptr_t next;
83641+};
83642+
83643+struct gr_arg_compat {
83644+ struct user_acl_role_db_compat role_db;
83645+ unsigned char pw[GR_PW_LEN];
83646+ unsigned char salt[GR_SALT_LEN];
83647+ unsigned char sum[GR_SHA_LEN];
83648+ unsigned char sp_role[GR_SPROLE_LEN];
83649+ compat_uptr_t sprole_pws;
83650+ __u32 segv_device;
83651+ compat_ino_t segv_inode;
83652+ uid_t segv_uid;
83653+ __u16 num_sprole_pws;
83654+ __u16 mode;
83655+};
83656+
83657+struct gr_arg_wrapper_compat {
83658+ compat_uptr_t arg;
83659+ __u32 version;
83660+ __u32 size;
83661+};
83662+
83663+#endif
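Each *_compat mirror above replaces native pointers with compat_uptr_t and unsigned long with compat_ulong_t, so that a 32-bit userland's gr_arg can be unpacked by a 64-bit kernel without layout surprises. A sketch of the unpacking direction (hypothetical helper):

static int example_fetch_rolename(const struct role_transition_compat *c,
				  char *buf, unsigned int len)
{
	/* compat_ptr() widens a compat_uptr_t back into a user
	 * pointer of native size before the usual copy_from_user() */
	const char __user *name = compat_ptr(c->rolename);

	return copy_from_user(buf, name, len) ? -EFAULT : 0;
}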
83664diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
83665new file mode 100644
83666index 0000000..323ecf2
83667--- /dev/null
83668+++ b/include/linux/gralloc.h
83669@@ -0,0 +1,9 @@
83670+#ifndef __GRALLOC_H
83671+#define __GRALLOC_H
83672+
83673+void acl_free_all(void);
83674+int acl_alloc_stack_init(unsigned long size);
83675+void *acl_alloc(unsigned long len);
83676+void *acl_alloc_num(unsigned long num, unsigned long len);
83677+
83678+#endif
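gralloc.h describes a small region-style allocator for policy loading: objects are pushed onto one allocation stack while the RBAC policy is built and are all released at once by acl_free_all(), so no individual object is ever kfree()d. A sketch of the intended lifecycle (hypothetical caller; the return conventions are assumed, not confirmed by this header):

static int example_load_policy(const struct user_acl_role_db *db)
{
	struct name_entry *entry;

	if (!acl_alloc_stack_init(db->num_pointers))	/* size the stack */
		return -ENOMEM;
	entry = acl_alloc(sizeof(*entry));		/* one object */
	if (entry == NULL) {
		acl_free_all();				/* drop everything */
		return -ENOMEM;
	}
	/* ... build the rest of the policy; nothing is freed piecemeal ... */
	return 0;
}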
83679diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
83680new file mode 100644
83681index 0000000..be66033
83682--- /dev/null
83683+++ b/include/linux/grdefs.h
83684@@ -0,0 +1,140 @@
83685+#ifndef GRDEFS_H
83686+#define GRDEFS_H
83687+
83688+/* Begin grsecurity status declarations */
83689+
83690+enum {
83691+ GR_READY = 0x01,
83692+ GR_STATUS_INIT = 0x00 // disabled state
83693+};
83694+
83695+/* Begin ACL declarations */
83696+
83697+/* Role flags */
83698+
83699+enum {
83700+ GR_ROLE_USER = 0x0001,
83701+ GR_ROLE_GROUP = 0x0002,
83702+ GR_ROLE_DEFAULT = 0x0004,
83703+ GR_ROLE_SPECIAL = 0x0008,
83704+ GR_ROLE_AUTH = 0x0010,
83705+ GR_ROLE_NOPW = 0x0020,
83706+ GR_ROLE_GOD = 0x0040,
83707+ GR_ROLE_LEARN = 0x0080,
83708+ GR_ROLE_TPE = 0x0100,
83709+ GR_ROLE_DOMAIN = 0x0200,
83710+ GR_ROLE_PAM = 0x0400,
83711+ GR_ROLE_PERSIST = 0x0800
83712+};
83713+
83714+/* ACL Subject and Object mode flags */
83715+enum {
83716+ GR_DELETED = 0x80000000
83717+};
83718+
83719+/* ACL Object-only mode flags */
83720+enum {
83721+ GR_READ = 0x00000001,
83722+ GR_APPEND = 0x00000002,
83723+ GR_WRITE = 0x00000004,
83724+ GR_EXEC = 0x00000008,
83725+ GR_FIND = 0x00000010,
83726+ GR_INHERIT = 0x00000020,
83727+ GR_SETID = 0x00000040,
83728+ GR_CREATE = 0x00000080,
83729+ GR_DELETE = 0x00000100,
83730+ GR_LINK = 0x00000200,
83731+ GR_AUDIT_READ = 0x00000400,
83732+ GR_AUDIT_APPEND = 0x00000800,
83733+ GR_AUDIT_WRITE = 0x00001000,
83734+ GR_AUDIT_EXEC = 0x00002000,
83735+ GR_AUDIT_FIND = 0x00004000,
83736+ GR_AUDIT_INHERIT= 0x00008000,
83737+ GR_AUDIT_SETID = 0x00010000,
83738+ GR_AUDIT_CREATE = 0x00020000,
83739+ GR_AUDIT_DELETE = 0x00040000,
83740+ GR_AUDIT_LINK = 0x00080000,
83741+ GR_PTRACERD = 0x00100000,
83742+ GR_NOPTRACE = 0x00200000,
83743+ GR_SUPPRESS = 0x00400000,
83744+ GR_NOLEARN = 0x00800000,
83745+ GR_INIT_TRANSFER= 0x01000000
83746+};
83747+
83748+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
83749+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
83750+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
83751+
83752+/* ACL subject-only mode flags */
83753+enum {
83754+ GR_KILL = 0x00000001,
83755+ GR_VIEW = 0x00000002,
83756+ GR_PROTECTED = 0x00000004,
83757+ GR_LEARN = 0x00000008,
83758+ GR_OVERRIDE = 0x00000010,
83759+ /* just a placeholder, this mode is only used in userspace */
83760+ GR_DUMMY = 0x00000020,
83761+ GR_PROTSHM = 0x00000040,
83762+ GR_KILLPROC = 0x00000080,
83763+ GR_KILLIPPROC = 0x00000100,
83764+ /* just a placeholder, this mode is only used in userspace */
83765+ GR_NOTROJAN = 0x00000200,
83766+ GR_PROTPROCFD = 0x00000400,
83767+ GR_PROCACCT = 0x00000800,
83768+ GR_RELAXPTRACE = 0x00001000,
83769+ //GR_NESTED = 0x00002000,
83770+ GR_INHERITLEARN = 0x00004000,
83771+ GR_PROCFIND = 0x00008000,
83772+ GR_POVERRIDE = 0x00010000,
83773+ GR_KERNELAUTH = 0x00020000,
83774+ GR_ATSECURE = 0x00040000,
83775+ GR_SHMEXEC = 0x00080000
83776+};
83777+
83778+enum {
83779+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
83780+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
83781+ GR_PAX_ENABLE_MPROTECT = 0x0004,
83782+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
83783+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
83784+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
83785+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
83786+ GR_PAX_DISABLE_MPROTECT = 0x0400,
83787+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
83788+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
83789+};
83790+
83791+enum {
83792+ GR_ID_USER = 0x01,
83793+ GR_ID_GROUP = 0x02,
83794+};
83795+
83796+enum {
83797+ GR_ID_ALLOW = 0x01,
83798+ GR_ID_DENY = 0x02,
83799+};
83800+
83801+#define GR_CRASH_RES 31
83802+#define GR_UIDTABLE_MAX 500
83803+
83804+/* begin resource learning section */
83805+enum {
83806+ GR_RLIM_CPU_BUMP = 60,
83807+ GR_RLIM_FSIZE_BUMP = 50000,
83808+ GR_RLIM_DATA_BUMP = 10000,
83809+ GR_RLIM_STACK_BUMP = 1000,
83810+ GR_RLIM_CORE_BUMP = 10000,
83811+ GR_RLIM_RSS_BUMP = 500000,
83812+ GR_RLIM_NPROC_BUMP = 1,
83813+ GR_RLIM_NOFILE_BUMP = 5,
83814+ GR_RLIM_MEMLOCK_BUMP = 50000,
83815+ GR_RLIM_AS_BUMP = 500000,
83816+ GR_RLIM_LOCKS_BUMP = 2,
83817+ GR_RLIM_SIGPENDING_BUMP = 5,
83818+ GR_RLIM_MSGQUEUE_BUMP = 10000,
83819+ GR_RLIM_NICE_BUMP = 1,
83820+ GR_RLIM_RTPRIO_BUMP = 1,
83821+ GR_RLIM_RTTIME_BUMP = 1000000
83822+};
83823+
83824+#endif
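Note the deliberate bit layout in the object modes above: each audit flag sits exactly ten bit positions above its access flag (GR_READ is bit 0 and GR_AUDIT_READ bit 10, through GR_LINK at bit 9 and GR_AUDIT_LINK at bit 19). That lets a requested access mask be turned into its audit mask with a single shift; to_gr_audit() in grinternal.h presumably does the equivalent of this sketch:

static __u32 example_to_audit(__u32 reqmode)
{
	/* lift access bits into the audit range, mask off the rest */
	return (reqmode << 10) & GR_AUDITS;
}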
83825diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
83826new file mode 100644
83827index 0000000..d25522e
83828--- /dev/null
83829+++ b/include/linux/grinternal.h
83830@@ -0,0 +1,229 @@
83831+#ifndef __GRINTERNAL_H
83832+#define __GRINTERNAL_H
83833+
83834+#ifdef CONFIG_GRKERNSEC
83835+
83836+#include <linux/fs.h>
83837+#include <linux/mnt_namespace.h>
83838+#include <linux/nsproxy.h>
83839+#include <linux/gracl.h>
83840+#include <linux/grdefs.h>
83841+#include <linux/grmsg.h>
83842+
83843+void gr_add_learn_entry(const char *fmt, ...)
83844+ __attribute__ ((format (printf, 1, 2)));
83845+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
83846+ const struct vfsmount *mnt);
83847+__u32 gr_check_create(const struct dentry *new_dentry,
83848+ const struct dentry *parent,
83849+ const struct vfsmount *mnt, const __u32 mode);
83850+int gr_check_protected_task(const struct task_struct *task);
83851+__u32 to_gr_audit(const __u32 reqmode);
83852+int gr_set_acls(const int type);
83853+int gr_acl_is_enabled(void);
83854+char gr_roletype_to_char(void);
83855+
83856+void gr_handle_alertkill(struct task_struct *task);
83857+char *gr_to_filename(const struct dentry *dentry,
83858+ const struct vfsmount *mnt);
83859+char *gr_to_filename1(const struct dentry *dentry,
83860+ const struct vfsmount *mnt);
83861+char *gr_to_filename2(const struct dentry *dentry,
83862+ const struct vfsmount *mnt);
83863+char *gr_to_filename3(const struct dentry *dentry,
83864+ const struct vfsmount *mnt);
83865+
83866+extern int grsec_enable_ptrace_readexec;
83867+extern int grsec_enable_harden_ptrace;
83868+extern int grsec_enable_link;
83869+extern int grsec_enable_fifo;
83870+extern int grsec_enable_execve;
83871+extern int grsec_enable_shm;
83872+extern int grsec_enable_execlog;
83873+extern int grsec_enable_signal;
83874+extern int grsec_enable_audit_ptrace;
83875+extern int grsec_enable_forkfail;
83876+extern int grsec_enable_time;
83877+extern int grsec_enable_rofs;
83878+extern int grsec_deny_new_usb;
83879+extern int grsec_enable_chroot_shmat;
83880+extern int grsec_enable_chroot_mount;
83881+extern int grsec_enable_chroot_double;
83882+extern int grsec_enable_chroot_pivot;
83883+extern int grsec_enable_chroot_chdir;
83884+extern int grsec_enable_chroot_chmod;
83885+extern int grsec_enable_chroot_mknod;
83886+extern int grsec_enable_chroot_fchdir;
83887+extern int grsec_enable_chroot_nice;
83888+extern int grsec_enable_chroot_execlog;
83889+extern int grsec_enable_chroot_caps;
83890+extern int grsec_enable_chroot_sysctl;
83891+extern int grsec_enable_chroot_unix;
83892+extern int grsec_enable_symlinkown;
83893+extern kgid_t grsec_symlinkown_gid;
83894+extern int grsec_enable_tpe;
83895+extern kgid_t grsec_tpe_gid;
83896+extern int grsec_enable_tpe_all;
83897+extern int grsec_enable_tpe_invert;
83898+extern int grsec_enable_socket_all;
83899+extern kgid_t grsec_socket_all_gid;
83900+extern int grsec_enable_socket_client;
83901+extern kgid_t grsec_socket_client_gid;
83902+extern int grsec_enable_socket_server;
83903+extern kgid_t grsec_socket_server_gid;
83904+extern kgid_t grsec_audit_gid;
83905+extern int grsec_enable_group;
83906+extern int grsec_enable_log_rwxmaps;
83907+extern int grsec_enable_mount;
83908+extern int grsec_enable_chdir;
83909+extern int grsec_resource_logging;
83910+extern int grsec_enable_blackhole;
83911+extern int grsec_lastack_retries;
83912+extern int grsec_enable_brute;
83913+extern int grsec_enable_harden_ipc;
83914+extern int grsec_lock;
83915+
83916+extern spinlock_t grsec_alert_lock;
83917+extern unsigned long grsec_alert_wtime;
83918+extern unsigned long grsec_alert_fyet;
83919+
83920+extern spinlock_t grsec_audit_lock;
83921+
83922+extern rwlock_t grsec_exec_file_lock;
83923+
83924+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
83925+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
83926+ (tsk)->exec_file->f_path.mnt) : "/")
83927+
83928+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
83929+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
83930+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
83931+
83932+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
83933+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
83934+ (tsk)->exec_file->f_path.mnt) : "/")
83935+
83936+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
83937+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
83938+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
83939+
83940+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
83941+
83942+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
83943+
83944+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
83945+{
83946+ if (file1 && file2) {
83947+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
83948+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
83949+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
83950+ return true;
83951+ }
83952+
83953+ return false;
83954+}
83955+
83956+#define GR_CHROOT_CAPS {{ \
83957+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
83958+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
83959+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
83960+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
83961+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
83962+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
83963+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
83964+
83965+#define security_learn(normal_msg,args...) \
83966+({ \
83967+ read_lock(&grsec_exec_file_lock); \
83968+ gr_add_learn_entry(normal_msg "\n", ## args); \
83969+ read_unlock(&grsec_exec_file_lock); \
83970+})
83971+
83972+enum {
83973+ GR_DO_AUDIT,
83974+ GR_DONT_AUDIT,
83975+	/* used for non-audit messages for which we shouldn't kill the task */
83976+ GR_DONT_AUDIT_GOOD
83977+};
83978+
83979+enum {
83980+ GR_TTYSNIFF,
83981+ GR_RBAC,
83982+ GR_RBAC_STR,
83983+ GR_STR_RBAC,
83984+ GR_RBAC_MODE2,
83985+ GR_RBAC_MODE3,
83986+ GR_FILENAME,
83987+ GR_SYSCTL_HIDDEN,
83988+ GR_NOARGS,
83989+ GR_ONE_INT,
83990+ GR_ONE_INT_TWO_STR,
83991+ GR_ONE_STR,
83992+ GR_STR_INT,
83993+ GR_TWO_STR_INT,
83994+ GR_TWO_INT,
83995+ GR_TWO_U64,
83996+ GR_THREE_INT,
83997+ GR_FIVE_INT_TWO_STR,
83998+ GR_TWO_STR,
83999+ GR_THREE_STR,
84000+ GR_FOUR_STR,
84001+ GR_STR_FILENAME,
84002+ GR_FILENAME_STR,
84003+ GR_FILENAME_TWO_INT,
84004+ GR_FILENAME_TWO_INT_STR,
84005+ GR_TEXTREL,
84006+ GR_PTRACE,
84007+ GR_RESOURCE,
84008+ GR_CAP,
84009+ GR_SIG,
84010+ GR_SIG2,
84011+ GR_CRASH1,
84012+ GR_CRASH2,
84013+ GR_PSACCT,
84014+ GR_RWXMAP,
84015+ GR_RWXMAPVMA
84016+};
84017+
84018+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
84019+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
84020+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
84021+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
84022+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
84023+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
84024+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
84025+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
84026+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
84027+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
84028+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
84029+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
84030+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
84031+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
84032+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
84033+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
84034+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
84035+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
84036+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
84037+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
84038+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
84039+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
84040+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
84041+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
84042+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
84043+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
84044+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
84045+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
84046+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
84047+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
84048+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
84049+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
84050+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
84051+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
84052+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
84053+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
84054+
84055+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
84056+
84057+#endif
84058+
84059+#endif
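All of the gr_log_* wrappers above funnel into gr_log_varargs(), whose argtypes tag tells it how to pull the variadic arguments; the matching format strings in grmsg.h (next file) deliberately end in "by " because the logger appends the acting task's path, credentials and parent, the DEFAULTSECMSG fields. A minimal sketch of a call site (hypothetical function; the real gr_log_timechange() in grsec/ is expected to look much like this):

static void example_log_timechange(void)
{
	if (grsec_enable_time)
		gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
}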
84060diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
84061new file mode 100644
84062index 0000000..b02ba9d
84063--- /dev/null
84064+++ b/include/linux/grmsg.h
84065@@ -0,0 +1,117 @@
84066+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
84067+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
84068+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
84069+#define GR_STOPMOD_MSG "denied modification of module state by "
84070+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
84071+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
84072+#define GR_IOPERM_MSG "denied use of ioperm() by "
84073+#define GR_IOPL_MSG "denied use of iopl() by "
84074+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
84075+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
84076+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
84077+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
84078+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
84079+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
84080+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
84081+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
84082+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
84083+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
84084+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
84085+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
84086+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
84087+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
84088+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
84089+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
84090+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
84091+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
84092+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
84093+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
84094+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
84095+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
84096+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
84097+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
84098+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
84099+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
84100+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
84101+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
84102+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
84103+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
84104+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
84105+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
84106+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
84107+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
84108+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
84109+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
84110+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
84111+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
84112+#define GR_CHROOT_FHANDLE_MSG "denied use of file handles inside chroot by "
84113+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
84114+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
84115+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
84116+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
84117+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
84118+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
84119+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
84120+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
84121+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
84122+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
84123+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
84124+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
84125+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
84126+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
84127+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
84128+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
84129+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
84130+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
84131+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
84132+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
84133+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
84134+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
84135+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
84136+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
84137+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
84138+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
84139+#define GR_FAILFORK_MSG "failed fork with errno %s by "
84140+#define GR_NICE_CHROOT_MSG "denied priority change by "
84141+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
84142+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
84143+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
84144+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
84145+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
84146+#define GR_TIME_MSG "time set by "
84147+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
84148+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
84149+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
84150+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
84151+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
84152+#define GR_BIND_MSG "denied bind() by "
84153+#define GR_CONNECT_MSG "denied connect() by "
84154+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
84155+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
84156+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
84157+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
84158+#define GR_CAP_ACL_MSG "use of %s denied for "
84159+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
84160+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
84161+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
84162+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
84163+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
84164+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
84165+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
84166+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
84167+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
84168+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
84169+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
84170+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
84171+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
84172+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
84173+#define GR_VM86_MSG "denied use of vm86 by "
84174+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
84175+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
84176+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
84177+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
84178+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
84179+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
84180+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
84181+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
84182+#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
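Two conventions hold throughout this file: attacker-influenced strings are capped with an explicit precision (%.950s and friends) so one event can never overrun a single log line, and most messages end in "by " for the logger to complete with task identity. A sketch of how one format composes (illustrative argument values; in the kernel it is gr_log_varargs() that performs this expansion):

static void example_compose(void)
{
	char buf[256];

	snprintf(buf, sizeof(buf), GR_OPEN_ACL_MSG,
		 "denied", "/etc/shadow", " reading", "");
	/* buf now holds: "denied open of /etc/shadow for reading by " */
}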
84183diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
84184new file mode 100644
84185index 0000000..10b9635
84186--- /dev/null
84187+++ b/include/linux/grsecurity.h
84188@@ -0,0 +1,254 @@
84189+#ifndef GR_SECURITY_H
84190+#define GR_SECURITY_H
84191+#include <linux/fs.h>
84192+#include <linux/fs_struct.h>
84193+#include <linux/binfmts.h>
84194+#include <linux/gracl.h>
84195+
84196+/* notify of brain-dead configs */
84197+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
84198+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
84199+#endif
84200+#if defined(CONFIG_GRKERNSEC_PROC) && !defined(CONFIG_GRKERNSEC_PROC_USER) && !defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
84201+#error "CONFIG_GRKERNSEC_PROC enabled, but neither CONFIG_GRKERNSEC_PROC_USER nor CONFIG_GRKERNSEC_PROC_USERGROUP enabled"
84202+#endif
84203+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
84204+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
84205+#endif
84206+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
84207+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
84208+#endif
84209+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
84210+#error "CONFIG_PAX enabled, but no PaX options are enabled."
84211+#endif
84212+
84213+int gr_handle_new_usb(void);
84214+
84215+void gr_handle_brute_attach(int dumpable);
84216+void gr_handle_brute_check(void);
84217+void gr_handle_kernel_exploit(void);
84218+
84219+char gr_roletype_to_char(void);
84220+
84221+int gr_proc_is_restricted(void);
84222+
84223+int gr_acl_enable_at_secure(void);
84224+
84225+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
84226+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
84227+
84228+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap);
84229+
84230+void gr_del_task_from_ip_table(struct task_struct *p);
84231+
84232+int gr_pid_is_chrooted(struct task_struct *p);
84233+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
84234+int gr_handle_chroot_nice(void);
84235+int gr_handle_chroot_sysctl(const int op);
84236+int gr_handle_chroot_setpriority(struct task_struct *p,
84237+ const int niceval);
84238+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
84239+int gr_chroot_fhandle(void);
84240+int gr_handle_chroot_chroot(const struct dentry *dentry,
84241+ const struct vfsmount *mnt);
84242+void gr_handle_chroot_chdir(const struct path *path);
84243+int gr_handle_chroot_chmod(const struct dentry *dentry,
84244+ const struct vfsmount *mnt, const int mode);
84245+int gr_handle_chroot_mknod(const struct dentry *dentry,
84246+ const struct vfsmount *mnt, const int mode);
84247+int gr_handle_chroot_mount(const struct dentry *dentry,
84248+ const struct vfsmount *mnt,
84249+ const char *dev_name);
84250+int gr_handle_chroot_pivot(void);
84251+int gr_handle_chroot_unix(const pid_t pid);
84252+
84253+int gr_handle_rawio(const struct inode *inode);
84254+
84255+void gr_handle_ioperm(void);
84256+void gr_handle_iopl(void);
84257+void gr_handle_msr_write(void);
84258+
84259+umode_t gr_acl_umask(void);
84260+
84261+int gr_tpe_allow(const struct file *file);
84262+
84263+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
84264+void gr_clear_chroot_entries(struct task_struct *task);
84265+
84266+void gr_log_forkfail(const int retval);
84267+void gr_log_timechange(void);
84268+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
84269+void gr_log_chdir(const struct dentry *dentry,
84270+ const struct vfsmount *mnt);
84271+void gr_log_chroot_exec(const struct dentry *dentry,
84272+ const struct vfsmount *mnt);
84273+void gr_log_remount(const char *devname, const int retval);
84274+void gr_log_unmount(const char *devname, const int retval);
84275+void gr_log_mount(const char *from, const char *to, const int retval);
84276+void gr_log_textrel(struct vm_area_struct *vma);
84277+void gr_log_ptgnustack(struct file *file);
84278+void gr_log_rwxmmap(struct file *file);
84279+void gr_log_rwxmprotect(struct vm_area_struct *vma);
84280+
84281+int gr_handle_follow_link(const struct inode *parent,
84282+ const struct inode *inode,
84283+ const struct dentry *dentry,
84284+ const struct vfsmount *mnt);
84285+int gr_handle_fifo(const struct dentry *dentry,
84286+ const struct vfsmount *mnt,
84287+ const struct dentry *dir, const int flag,
84288+ const int acc_mode);
84289+int gr_handle_hardlink(const struct dentry *dentry,
84290+ const struct vfsmount *mnt,
84291+ struct inode *inode,
84292+ const int mode, const struct filename *to);
84293+
84294+int gr_is_capable(const int cap);
84295+int gr_is_capable_nolog(const int cap);
84296+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
84297+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
84298+
84299+void gr_copy_label(struct task_struct *tsk);
84300+void gr_handle_crash(struct task_struct *task, const int sig);
84301+int gr_handle_signal(const struct task_struct *p, const int sig);
84302+int gr_check_crash_uid(const kuid_t uid);
84303+int gr_check_protected_task(const struct task_struct *task);
84304+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
84305+int gr_acl_handle_mmap(const struct file *file,
84306+ const unsigned long prot);
84307+int gr_acl_handle_mprotect(const struct file *file,
84308+ const unsigned long prot);
84309+int gr_check_hidden_task(const struct task_struct *tsk);
84310+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
84311+ const struct vfsmount *mnt);
84312+__u32 gr_acl_handle_utime(const struct dentry *dentry,
84313+ const struct vfsmount *mnt);
84314+__u32 gr_acl_handle_access(const struct dentry *dentry,
84315+ const struct vfsmount *mnt, const int fmode);
84316+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
84317+ const struct vfsmount *mnt, umode_t *mode);
84318+__u32 gr_acl_handle_chown(const struct dentry *dentry,
84319+ const struct vfsmount *mnt);
84320+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
84321+ const struct vfsmount *mnt);
84322+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
84323+ const struct vfsmount *mnt);
84324+int gr_handle_ptrace(struct task_struct *task, const long request);
84325+int gr_handle_proc_ptrace(struct task_struct *task);
84326+__u32 gr_acl_handle_execve(const struct dentry *dentry,
84327+ const struct vfsmount *mnt);
84328+int gr_check_crash_exec(const struct file *filp);
84329+int gr_acl_is_enabled(void);
84330+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
84331+ const kgid_t gid);
84332+int gr_set_proc_label(const struct dentry *dentry,
84333+ const struct vfsmount *mnt,
84334+ const int unsafe_flags);
84335+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
84336+ const struct vfsmount *mnt);
84337+__u32 gr_acl_handle_open(const struct dentry *dentry,
84338+ const struct vfsmount *mnt, int acc_mode);
84339+__u32 gr_acl_handle_creat(const struct dentry *dentry,
84340+ const struct dentry *p_dentry,
84341+ const struct vfsmount *p_mnt,
84342+ int open_flags, int acc_mode, const int imode);
84343+void gr_handle_create(const struct dentry *dentry,
84344+ const struct vfsmount *mnt);
84345+void gr_handle_proc_create(const struct dentry *dentry,
84346+ const struct inode *inode);
84347+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
84348+ const struct dentry *parent_dentry,
84349+ const struct vfsmount *parent_mnt,
84350+ const int mode);
84351+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
84352+ const struct dentry *parent_dentry,
84353+ const struct vfsmount *parent_mnt);
84354+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
84355+ const struct vfsmount *mnt);
84356+void gr_handle_delete(const ino_t ino, const dev_t dev);
84357+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
84358+ const struct vfsmount *mnt);
84359+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
84360+ const struct dentry *parent_dentry,
84361+ const struct vfsmount *parent_mnt,
84362+ const struct filename *from);
84363+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
84364+ const struct dentry *parent_dentry,
84365+ const struct vfsmount *parent_mnt,
84366+ const struct dentry *old_dentry,
84367+ const struct vfsmount *old_mnt, const struct filename *to);
84368+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
84369+int gr_acl_handle_rename(struct dentry *new_dentry,
84370+ struct dentry *parent_dentry,
84371+ const struct vfsmount *parent_mnt,
84372+ struct dentry *old_dentry,
84373+ struct inode *old_parent_inode,
84374+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags);
84375+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
84376+ struct dentry *old_dentry,
84377+ struct dentry *new_dentry,
84378+ struct vfsmount *mnt, const __u8 replace, unsigned int flags);
84379+__u32 gr_check_link(const struct dentry *new_dentry,
84380+ const struct dentry *parent_dentry,
84381+ const struct vfsmount *parent_mnt,
84382+ const struct dentry *old_dentry,
84383+ const struct vfsmount *old_mnt);
84384+int gr_acl_handle_filldir(const struct file *file, const char *name,
84385+ const unsigned int namelen, const ino_t ino);
84386+
84387+__u32 gr_acl_handle_unix(const struct dentry *dentry,
84388+ const struct vfsmount *mnt);
84389+void gr_acl_handle_exit(void);
84390+void gr_acl_handle_psacct(struct task_struct *task, const long code);
84391+int gr_acl_handle_procpidmem(const struct task_struct *task);
84392+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
84393+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
84394+void gr_audit_ptrace(struct task_struct *task);
84395+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
84396+void gr_put_exec_file(struct task_struct *task);
84397+
84398+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
84399+
84400+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
84401+extern void gr_learn_resource(const struct task_struct *task, const int res,
84402+ const unsigned long wanted, const int gt);
84403+#else
84404+static inline void gr_learn_resource(const struct task_struct *task, const int res,
84405+ const unsigned long wanted, const int gt)
84406+{
84407+}
84408+#endif
84409+
84410+#ifdef CONFIG_GRKERNSEC_RESLOG
84411+extern void gr_log_resource(const struct task_struct *task, const int res,
84412+ const unsigned long wanted, const int gt);
84413+#else
84414+static inline void gr_log_resource(const struct task_struct *task, const int res,
84415+ const unsigned long wanted, const int gt)
84416+{
84417+}
84418+#endif
84419+
84420+#ifdef CONFIG_GRKERNSEC
84421+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
84422+void gr_handle_vm86(void);
84423+void gr_handle_mem_readwrite(u64 from, u64 to);
84424+
84425+void gr_log_badprocpid(const char *entry);
84426+
84427+extern int grsec_enable_dmesg;
84428+extern int grsec_disable_privio;
84429+
84430+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
84431+extern kgid_t grsec_proc_gid;
84432+#endif
84433+
84434+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
84435+extern int grsec_enable_chroot_findtask;
84436+#endif
84437+#ifdef CONFIG_GRKERNSEC_SETXID
84438+extern int grsec_enable_setxid;
84439+#endif
84440+#endif
84441+
84442+#endif
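/*
 * Aside on the pattern above: gr_learn_resource()/gr_log_resource() pair an
 * extern prototype with an empty static-inline stub, so call sites never need
 * their own #ifdef and the compiler discards the call when the feature is
 * compiled out. A minimal hypothetical call site (the RLIMIT_NOFILE check and
 * the gt=0 argument are illustrative assumptions, not taken from this patch;
 * assumes the usual <linux/sched.h> context):
 */
static inline int example_check_fd_limit(unsigned long nfds)
{
	/* no-op stub or real RBAC learning hook, depending on config */
	gr_learn_resource(current, RLIMIT_NOFILE, nfds, 0);
	return nfds <= rlimit(RLIMIT_NOFILE);
}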
84443diff --git a/include/linux/grsock.h b/include/linux/grsock.h
84444new file mode 100644
84445index 0000000..e7ffaaf
84446--- /dev/null
84447+++ b/include/linux/grsock.h
84448@@ -0,0 +1,19 @@
84449+#ifndef __GRSOCK_H
84450+#define __GRSOCK_H
84451+
84452+extern void gr_attach_curr_ip(const struct sock *sk);
84453+extern int gr_handle_sock_all(const int family, const int type,
84454+ const int protocol);
84455+extern int gr_handle_sock_server(const struct sockaddr *sck);
84456+extern int gr_handle_sock_server_other(const struct sock *sck);
84457+extern int gr_handle_sock_client(const struct sockaddr *sck);
84458+extern int gr_search_connect(struct socket * sock,
84459+ struct sockaddr_in * addr);
84460+extern int gr_search_bind(struct socket * sock,
84461+ struct sockaddr_in * addr);
84462+extern int gr_search_listen(struct socket * sock);
84463+extern int gr_search_accept(struct socket * sock);
84464+extern int gr_search_socket(const int domain, const int type,
84465+ const int protocol);
84466+
84467+#endif
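/*
 * The grsock.h declarations above are the RBAC entry points for socket
 * activity. A hypothetical sketch of how a creation hook might gate
 * socket(2) -- assuming, as the naming suggests, that a zero return means
 * "denied"; the real wiring lives in net/socket.c elsewhere in this patch:
 */
static int example_socket_create(int domain, int type, int protocol)
{
	if (!gr_search_socket(domain, type, protocol))
		return -EACCES;	/* policy forbids this family/type/protocol */
	/* ... continue into the normal sock_create() path ... */
	return 0;
}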
84468diff --git a/include/linux/hash.h b/include/linux/hash.h
84469index bd1754c..8240892 100644
84470--- a/include/linux/hash.h
84471+++ b/include/linux/hash.h
84472@@ -83,7 +83,7 @@ static inline u32 hash32_ptr(const void *ptr)
84473 struct fast_hash_ops {
84474 u32 (*hash)(const void *data, u32 len, u32 seed);
84475 u32 (*hash2)(const u32 *data, u32 len, u32 seed);
84476-};
84477+} __no_const;
84478
84479 /**
84480 * arch_fast_hash - Calculates a hash over a given buffer that can have
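/*
 * __do_const and __no_const (used throughout the hunks below) are consumed by
 * PaX's "constify" gcc plugin: structures consisting of function pointers are
 * forced const so their instances land in .rodata, and __no_const opts a type
 * back out when an instance must be written at runtime -- here fast_hash_ops
 * is filled in during boot-time setup. The net effect is roughly what writing
 * const by hand gives you (sketch only; assumes <linux/jhash.h>):
 */
struct example_ops {
	u32 (*hash)(const void *data, u32 len, u32 seed);
};
/* as if declared const: the pointer lives in .rodata and cannot be
 * overwritten by a runtime attacker */
static const struct example_ops eops = { .hash = jhash };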
84481diff --git a/include/linux/highmem.h b/include/linux/highmem.h
84482index 7fb31da..08b5114 100644
84483--- a/include/linux/highmem.h
84484+++ b/include/linux/highmem.h
84485@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
84486 kunmap_atomic(kaddr);
84487 }
84488
84489+static inline void sanitize_highpage(struct page *page)
84490+{
84491+ void *kaddr;
84492+ unsigned long flags;
84493+
84494+ local_irq_save(flags);
84495+ kaddr = kmap_atomic(page);
84496+ clear_page(kaddr);
84497+ kunmap_atomic(kaddr);
84498+ local_irq_restore(flags);
84499+}
84500+
84501 static inline void zero_user_segments(struct page *page,
84502 unsigned start1, unsigned end1,
84503 unsigned start2, unsigned end2)
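/*
 * sanitize_highpage() above zeroes a (possibly highmem) page through a
 * temporary kmap_atomic mapping, with interrupts blocked around it. It backs
 * PAX_MEMORY_SANITIZE, which scrubs pages on free so stale contents cannot
 * leak to the next owner. A hypothetical free-path caller (the gating
 * condition is an illustrative assumption):
 */
static void example_free_one_page(struct page *page)
{
	if (IS_ENABLED(CONFIG_PAX_MEMORY_SANITIZE))
		sanitize_highpage(page);	/* scrub before reuse */
	/* ... return the page to the buddy allocator ... */
}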
84504diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
84505index 1c7b89a..7dda400 100644
84506--- a/include/linux/hwmon-sysfs.h
84507+++ b/include/linux/hwmon-sysfs.h
84508@@ -25,7 +25,8 @@
84509 struct sensor_device_attribute{
84510 struct device_attribute dev_attr;
84511 int index;
84512-};
84513+} __do_const;
84514+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
84515 #define to_sensor_dev_attr(_dev_attr) \
84516 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
84517
84518@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
84519 struct device_attribute dev_attr;
84520 u8 index;
84521 u8 nr;
84522-};
84523+} __do_const;
84524+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
84525 #define to_sensor_dev_attr_2(_dev_attr) \
84526 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
84527
84528diff --git a/include/linux/i2c.h b/include/linux/i2c.h
84529index b556e0a..c10a515 100644
84530--- a/include/linux/i2c.h
84531+++ b/include/linux/i2c.h
84532@@ -378,6 +378,7 @@ struct i2c_algorithm {
84533 /* To determine what the adapter supports */
84534 u32 (*functionality) (struct i2c_adapter *);
84535 };
84536+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
84537
84538 /**
84539 * struct i2c_bus_recovery_info - I2C bus recovery information
84540diff --git a/include/linux/i2o.h b/include/linux/i2o.h
84541index d23c3c2..eb63c81 100644
84542--- a/include/linux/i2o.h
84543+++ b/include/linux/i2o.h
84544@@ -565,7 +565,7 @@ struct i2o_controller {
84545 struct i2o_device *exec; /* Executive */
84546 #if BITS_PER_LONG == 64
84547 spinlock_t context_list_lock; /* lock for context_list */
84548- atomic_t context_list_counter; /* needed for unique contexts */
84549+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
84550 struct list_head context_list; /* list of context id's
84551 and pointers */
84552 #endif
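/*
 * atomic_unchecked_t (here and in many hunks below) is the escape hatch from
 * PaX's REFCOUNT hardening: plain atomic_t gains overflow detection to stop
 * refcount-overflow exploits, so counters whose wraparound is harmless --
 * statistics, sequence numbers, unique-id generators like context_list_counter
 * above -- are switched to atomic_unchecked_t plus the *_unchecked accessors
 * to keep the old wrapping behaviour. Typical shape (names illustrative):
 */
static atomic_unchecked_t example_ctx_counter = ATOMIC_INIT(0);

static u32 example_next_context_id(void)
{
	/* free-running id: wrapping past 0xffffffff back to 0 is acceptable */
	return atomic_inc_return_unchecked(&example_ctx_counter);
}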
84553diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
84554index aff7ad8..3942bbd 100644
84555--- a/include/linux/if_pppox.h
84556+++ b/include/linux/if_pppox.h
84557@@ -76,7 +76,7 @@ struct pppox_proto {
84558 int (*ioctl)(struct socket *sock, unsigned int cmd,
84559 unsigned long arg);
84560 struct module *owner;
84561-};
84562+} __do_const;
84563
84564 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
84565 extern void unregister_pppox_proto(int proto_num);
84566diff --git a/include/linux/init.h b/include/linux/init.h
84567index 2df8e8d..3e1280d 100644
84568--- a/include/linux/init.h
84569+++ b/include/linux/init.h
84570@@ -37,9 +37,17 @@
84571 * section.
84572 */
84573
84574+#define add_init_latent_entropy __latent_entropy
84575+
84576+#ifdef CONFIG_MEMORY_HOTPLUG
84577+#define add_meminit_latent_entropy
84578+#else
84579+#define add_meminit_latent_entropy __latent_entropy
84580+#endif
84581+
84582 /* These are for everybody (although not all archs will actually
84583 discard it in modules) */
84584-#define __init __section(.init.text) __cold notrace
84585+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
84586 #define __initdata __section(.init.data)
84587 #define __initconst __constsection(.init.rodata)
84588 #define __exitdata __section(.exit.data)
84589@@ -100,7 +108,7 @@
84590 #define __cpuexitconst
84591
84592 /* Used for MEMORY_HOTPLUG */
84593-#define __meminit __section(.meminit.text) __cold notrace
84594+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
84595 #define __meminitdata __section(.meminit.data)
84596 #define __meminitconst __constsection(.meminit.rodata)
84597 #define __memexit __section(.memexit.text) __exitused __cold notrace
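/*
 * __latent_entropy (spelled via the add_*_latent_entropy macros above) is
 * applied by the latent_entropy gcc plugin, which instruments the annotated
 * init-time functions to mix their computed values into an entropy pool,
 * harvesting a little randomness before the RNG is properly seeded. On a
 * plugin-enabled build, __init therefore expands to roughly:
 *
 *	__section(.init.text) __cold notrace __latent_entropy
 *
 * __meminit only gets the attribute when MEMORY_HOTPLUG is off -- presumably
 * because with hotplug the .meminit sections stay resident and re-run after
 * boot, where the instrumentation would be pure overhead.
 */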
84598diff --git a/include/linux/init_task.h b/include/linux/init_task.h
84599index 6df7f9f..d0bf699 100644
84600--- a/include/linux/init_task.h
84601+++ b/include/linux/init_task.h
84602@@ -156,6 +156,12 @@ extern struct task_group root_task_group;
84603
84604 #define INIT_TASK_COMM "swapper"
84605
84606+#ifdef CONFIG_X86
84607+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
84608+#else
84609+#define INIT_TASK_THREAD_INFO
84610+#endif
84611+
84612 #ifdef CONFIG_RT_MUTEXES
84613 # define INIT_RT_MUTEXES(tsk) \
84614 .pi_waiters = RB_ROOT, \
84615@@ -203,6 +209,7 @@ extern struct task_group root_task_group;
84616 RCU_POINTER_INITIALIZER(cred, &init_cred), \
84617 .comm = INIT_TASK_COMM, \
84618 .thread = INIT_THREAD, \
84619+ INIT_TASK_THREAD_INFO \
84620 .fs = &init_fs, \
84621 .files = &init_files, \
84622 .signal = &init_signals, \
84623diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
84624index 698ad05..8601bb7 100644
84625--- a/include/linux/interrupt.h
84626+++ b/include/linux/interrupt.h
84627@@ -418,8 +418,8 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
84628
84629 struct softirq_action
84630 {
84631- void (*action)(struct softirq_action *);
84632-};
84633+ void (*action)(void);
84634+} __no_const;
84635
84636 asmlinkage void do_softirq(void);
84637 asmlinkage void __do_softirq(void);
84638@@ -433,7 +433,7 @@ static inline void do_softirq_own_stack(void)
84639 }
84640 #endif
84641
84642-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
84643+extern void open_softirq(int nr, void (*action)(void));
84644 extern void softirq_init(void);
84645 extern void __raise_softirq_irqoff(unsigned int nr);
84646
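/*
 * Every softirq handler in the tree ignored its struct softirq_action
 * argument, so the patch drops it and marks the (runtime-registered) vector
 * entry __no_const. Handlers registered through open_softirq() must use the
 * tightened signature; a hypothetical registration (HI_SOFTIRQ is just a
 * stand-in slot for illustration):
 */
static void example_softirq_action(void)
{
	/* drain a per-cpu queue, schedule NAPI, etc. */
}

static int __init example_subsys_init(void)
{
	open_softirq(HI_SOFTIRQ, example_softirq_action);
	return 0;
}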
84647diff --git a/include/linux/iommu.h b/include/linux/iommu.h
84648index b96a5b2..2732d1c 100644
84649--- a/include/linux/iommu.h
84650+++ b/include/linux/iommu.h
84651@@ -131,7 +131,7 @@ struct iommu_ops {
84652 u32 (*domain_get_windows)(struct iommu_domain *domain);
84653
84654 unsigned long pgsize_bitmap;
84655-};
84656+} __do_const;
84657
84658 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
84659 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
84660diff --git a/include/linux/ioport.h b/include/linux/ioport.h
84661index 5e3a906..3131d0f 100644
84662--- a/include/linux/ioport.h
84663+++ b/include/linux/ioport.h
84664@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
84665 int adjust_resource(struct resource *res, resource_size_t start,
84666 resource_size_t size);
84667 resource_size_t resource_alignment(struct resource *res);
84668-static inline resource_size_t resource_size(const struct resource *res)
84669+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
84670 {
84671 return res->end - res->start + 1;
84672 }
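/*
 * __intentional_overflow(-1), seen here and on the jiffies/math64 helpers
 * below, is an annotation for PaX's size_overflow gcc plugin, which otherwise
 * instruments size arithmetic and traps on wraparound; -1 marks the return
 * value as allowed to overflow. resource_size() can wrap by design:
 */
static inline resource_size_t example_wraps(void)
{
	/* a resource spanning the whole address space... */
	struct resource r = { .start = 0, .end = ~(resource_size_t)0 };

	return resource_size(&r);	/* end - start + 1 wraps to 0 */
}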
84673diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
84674index 35e7eca..6afb7ad 100644
84675--- a/include/linux/ipc_namespace.h
84676+++ b/include/linux/ipc_namespace.h
84677@@ -69,7 +69,7 @@ struct ipc_namespace {
84678 struct user_namespace *user_ns;
84679
84680 unsigned int proc_inum;
84681-};
84682+} __randomize_layout;
84683
84684 extern struct ipc_namespace init_ipc_ns;
84685 extern atomic_t nr_ipc_ns;
84686diff --git a/include/linux/irq.h b/include/linux/irq.h
84687index 0d998d8..3a1c782 100644
84688--- a/include/linux/irq.h
84689+++ b/include/linux/irq.h
84690@@ -344,7 +344,8 @@ struct irq_chip {
84691 void (*irq_release_resources)(struct irq_data *data);
84692
84693 unsigned long flags;
84694-};
84695+} __do_const;
84696+typedef struct irq_chip __no_const irq_chip_no_const;
84697
84698 /*
84699 * irq_chip specific flags
84700diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
84701index 45e2d8c..26d85da 100644
84702--- a/include/linux/irqchip/arm-gic.h
84703+++ b/include/linux/irqchip/arm-gic.h
84704@@ -75,9 +75,11 @@
84705
84706 #ifndef __ASSEMBLY__
84707
84708+#include <linux/irq.h>
84709+
84710 struct device_node;
84711
84712-extern struct irq_chip gic_arch_extn;
84713+extern irq_chip_no_const gic_arch_extn;
84714
84715 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
84716 u32 offset, struct device_node *);
84717diff --git a/include/linux/isdn_ppp.h b/include/linux/isdn_ppp.h
84718index 8e10f57..d5f62bc 100644
84719--- a/include/linux/isdn_ppp.h
84720+++ b/include/linux/isdn_ppp.h
84721@@ -180,8 +180,9 @@ struct ippp_struct {
84722 struct slcompress *slcomp;
84723 #endif
84724 #ifdef CONFIG_IPPP_FILTER
84725- struct sk_filter *pass_filter; /* filter for packets to pass */
84726- struct sk_filter *active_filter; /* filter for pkts to reset idle */
84727+ struct sock_filter *pass_filter; /* filter for packets to pass */
84728+ struct sock_filter *active_filter; /* filter for pkts to reset idle */
84729+ unsigned pass_len, active_len;
84730 #endif
84731 unsigned long debug;
84732 struct isdn_ppp_compressor *compressor,*decompressor;
84733diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
84734index 1f44466..b481806 100644
84735--- a/include/linux/jiffies.h
84736+++ b/include/linux/jiffies.h
84737@@ -292,20 +292,20 @@ extern unsigned long preset_lpj;
84738 /*
84739 * Convert various time units to each other:
84740 */
84741-extern unsigned int jiffies_to_msecs(const unsigned long j);
84742-extern unsigned int jiffies_to_usecs(const unsigned long j);
84743+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
84744+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
84745
84746-static inline u64 jiffies_to_nsecs(const unsigned long j)
84747+static inline u64 __intentional_overflow(-1) jiffies_to_nsecs(const unsigned long j)
84748 {
84749 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
84750 }
84751
84752-extern unsigned long msecs_to_jiffies(const unsigned int m);
84753-extern unsigned long usecs_to_jiffies(const unsigned int u);
84754+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
84755+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
84756 extern unsigned long timespec_to_jiffies(const struct timespec *value);
84757 extern void jiffies_to_timespec(const unsigned long jiffies,
84758- struct timespec *value);
84759-extern unsigned long timeval_to_jiffies(const struct timeval *value);
84760+ struct timespec *value) __intentional_overflow(-1);
84761+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
84762 extern void jiffies_to_timeval(const unsigned long jiffies,
84763 struct timeval *value);
84764
84765diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
84766index 6883e19..e854fcb 100644
84767--- a/include/linux/kallsyms.h
84768+++ b/include/linux/kallsyms.h
84769@@ -15,7 +15,8 @@
84770
84771 struct module;
84772
84773-#ifdef CONFIG_KALLSYMS
84774+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
84775+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
84776 /* Lookup the address for a symbol. Returns 0 if not found. */
84777 unsigned long kallsyms_lookup_name(const char *name);
84778
84779@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
84780 /* Stupid that this does nothing, but I didn't create this mess. */
84781 #define __print_symbol(fmt, addr)
84782 #endif /*CONFIG_KALLSYMS*/
84783+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
84784+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
84785+extern unsigned long kallsyms_lookup_name(const char *name);
84786+extern void __print_symbol(const char *fmt, unsigned long address);
84787+extern int sprint_backtrace(char *buffer, unsigned long address);
84788+extern int sprint_symbol(char *buffer, unsigned long address);
84789+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
84790+const char *kallsyms_lookup(unsigned long addr,
84791+ unsigned long *symbolsize,
84792+ unsigned long *offset,
84793+ char **modname, char *namebuf);
84794+extern int kallsyms_lookup_size_offset(unsigned long addr,
84795+ unsigned long *symbolsize,
84796+ unsigned long *offset);
84797+#endif
84798
84799 /* This macro allows us to keep printk typechecking */
84800 static __printf(1, 2)
84801diff --git a/include/linux/key-type.h b/include/linux/key-type.h
84802index a74c3a8..28d3f21 100644
84803--- a/include/linux/key-type.h
84804+++ b/include/linux/key-type.h
84805@@ -131,7 +131,7 @@ struct key_type {
84806 /* internal fields */
84807 struct list_head link; /* link in types list */
84808 struct lock_class_key lock_class; /* key->sem lock class */
84809-};
84810+} __do_const;
84811
84812 extern struct key_type key_type_keyring;
84813
84814diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
84815index 6b06d37..c134867 100644
84816--- a/include/linux/kgdb.h
84817+++ b/include/linux/kgdb.h
84818@@ -52,7 +52,7 @@ extern int kgdb_connected;
84819 extern int kgdb_io_module_registered;
84820
84821 extern atomic_t kgdb_setting_breakpoint;
84822-extern atomic_t kgdb_cpu_doing_single_step;
84823+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
84824
84825 extern struct task_struct *kgdb_usethread;
84826 extern struct task_struct *kgdb_contthread;
84827@@ -254,7 +254,7 @@ struct kgdb_arch {
84828 void (*correct_hw_break)(void);
84829
84830 void (*enable_nmi)(bool on);
84831-};
84832+} __do_const;
84833
84834 /**
84835 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
84836@@ -279,7 +279,7 @@ struct kgdb_io {
84837 void (*pre_exception) (void);
84838 void (*post_exception) (void);
84839 int is_console;
84840-};
84841+} __do_const;
84842
84843 extern struct kgdb_arch arch_kgdb_ops;
84844
84845diff --git a/include/linux/kmod.h b/include/linux/kmod.h
84846index 0555cc6..40116ce 100644
84847--- a/include/linux/kmod.h
84848+++ b/include/linux/kmod.h
84849@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
84850 * usually useless though. */
84851 extern __printf(2, 3)
84852 int __request_module(bool wait, const char *name, ...);
84853+extern __printf(3, 4)
84854+int ___request_module(bool wait, char *param_name, const char *name, ...);
84855 #define request_module(mod...) __request_module(true, mod)
84856 #define request_module_nowait(mod...) __request_module(false, mod)
84857 #define try_then_request_module(x, mod...) \
84858@@ -57,6 +59,9 @@ struct subprocess_info {
84859 struct work_struct work;
84860 struct completion *complete;
84861 char *path;
84862+#ifdef CONFIG_GRKERNSEC
84863+ char *origpath;
84864+#endif
84865 char **argv;
84866 char **envp;
84867 int wait;
84868diff --git a/include/linux/kobject.h b/include/linux/kobject.h
84869index 2d61b90..a1d0a13 100644
84870--- a/include/linux/kobject.h
84871+++ b/include/linux/kobject.h
84872@@ -118,7 +118,7 @@ struct kobj_type {
84873 struct attribute **default_attrs;
84874 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
84875 const void *(*namespace)(struct kobject *kobj);
84876-};
84877+} __do_const;
84878
84879 struct kobj_uevent_env {
84880 char *argv[3];
84881@@ -142,6 +142,7 @@ struct kobj_attribute {
84882 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
84883 const char *buf, size_t count);
84884 };
84885+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
84886
84887 extern const struct sysfs_ops kobj_sysfs_ops;
84888
84889@@ -169,7 +170,7 @@ struct kset {
84890 spinlock_t list_lock;
84891 struct kobject kobj;
84892 const struct kset_uevent_ops *uevent_ops;
84893-};
84894+} __randomize_layout;
84895
84896 extern void kset_init(struct kset *kset);
84897 extern int __must_check kset_register(struct kset *kset);
84898diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
84899index df32d25..fb52e27 100644
84900--- a/include/linux/kobject_ns.h
84901+++ b/include/linux/kobject_ns.h
84902@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
84903 const void *(*netlink_ns)(struct sock *sk);
84904 const void *(*initial_ns)(void);
84905 void (*drop_ns)(void *);
84906-};
84907+} __do_const;
84908
84909 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
84910 int kobj_ns_type_registered(enum kobj_ns_type type);
84911diff --git a/include/linux/kref.h b/include/linux/kref.h
84912index 484604d..0f6c5b6 100644
84913--- a/include/linux/kref.h
84914+++ b/include/linux/kref.h
84915@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
84916 static inline int kref_sub(struct kref *kref, unsigned int count,
84917 void (*release)(struct kref *kref))
84918 {
84919- WARN_ON(release == NULL);
84920+ BUG_ON(release == NULL);
84921
84922 if (atomic_sub_and_test((int) count, &kref->refcount)) {
84923 release(kref);
84924diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
84925index ec4e3bd..14db03a 100644
84926--- a/include/linux/kvm_host.h
84927+++ b/include/linux/kvm_host.h
84928@@ -468,7 +468,7 @@ static inline void kvm_irqfd_exit(void)
84929 {
84930 }
84931 #endif
84932-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
84933+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
84934 struct module *module);
84935 void kvm_exit(void);
84936
84937@@ -634,7 +634,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
84938 struct kvm_guest_debug *dbg);
84939 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
84940
84941-int kvm_arch_init(void *opaque);
84942+int kvm_arch_init(const void *opaque);
84943 void kvm_arch_exit(void);
84944
84945 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
84946diff --git a/include/linux/libata.h b/include/linux/libata.h
84947index 92abb49..e7fff2a 100644
84948--- a/include/linux/libata.h
84949+++ b/include/linux/libata.h
84950@@ -976,7 +976,7 @@ struct ata_port_operations {
84951 * fields must be pointers.
84952 */
84953 const struct ata_port_operations *inherits;
84954-};
84955+} __do_const;
84956
84957 struct ata_port_info {
84958 unsigned long flags;
84959diff --git a/include/linux/linkage.h b/include/linux/linkage.h
84960index a6a42dd..6c5ebce 100644
84961--- a/include/linux/linkage.h
84962+++ b/include/linux/linkage.h
84963@@ -36,6 +36,7 @@
84964 #endif
84965
84966 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
84967+#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
84968 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
84969
84970 /*
84971diff --git a/include/linux/list.h b/include/linux/list.h
84972index ef95941..82db65a 100644
84973--- a/include/linux/list.h
84974+++ b/include/linux/list.h
84975@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
84976 extern void list_del(struct list_head *entry);
84977 #endif
84978
84979+extern void __pax_list_add(struct list_head *new,
84980+ struct list_head *prev,
84981+ struct list_head *next);
84982+static inline void pax_list_add(struct list_head *new, struct list_head *head)
84983+{
84984+ __pax_list_add(new, head, head->next);
84985+}
84986+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
84987+{
84988+ __pax_list_add(new, head->prev, head);
84989+}
84990+extern void pax_list_del(struct list_head *entry);
84991+
84992 /**
84993 * list_replace - replace old entry by new one
84994 * @old : the element to be replaced
84995@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
84996 INIT_LIST_HEAD(entry);
84997 }
84998
84999+extern void pax_list_del_init(struct list_head *entry);
85000+
85001 /**
85002 * list_move - delete from one list and add as another's head
85003 * @list: the entry to move
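/*
 * The pax_list_* variants exist for lists whose nodes KERNEXEC/constify have
 * placed in read-only memory. A plausible shape for the out-of-line helper --
 * a sketch assuming the pax_open_kernel()/pax_close_kernel() write-enable
 * primitives this patch introduces elsewhere, not the actual implementation:
 */
void __pax_list_add(struct list_head *new, struct list_head *prev,
		    struct list_head *next)
{
	pax_open_kernel();	/* temporarily lift kernel write protection */
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
	pax_close_kernel();
}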
85004diff --git a/include/linux/math64.h b/include/linux/math64.h
85005index c45c089..298841c 100644
85006--- a/include/linux/math64.h
85007+++ b/include/linux/math64.h
85008@@ -15,7 +15,7 @@
85009 * This is commonly provided by 32bit archs to provide an optimized 64bit
85010 * divide.
85011 */
85012-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
85013+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
85014 {
85015 *remainder = dividend % divisor;
85016 return dividend / divisor;
85017@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
85018 /**
85019 * div64_u64 - unsigned 64bit divide with 64bit divisor
85020 */
85021-static inline u64 div64_u64(u64 dividend, u64 divisor)
85022+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
85023 {
85024 return dividend / divisor;
85025 }
85026@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
85027 #define div64_ul(x, y) div_u64((x), (y))
85028
85029 #ifndef div_u64_rem
85030-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
85031+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
85032 {
85033 *remainder = do_div(dividend, divisor);
85034 return dividend;
85035@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
85036 #endif
85037
85038 #ifndef div64_u64
85039-extern u64 div64_u64(u64 dividend, u64 divisor);
85040+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
85041 #endif
85042
85043 #ifndef div64_s64
85044@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
85045 * divide.
85046 */
85047 #ifndef div_u64
85048-static inline u64 div_u64(u64 dividend, u32 divisor)
85049+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
85050 {
85051 u32 remainder;
85052 return div_u64_rem(dividend, divisor, &remainder);
85053diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
85054index f230a97..714c006 100644
85055--- a/include/linux/mempolicy.h
85056+++ b/include/linux/mempolicy.h
85057@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
85058 }
85059
85060 #define vma_policy(vma) ((vma)->vm_policy)
85061+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
85062+{
85063+ vma->vm_policy = pol;
85064+}
85065
85066 static inline void mpol_get(struct mempolicy *pol)
85067 {
85068@@ -228,6 +232,9 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
85069 }
85070
85071 #define vma_policy(vma) NULL
85072+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
85073+{
85074+}
85075
85076 static inline int
85077 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
85078diff --git a/include/linux/mm.h b/include/linux/mm.h
85079index e03dd29..eaf923c 100644
85080--- a/include/linux/mm.h
85081+++ b/include/linux/mm.h
85082@@ -127,6 +127,11 @@ extern unsigned int kobjsize(const void *objp);
85083 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
85084 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
85085 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
85086+
85087+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
85088+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
85089+#endif
85090+
85091 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
85092
85093 #ifdef CONFIG_MEM_SOFT_DIRTY
85094@@ -237,8 +242,8 @@ struct vm_operations_struct {
85095 /* called by access_process_vm when get_user_pages() fails, typically
85096 * for use by special VMAs that can switch between memory and hardware
85097 */
85098- int (*access)(struct vm_area_struct *vma, unsigned long addr,
85099- void *buf, int len, int write);
85100+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
85101+ void *buf, size_t len, int write);
85102
85103 /* Called by the /proc/PID/maps code to ask the vma whether it
85104 * has a special name. Returning non-NULL will also cause this
85105@@ -274,6 +279,7 @@ struct vm_operations_struct {
85106 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
85107 unsigned long size, pgoff_t pgoff);
85108 };
85109+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
85110
85111 struct mmu_gather;
85112 struct inode;
85113@@ -1144,8 +1150,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
85114 unsigned long *pfn);
85115 int follow_phys(struct vm_area_struct *vma, unsigned long address,
85116 unsigned int flags, unsigned long *prot, resource_size_t *phys);
85117-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
85118- void *buf, int len, int write);
85119+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
85120+ void *buf, size_t len, int write);
85121
85122 static inline void unmap_shared_mapping_range(struct address_space *mapping,
85123 loff_t const holebegin, loff_t const holelen)
85124@@ -1184,9 +1190,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
85125 }
85126 #endif
85127
85128-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
85129-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
85130- void *buf, int len, int write);
85131+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
85132+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
85133+ void *buf, size_t len, int write);
85134
85135 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
85136 unsigned long start, unsigned long nr_pages,
85137@@ -1219,34 +1225,6 @@ int set_page_dirty_lock(struct page *page);
85138 int clear_page_dirty_for_io(struct page *page);
85139 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
85140
85141-/* Is the vma a continuation of the stack vma above it? */
85142-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
85143-{
85144- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
85145-}
85146-
85147-static inline int stack_guard_page_start(struct vm_area_struct *vma,
85148- unsigned long addr)
85149-{
85150- return (vma->vm_flags & VM_GROWSDOWN) &&
85151- (vma->vm_start == addr) &&
85152- !vma_growsdown(vma->vm_prev, addr);
85153-}
85154-
85155-/* Is the vma a continuation of the stack vma below it? */
85156-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
85157-{
85158- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
85159-}
85160-
85161-static inline int stack_guard_page_end(struct vm_area_struct *vma,
85162- unsigned long addr)
85163-{
85164- return (vma->vm_flags & VM_GROWSUP) &&
85165- (vma->vm_end == addr) &&
85166- !vma_growsup(vma->vm_next, addr);
85167-}
85168-
85169 extern pid_t
85170 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
85171
85172@@ -1346,6 +1324,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
85173 }
85174 #endif
85175
85176+#ifdef CONFIG_MMU
85177+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
85178+#else
85179+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
85180+{
85181+ return __pgprot(0);
85182+}
85183+#endif
85184+
85185 int vma_wants_writenotify(struct vm_area_struct *vma);
85186
85187 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
85188@@ -1364,8 +1351,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
85189 {
85190 return 0;
85191 }
85192+
85193+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
85194+ unsigned long address)
85195+{
85196+ return 0;
85197+}
85198 #else
85199 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
85200+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
85201 #endif
85202
85203 #ifdef __PAGETABLE_PMD_FOLDED
85204@@ -1374,8 +1368,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
85205 {
85206 return 0;
85207 }
85208+
85209+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
85210+ unsigned long address)
85211+{
85212+ return 0;
85213+}
85214 #else
85215 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
85216+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
85217 #endif
85218
85219 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
85220@@ -1393,11 +1394,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
85221 NULL: pud_offset(pgd, address);
85222 }
85223
85224+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
85225+{
85226+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
85227+ NULL: pud_offset(pgd, address);
85228+}
85229+
85230 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
85231 {
85232 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
85233 NULL: pmd_offset(pud, address);
85234 }
85235+
85236+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
85237+{
85238+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
85239+ NULL: pmd_offset(pud, address);
85240+}
85241 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
85242
85243 #if USE_SPLIT_PTE_PTLOCKS
85244@@ -1796,7 +1809,7 @@ extern int install_special_mapping(struct mm_struct *mm,
85245 unsigned long addr, unsigned long len,
85246 unsigned long flags, struct page **pages);
85247
85248-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
85249+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
85250
85251 extern unsigned long mmap_region(struct file *file, unsigned long addr,
85252 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
85253@@ -1804,6 +1817,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
85254 unsigned long len, unsigned long prot, unsigned long flags,
85255 unsigned long pgoff, unsigned long *populate);
85256 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
85257+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
85258
85259 #ifdef CONFIG_MMU
85260 extern int __mm_populate(unsigned long addr, unsigned long len,
85261@@ -1832,10 +1846,11 @@ struct vm_unmapped_area_info {
85262 unsigned long high_limit;
85263 unsigned long align_mask;
85264 unsigned long align_offset;
85265+ unsigned long threadstack_offset;
85266 };
85267
85268-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
85269-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
85270+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
85271+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
85272
85273 /*
85274 * Search for an unmapped address range.
85275@@ -1847,7 +1862,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
85276 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
85277 */
85278 static inline unsigned long
85279-vm_unmapped_area(struct vm_unmapped_area_info *info)
85280+vm_unmapped_area(const struct vm_unmapped_area_info *info)
85281 {
85282 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
85283 return unmapped_area(info);
85284@@ -1909,6 +1924,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
85285 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
85286 struct vm_area_struct **pprev);
85287
85288+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
85289+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
85290+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
85291+
85292 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
85293 NULL if none. Assume start_addr < end_addr. */
85294 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
85295@@ -1937,15 +1956,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
85296 return vma;
85297 }
85298
85299-#ifdef CONFIG_MMU
85300-pgprot_t vm_get_page_prot(unsigned long vm_flags);
85301-#else
85302-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
85303-{
85304- return __pgprot(0);
85305-}
85306-#endif
85307-
85308 #ifdef CONFIG_NUMA_BALANCING
85309 unsigned long change_prot_numa(struct vm_area_struct *vma,
85310 unsigned long start, unsigned long end);
85311@@ -1997,6 +2007,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
85312 static inline void vm_stat_account(struct mm_struct *mm,
85313 unsigned long flags, struct file *file, long pages)
85314 {
85315+
85316+#ifdef CONFIG_PAX_RANDMMAP
85317+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
85318+#endif
85319+
85320 mm->total_vm += pages;
85321 }
85322 #endif /* CONFIG_PROC_FS */
85323@@ -2078,7 +2093,7 @@ extern int unpoison_memory(unsigned long pfn);
85324 extern int sysctl_memory_failure_early_kill;
85325 extern int sysctl_memory_failure_recovery;
85326 extern void shake_page(struct page *p, int access);
85327-extern atomic_long_t num_poisoned_pages;
85328+extern atomic_long_unchecked_t num_poisoned_pages;
85329 extern int soft_offline_page(struct page *page, int flags);
85330
85331 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
85332@@ -2113,5 +2128,11 @@ void __init setup_nr_node_ids(void);
85333 static inline void setup_nr_node_ids(void) {}
85334 #endif
85335
85336+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
85337+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
85338+#else
85339+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
85340+#endif
85341+
85342 #endif /* __KERNEL__ */
85343 #endif /* _LINUX_MM_H */
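/*
 * The pud_alloc_kernel()/pmd_alloc_kernel() additions above mirror the
 * ordinary allocators for the kernel's own page tables, which KERNEXEC may
 * keep write-protected; vmalloc/ioremap-style paths are the expected callers.
 * A hypothetical table walker using them (illustrative only):
 */
static pmd_t *example_walk_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud = pud_alloc_kernel(mm, pgd, addr);

	if (!pud)
		return NULL;
	return pmd_alloc_kernel(mm, pud, addr);	/* NULL on alloc failure */
}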
85344diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
85345index 96c5750..15668ba 100644
85346--- a/include/linux/mm_types.h
85347+++ b/include/linux/mm_types.h
85348@@ -308,7 +308,9 @@ struct vm_area_struct {
85349 #ifdef CONFIG_NUMA
85350 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
85351 #endif
85352-};
85353+
85354+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
85355+} __randomize_layout;
85356
85357 struct core_thread {
85358 struct task_struct *task;
85359@@ -454,7 +456,25 @@ struct mm_struct {
85360 bool tlb_flush_pending;
85361 #endif
85362 struct uprobes_state uprobes_state;
85363-};
85364+
85365+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
85366+ unsigned long pax_flags;
85367+#endif
85368+
85369+#ifdef CONFIG_PAX_DLRESOLVE
85370+ unsigned long call_dl_resolve;
85371+#endif
85372+
85373+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
85374+ unsigned long call_syscall;
85375+#endif
85376+
85377+#ifdef CONFIG_PAX_ASLR
85378+ unsigned long delta_mmap; /* randomized offset */
85379+ unsigned long delta_stack; /* randomized offset */
85380+#endif
85381+
85382+} __randomize_layout;
85383
85384 static inline void mm_init_cpumask(struct mm_struct *mm)
85385 {
85386diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
85387index c5d5278..f0b68c8 100644
85388--- a/include/linux/mmiotrace.h
85389+++ b/include/linux/mmiotrace.h
85390@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
85391 /* Called from ioremap.c */
85392 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
85393 void __iomem *addr);
85394-extern void mmiotrace_iounmap(volatile void __iomem *addr);
85395+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
85396
85397 /* For anyone to insert markers. Remember trailing newline. */
85398 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
85399@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
85400 {
85401 }
85402
85403-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
85404+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
85405 {
85406 }
85407
85408diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
85409index 6cbd1b6..b1d2f99 100644
85410--- a/include/linux/mmzone.h
85411+++ b/include/linux/mmzone.h
85412@@ -412,7 +412,7 @@ struct zone {
85413 unsigned long flags; /* zone flags, see below */
85414
85415 /* Zone statistics */
85416- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
85417+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
85418
85419 /*
85420 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
85421diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
85422index 44eeef0..a92d3f9 100644
85423--- a/include/linux/mod_devicetable.h
85424+++ b/include/linux/mod_devicetable.h
85425@@ -139,7 +139,7 @@ struct usb_device_id {
85426 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
85427 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
85428
85429-#define HID_ANY_ID (~0)
85430+#define HID_ANY_ID (~0U)
85431 #define HID_BUS_ANY 0xffff
85432 #define HID_GROUP_ANY 0x0000
85433
85434@@ -475,7 +475,7 @@ struct dmi_system_id {
85435 const char *ident;
85436 struct dmi_strmatch matches[4];
85437 void *driver_data;
85438-};
85439+} __do_const;
85440 /*
85441 * struct dmi_device_id appears during expansion of
85442 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
85443diff --git a/include/linux/module.h b/include/linux/module.h
85444index f520a76..5f898ef 100644
85445--- a/include/linux/module.h
85446+++ b/include/linux/module.h
85447@@ -17,9 +17,11 @@
85448 #include <linux/moduleparam.h>
85449 #include <linux/jump_label.h>
85450 #include <linux/export.h>
85451+#include <linux/fs.h>
85452
85453 #include <linux/percpu.h>
85454 #include <asm/module.h>
85455+#include <asm/pgtable.h>
85456
85457 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
85458 #define MODULE_SIG_STRING "~Module signature appended~\n"
85459@@ -42,7 +44,7 @@ struct module_kobject {
85460 struct kobject *drivers_dir;
85461 struct module_param_attrs *mp;
85462 struct completion *kobj_completion;
85463-};
85464+} __randomize_layout;
85465
85466 struct module_attribute {
85467 struct attribute attr;
85468@@ -54,12 +56,13 @@ struct module_attribute {
85469 int (*test)(struct module *);
85470 void (*free)(struct module *);
85471 };
85472+typedef struct module_attribute __no_const module_attribute_no_const;
85473
85474 struct module_version_attribute {
85475 struct module_attribute mattr;
85476 const char *module_name;
85477 const char *version;
85478-} __attribute__ ((__aligned__(sizeof(void *))));
85479+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
85480
85481 extern ssize_t __modver_version_show(struct module_attribute *,
85482 struct module_kobject *, char *);
85483@@ -235,7 +238,7 @@ struct module {
85484
85485 /* Sysfs stuff. */
85486 struct module_kobject mkobj;
85487- struct module_attribute *modinfo_attrs;
85488+ module_attribute_no_const *modinfo_attrs;
85489 const char *version;
85490 const char *srcversion;
85491 struct kobject *holders_dir;
85492@@ -284,19 +287,16 @@ struct module {
85493 int (*init)(void);
85494
85495 /* If this is non-NULL, vfree after init() returns */
85496- void *module_init;
85497+ void *module_init_rx, *module_init_rw;
85498
85499 /* Here is the actual code + data, vfree'd on unload. */
85500- void *module_core;
85501+ void *module_core_rx, *module_core_rw;
85502
85503 /* Here are the sizes of the init and core sections */
85504- unsigned int init_size, core_size;
85505+ unsigned int init_size_rw, core_size_rw;
85506
85507 /* The size of the executable code in each section. */
85508- unsigned int init_text_size, core_text_size;
85509-
85510- /* Size of RO sections of the module (text+rodata) */
85511- unsigned int init_ro_size, core_ro_size;
85512+ unsigned int init_size_rx, core_size_rx;
85513
85514 /* Arch-specific module values */
85515 struct mod_arch_specific arch;
85516@@ -352,6 +352,10 @@ struct module {
85517 #ifdef CONFIG_EVENT_TRACING
85518 struct ftrace_event_call **trace_events;
85519 unsigned int num_trace_events;
85520+ struct file_operations trace_id;
85521+ struct file_operations trace_enable;
85522+ struct file_operations trace_format;
85523+ struct file_operations trace_filter;
85524 #endif
85525 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
85526 unsigned int num_ftrace_callsites;
85527@@ -375,7 +379,7 @@ struct module {
85528 ctor_fn_t *ctors;
85529 unsigned int num_ctors;
85530 #endif
85531-};
85532+} __randomize_layout;
85533 #ifndef MODULE_ARCH_INIT
85534 #define MODULE_ARCH_INIT {}
85535 #endif
85536@@ -396,16 +400,46 @@ bool is_module_address(unsigned long addr);
85537 bool is_module_percpu_address(unsigned long addr);
85538 bool is_module_text_address(unsigned long addr);
85539
85540+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
85541+{
85542+
85543+#ifdef CONFIG_PAX_KERNEXEC
85544+ if (ktla_ktva(addr) >= (unsigned long)start &&
85545+ ktla_ktva(addr) < (unsigned long)start + size)
85546+ return 1;
85547+#endif
85548+
85549+ return ((void *)addr >= start && (void *)addr < start + size);
85550+}
85551+
85552+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
85553+{
85554+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
85555+}
85556+
85557+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
85558+{
85559+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
85560+}
85561+
85562+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
85563+{
85564+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
85565+}
85566+
85567+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
85568+{
85569+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
85570+}
85571+
85572 static inline int within_module_core(unsigned long addr, const struct module *mod)
85573 {
85574- return (unsigned long)mod->module_core <= addr &&
85575- addr < (unsigned long)mod->module_core + mod->core_size;
85576+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
85577 }
85578
85579 static inline int within_module_init(unsigned long addr, const struct module *mod)
85580 {
85581- return (unsigned long)mod->module_init <= addr &&
85582- addr < (unsigned long)mod->module_init + mod->init_size;
85583+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
85584 }
85585
85586 /* Search for module by name: must hold module_mutex. */
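/*
 * Under KERNEXEC the patch splits each module's single core/init allocation
 * into an executable read-only half (*_rx: text) and a writable
 * non-executable half (*_rw: data), so no module mapping is ever writable and
 * executable at the same time; within_module_core()/within_module_init()
 * simply fan out to the per-half checks. E.g. a hypothetical symbolizer that
 * only cares about text could use:
 */
static bool example_addr_in_module_text(unsigned long addr,
					const struct module *mod)
{
	return within_module_core_rx(addr, mod) ||
	       within_module_init_rx(addr, mod);
}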
85587diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
85588index 560ca53..ef621ef 100644
85589--- a/include/linux/moduleloader.h
85590+++ b/include/linux/moduleloader.h
85591@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
85592 sections. Returns NULL on failure. */
85593 void *module_alloc(unsigned long size);
85594
85595+#ifdef CONFIG_PAX_KERNEXEC
85596+void *module_alloc_exec(unsigned long size);
85597+#else
85598+#define module_alloc_exec(x) module_alloc(x)
85599+#endif
85600+
85601 /* Free memory returned from module_alloc. */
85602 void module_free(struct module *mod, void *module_region);
85603
85604+#ifdef CONFIG_PAX_KERNEXEC
85605+void module_free_exec(struct module *mod, void *module_region);
85606+#else
85607+#define module_free_exec(x, y) module_free((x), (y))
85608+#endif
85609+
85610 /*
85611 * Apply the given relocation to the (simplified) ELF. Return -error
85612 * or 0.
85613@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
85614 unsigned int relsec,
85615 struct module *me)
85616 {
85617+#ifdef CONFIG_MODULES
85618 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
85619+#endif
85620 return -ENOEXEC;
85621 }
85622 #endif
85623@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
85624 unsigned int relsec,
85625 struct module *me)
85626 {
85627+#ifdef CONFIG_MODULES
85628 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
85629+#endif
85630 return -ENOEXEC;
85631 }
85632 #endif
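/*
 * module_alloc_exec()/module_free_exec() pair with the _rx/_rw split in
 * struct module above: the loader takes the executable half from an
 * RX-capable allocator and the data half from plain module_alloc(). A rough
 * sketch of the layout step (field names from this patch, control flow
 * hypothetical):
 */
static int example_alloc_module_regions(struct module *mod)
{
	mod->module_core_rx = module_alloc_exec(mod->core_size_rx);
	if (!mod->module_core_rx)
		return -ENOMEM;
	mod->module_core_rw = module_alloc(mod->core_size_rw);
	if (!mod->module_core_rw) {
		module_free_exec(mod, mod->module_core_rx);
		return -ENOMEM;
	}
	return 0;
}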
85633diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
85634index b1990c5..2a6e611 100644
85635--- a/include/linux/moduleparam.h
85636+++ b/include/linux/moduleparam.h
85637@@ -293,7 +293,7 @@ static inline void __kernel_param_unlock(void)
85638 * @len is usually just sizeof(string).
85639 */
85640 #define module_param_string(name, string, len, perm) \
85641- static const struct kparam_string __param_string_##name \
85642+ static const struct kparam_string __param_string_##name __used \
85643 = { len, string }; \
85644 __module_param_call(MODULE_PARAM_PREFIX, name, \
85645 &param_ops_string, \
85646@@ -432,7 +432,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
85647 */
85648 #define module_param_array_named(name, array, type, nump, perm) \
85649 param_check_##type(name, &(array)[0]); \
85650- static const struct kparam_array __param_arr_##name \
85651+ static const struct kparam_array __param_arr_##name __used \
85652 = { .max = ARRAY_SIZE(array), .num = nump, \
85653 .ops = &param_ops_##type, \
85654 .elemsize = sizeof(array[0]), .elem = array }; \
85655diff --git a/include/linux/mount.h b/include/linux/mount.h
85656index 839bac2..a96b37c 100644
85657--- a/include/linux/mount.h
85658+++ b/include/linux/mount.h
85659@@ -59,7 +59,7 @@ struct vfsmount {
85660 struct dentry *mnt_root; /* root of the mounted tree */
85661 struct super_block *mnt_sb; /* pointer to superblock */
85662 int mnt_flags;
85663-};
85664+} __randomize_layout;
85665
85666 struct file; /* forward dec */
85667
85668diff --git a/include/linux/namei.h b/include/linux/namei.h
85669index 492de72..1bddcd4 100644
85670--- a/include/linux/namei.h
85671+++ b/include/linux/namei.h
85672@@ -19,7 +19,7 @@ struct nameidata {
85673 unsigned seq, m_seq;
85674 int last_type;
85675 unsigned depth;
85676- char *saved_names[MAX_NESTED_LINKS + 1];
85677+ const char *saved_names[MAX_NESTED_LINKS + 1];
85678 };
85679
85680 /*
85681@@ -83,12 +83,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
85682
85683 extern void nd_jump_link(struct nameidata *nd, struct path *path);
85684
85685-static inline void nd_set_link(struct nameidata *nd, char *path)
85686+static inline void nd_set_link(struct nameidata *nd, const char *path)
85687 {
85688 nd->saved_names[nd->depth] = path;
85689 }
85690
85691-static inline char *nd_get_link(struct nameidata *nd)
85692+static inline const char *nd_get_link(const struct nameidata *nd)
85693 {
85694 return nd->saved_names[nd->depth];
85695 }
85696diff --git a/include/linux/net.h b/include/linux/net.h
85697index 17d8339..81656c0 100644
85698--- a/include/linux/net.h
85699+++ b/include/linux/net.h
85700@@ -192,7 +192,7 @@ struct net_proto_family {
85701 int (*create)(struct net *net, struct socket *sock,
85702 int protocol, int kern);
85703 struct module *owner;
85704-};
85705+} __do_const;
85706
85707 struct iovec;
85708 struct kvec;
85709diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
85710index 66f9a04..056078d 100644
85711--- a/include/linux/netdevice.h
85712+++ b/include/linux/netdevice.h
85713@@ -1145,6 +1145,7 @@ struct net_device_ops {
85714 void *priv);
85715 int (*ndo_get_lock_subclass)(struct net_device *dev);
85716 };
85717+typedef struct net_device_ops __no_const net_device_ops_no_const;
85718
85719 /**
85720 * enum net_device_priv_flags - &struct net_device priv_flags
85721@@ -1312,11 +1313,11 @@ struct net_device {
85722 struct net_device_stats stats;
85723
85724 /* dropped packets by core network, Do not use this in drivers */
85725- atomic_long_t rx_dropped;
85726- atomic_long_t tx_dropped;
85727+ atomic_long_unchecked_t rx_dropped;
85728+ atomic_long_unchecked_t tx_dropped;
85729
85730 /* Stats to monitor carrier on<->off transitions */
85731- atomic_t carrier_changes;
85732+ atomic_unchecked_t carrier_changes;
85733
85734 #ifdef CONFIG_WIRELESS_EXT
85735 /* List of functions to handle Wireless Extensions (instead of ioctl).
85736diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
85737index 2077489..a15e561 100644
85738--- a/include/linux/netfilter.h
85739+++ b/include/linux/netfilter.h
85740@@ -84,7 +84,7 @@ struct nf_sockopt_ops {
85741 #endif
85742 /* Use the module struct to lock set/get code in place */
85743 struct module *owner;
85744-};
85745+} __do_const;
85746
85747 /* Function to register/unregister hook points. */
85748 int nf_register_hook(struct nf_hook_ops *reg);
85749diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
85750index e955d47..04a5338 100644
85751--- a/include/linux/netfilter/nfnetlink.h
85752+++ b/include/linux/netfilter/nfnetlink.h
85753@@ -19,7 +19,7 @@ struct nfnl_callback {
85754 const struct nlattr * const cda[]);
85755 const struct nla_policy *policy; /* netlink attribute policy */
85756 const u_int16_t attr_count; /* number of nlattr's */
85757-};
85758+} __do_const;
85759
85760 struct nfnetlink_subsystem {
85761 const char *name;
85762diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
85763new file mode 100644
85764index 0000000..33f4af8
85765--- /dev/null
85766+++ b/include/linux/netfilter/xt_gradm.h
85767@@ -0,0 +1,9 @@
85768+#ifndef _LINUX_NETFILTER_XT_GRADM_H
85769+#define _LINUX_NETFILTER_XT_GRADM_H 1
85770+
85771+struct xt_gradm_mtinfo {
85772+ __u16 flags;
85773+ __u16 invflags;
85774+};
85775+
85776+#endif
85777diff --git a/include/linux/nls.h b/include/linux/nls.h
85778index 520681b..2b7fabb 100644
85779--- a/include/linux/nls.h
85780+++ b/include/linux/nls.h
85781@@ -31,7 +31,7 @@ struct nls_table {
85782 const unsigned char *charset2upper;
85783 struct module *owner;
85784 struct nls_table *next;
85785-};
85786+} __do_const;
85787
85788 /* this value hold the maximum octet of charset */
85789 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
85790@@ -46,7 +46,7 @@ enum utf16_endian {
85791 /* nls_base.c */
85792 extern int __register_nls(struct nls_table *, struct module *);
85793 extern int unregister_nls(struct nls_table *);
85794-extern struct nls_table *load_nls(char *);
85795+extern struct nls_table *load_nls(const char *);
85796 extern void unload_nls(struct nls_table *);
85797 extern struct nls_table *load_nls_default(void);
85798 #define register_nls(nls) __register_nls((nls), THIS_MODULE)
85799diff --git a/include/linux/notifier.h b/include/linux/notifier.h
85800index d14a4c3..a078786 100644
85801--- a/include/linux/notifier.h
85802+++ b/include/linux/notifier.h
85803@@ -54,7 +54,8 @@ struct notifier_block {
85804 notifier_fn_t notifier_call;
85805 struct notifier_block __rcu *next;
85806 int priority;
85807-};
85808+} __do_const;
85809+typedef struct notifier_block __no_const notifier_block_no_const;
85810
85811 struct atomic_notifier_head {
85812 spinlock_t lock;
85813diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
85814index b2a0f15..4d7da32 100644
85815--- a/include/linux/oprofile.h
85816+++ b/include/linux/oprofile.h
85817@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
85818 int oprofilefs_create_ro_ulong(struct dentry * root,
85819 char const * name, ulong * val);
85820
85821-/** Create a file for read-only access to an atomic_t. */
85822+/** Create a file for read-only access to an atomic_unchecked_t. */
85823 int oprofilefs_create_ro_atomic(struct dentry * root,
85824- char const * name, atomic_t * val);
85825+ char const * name, atomic_unchecked_t * val);
85826
85827 /** create a directory */
85828 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
85829diff --git a/include/linux/padata.h b/include/linux/padata.h
85830index 4386946..f50c615 100644
85831--- a/include/linux/padata.h
85832+++ b/include/linux/padata.h
85833@@ -129,7 +129,7 @@ struct parallel_data {
85834 struct padata_serial_queue __percpu *squeue;
85835 atomic_t reorder_objects;
85836 atomic_t refcnt;
85837- atomic_t seq_nr;
85838+ atomic_unchecked_t seq_nr;
85839 struct padata_cpumask cpumask;
85840 spinlock_t lock ____cacheline_aligned;
85841 unsigned int processed;
85842diff --git a/include/linux/path.h b/include/linux/path.h
85843index d137218..be0c176 100644
85844--- a/include/linux/path.h
85845+++ b/include/linux/path.h
85846@@ -1,13 +1,15 @@
85847 #ifndef _LINUX_PATH_H
85848 #define _LINUX_PATH_H
85849
85850+#include <linux/compiler.h>
85851+
85852 struct dentry;
85853 struct vfsmount;
85854
85855 struct path {
85856 struct vfsmount *mnt;
85857 struct dentry *dentry;
85858-};
85859+} __randomize_layout;
85860
85861 extern void path_get(const struct path *);
85862 extern void path_put(const struct path *);
85863diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
85864index 5f2e559..7d59314 100644
85865--- a/include/linux/pci_hotplug.h
85866+++ b/include/linux/pci_hotplug.h
85867@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
85868 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
85869 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
85870 int (*reset_slot) (struct hotplug_slot *slot, int probe);
85871-};
85872+} __do_const;
85873+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
85874
85875 /**
85876 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
85877diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
85878index 707617a..28a2e7e 100644
85879--- a/include/linux/perf_event.h
85880+++ b/include/linux/perf_event.h
85881@@ -339,8 +339,8 @@ struct perf_event {
85882
85883 enum perf_event_active_state state;
85884 unsigned int attach_state;
85885- local64_t count;
85886- atomic64_t child_count;
85887+ local64_t count; /* PaX: fix it one day */
85888+ atomic64_unchecked_t child_count;
85889
85890 /*
85891 * These are the total time in nanoseconds that the event
85892@@ -391,8 +391,8 @@ struct perf_event {
85893 * These accumulate total time (in nanoseconds) that children
85894 * events have been enabled and running, respectively.
85895 */
85896- atomic64_t child_total_time_enabled;
85897- atomic64_t child_total_time_running;
85898+ atomic64_unchecked_t child_total_time_enabled;
85899+ atomic64_unchecked_t child_total_time_running;
85900
85901 /*
85902 * Protect attach/detach and child_list:
85903@@ -722,7 +722,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
85904 entry->ip[entry->nr++] = ip;
85905 }
85906
85907-extern int sysctl_perf_event_paranoid;
85908+extern int sysctl_perf_event_legitimately_concerned;
85909 extern int sysctl_perf_event_mlock;
85910 extern int sysctl_perf_event_sample_rate;
85911 extern int sysctl_perf_cpu_time_max_percent;
85912@@ -737,19 +737,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
85913 loff_t *ppos);
85914
85915
85916+static inline bool perf_paranoid_any(void)
85917+{
85918+ return sysctl_perf_event_legitimately_concerned > 2;
85919+}
85920+
85921 static inline bool perf_paranoid_tracepoint_raw(void)
85922 {
85923- return sysctl_perf_event_paranoid > -1;
85924+ return sysctl_perf_event_legitimately_concerned > -1;
85925 }
85926
85927 static inline bool perf_paranoid_cpu(void)
85928 {
85929- return sysctl_perf_event_paranoid > 0;
85930+ return sysctl_perf_event_legitimately_concerned > 0;
85931 }
85932
85933 static inline bool perf_paranoid_kernel(void)
85934 {
85935- return sysctl_perf_event_paranoid > 1;
85936+ return sysctl_perf_event_legitimately_concerned > 1;
85937 }
85938
85939 extern void perf_event_init(void);
85940@@ -880,7 +885,7 @@ struct perf_pmu_events_attr {
85941 struct device_attribute attr;
85942 u64 id;
85943 const char *event_str;
85944-};
85945+} __do_const;
85946
85947 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
85948 static struct perf_pmu_events_attr _var = { \
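
The sysctl rename is cosmetic; the functional addition is perf_paranoid_any(), a new paranoia level above 2 at which perf is withheld from unprivileged users entirely. A hedged sketch of how such a gate would sit at the top of a perf syscall handler (the actual check in this patch lives in kernel/events/core.c; the helper name here is hypothetical):

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/perf_event.h>

/* Deny event creation outright at paranoia level 3+ unless the caller
 * is privileged; levels -1 through 2 keep their stock meanings. */
static int perf_open_allowed(void)
{
	if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
		return -EACCES;
	return 0;
}
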
85949diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
85950index 7246ef3..1539ea4 100644
85951--- a/include/linux/pid_namespace.h
85952+++ b/include/linux/pid_namespace.h
85953@@ -43,7 +43,7 @@ struct pid_namespace {
85954 int hide_pid;
85955 int reboot; /* group exit code if this pidns was rebooted */
85956 unsigned int proc_inum;
85957-};
85958+} __randomize_layout;
85959
85960 extern struct pid_namespace init_pid_ns;
85961
85962diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
85963index eb8b8ac..62649e1 100644
85964--- a/include/linux/pipe_fs_i.h
85965+++ b/include/linux/pipe_fs_i.h
85966@@ -47,10 +47,10 @@ struct pipe_inode_info {
85967 struct mutex mutex;
85968 wait_queue_head_t wait;
85969 unsigned int nrbufs, curbuf, buffers;
85970- unsigned int readers;
85971- unsigned int writers;
85972- unsigned int files;
85973- unsigned int waiting_writers;
85974+ atomic_t readers;
85975+ atomic_t writers;
85976+ atomic_t files;
85977+ atomic_t waiting_writers;
85978 unsigned int r_counter;
85979 unsigned int w_counter;
85980 struct page *tmp_page;
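
Converting the pipe reader/writer/file counters to atomic_t places them under the PaX REFCOUNT overflow checks and makes the bare increments safe regardless of which lock the caller holds. A sketch of the corresponding caller-side change, assuming the patched struct (helper names are hypothetical):

#include <linux/atomic.h>
#include <linux/pipe_fs_i.h>

/* Before the type change this was a plain `pipe->readers++`. */
static void pipe_new_reader(struct pipe_inode_info *pipe)
{
	atomic_inc(&pipe->readers);
}

static bool pipe_has_writers(struct pipe_inode_info *pipe)
{
	return atomic_read(&pipe->writers) != 0;
}
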
85981diff --git a/include/linux/pm.h b/include/linux/pm.h
85982index 72c0fe0..26918ed 100644
85983--- a/include/linux/pm.h
85984+++ b/include/linux/pm.h
85985@@ -620,6 +620,7 @@ extern int dev_pm_put_subsys_data(struct device *dev);
85986 struct dev_pm_domain {
85987 struct dev_pm_ops ops;
85988 };
85989+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
85990
85991 /*
85992 * The PM_EVENT_ messages are also used by drivers implementing the legacy
85993diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
85994index 7c1d252..0e7061d 100644
85995--- a/include/linux/pm_domain.h
85996+++ b/include/linux/pm_domain.h
85997@@ -44,11 +44,11 @@ struct gpd_dev_ops {
85998 int (*thaw_early)(struct device *dev);
85999 int (*thaw)(struct device *dev);
86000 bool (*active_wakeup)(struct device *dev);
86001-};
86002+} __no_const;
86003
86004 struct gpd_cpu_data {
86005 unsigned int saved_exit_latency;
86006- struct cpuidle_state *idle_state;
86007+ cpuidle_state_no_const *idle_state;
86008 };
86009
86010 struct generic_pm_domain {
86011diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
86012index 43fd671..08c96ee 100644
86013--- a/include/linux/pm_runtime.h
86014+++ b/include/linux/pm_runtime.h
86015@@ -118,7 +118,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
86016
86017 static inline void pm_runtime_mark_last_busy(struct device *dev)
86018 {
86019- ACCESS_ONCE(dev->power.last_busy) = jiffies;
86020+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
86021 }
86022
86023 #else /* !CONFIG_PM_RUNTIME */
86024diff --git a/include/linux/pnp.h b/include/linux/pnp.h
86025index 195aafc..49a7bc2 100644
86026--- a/include/linux/pnp.h
86027+++ b/include/linux/pnp.h
86028@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
86029 struct pnp_fixup {
86030 char id[7];
86031 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
86032-};
86033+} __do_const;
86034
86035 /* config parameters */
86036 #define PNP_CONFIG_NORMAL 0x0001
86037diff --git a/include/linux/poison.h b/include/linux/poison.h
86038index 2110a81..13a11bb 100644
86039--- a/include/linux/poison.h
86040+++ b/include/linux/poison.h
86041@@ -19,8 +19,8 @@
86042 * under normal circumstances, used to verify that nobody uses
86043 * non-initialized list entries.
86044 */
86045-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
86046-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
86047+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
86048+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
86049
86050 /********** include/linux/timer.h **********/
86051 /*
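
The stock poison values sit low in the address space (offset by POISON_POINTER_DELTA), so on configurations where userland can map low addresses a stale poisoned pointer becomes a controlled dereference. The patch pins them just below 4 GiB, where no userland mapping can exist, so touching a poisoned pointer always faults. Illustration, hedged (the entry is assumed to still be linked when list_del() runs):

#include <linux/list.h>
#include <linux/poison.h>

static void retire_entry(struct list_head *e)
{
	list_del(e);	/* e->next = LIST_POISON1, e->prev = LIST_POISON2 */

	/* Stock kernel:  e->next == 0x00100100 + delta (low, potentially
	 * mappable).  This patch: e->next == 0xFFFFFF01 (unmappable), so
	 * a later use-after-del dereference oopses instead of reading
	 * attacker-controlled memory. */
}
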
86052diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
86053index d8b187c3..9a9257a 100644
86054--- a/include/linux/power/smartreflex.h
86055+++ b/include/linux/power/smartreflex.h
86056@@ -238,7 +238,7 @@ struct omap_sr_class_data {
86057 int (*notify)(struct omap_sr *sr, u32 status);
86058 u8 notify_flags;
86059 u8 class_type;
86060-};
86061+} __do_const;
86062
86063 /**
86064 * struct omap_sr_nvalue_table - Smartreflex n-target value info
86065diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
86066index 4ea1d37..80f4b33 100644
86067--- a/include/linux/ppp-comp.h
86068+++ b/include/linux/ppp-comp.h
86069@@ -84,7 +84,7 @@ struct compressor {
86070 struct module *owner;
86071 /* Extra skb space needed by the compressor algorithm */
86072 unsigned int comp_extra;
86073-};
86074+} __do_const;
86075
86076 /*
86077 * The return value from decompress routine is the length of the
86078diff --git a/include/linux/preempt.h b/include/linux/preempt.h
86079index de83b4e..c4b997d 100644
86080--- a/include/linux/preempt.h
86081+++ b/include/linux/preempt.h
86082@@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
86083 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
86084 #endif
86085
86086+#define raw_preempt_count_add(val) __preempt_count_add(val)
86087+#define raw_preempt_count_sub(val) __preempt_count_sub(val)
86088+
86089 #define __preempt_count_inc() __preempt_count_add(1)
86090 #define __preempt_count_dec() __preempt_count_sub(1)
86091
86092 #define preempt_count_inc() preempt_count_add(1)
86093+#define raw_preempt_count_inc() raw_preempt_count_add(1)
86094 #define preempt_count_dec() preempt_count_sub(1)
86095+#define raw_preempt_count_dec() raw_preempt_count_sub(1)
86096
86097 #ifdef CONFIG_PREEMPT_COUNT
86098
86099@@ -41,6 +46,12 @@ do { \
86100 barrier(); \
86101 } while (0)
86102
86103+#define raw_preempt_disable() \
86104+do { \
86105+ raw_preempt_count_inc(); \
86106+ barrier(); \
86107+} while (0)
86108+
86109 #define sched_preempt_enable_no_resched() \
86110 do { \
86111 barrier(); \
86112@@ -49,6 +60,12 @@ do { \
86113
86114 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
86115
86116+#define raw_preempt_enable_no_resched() \
86117+do { \
86118+ barrier(); \
86119+ raw_preempt_count_dec(); \
86120+} while (0)
86121+
86122 #ifdef CONFIG_PREEMPT
86123 #define preempt_enable() \
86124 do { \
86125@@ -113,8 +130,10 @@ do { \
86126 * region.
86127 */
86128 #define preempt_disable() barrier()
86129+#define raw_preempt_disable() barrier()
86130 #define sched_preempt_enable_no_resched() barrier()
86131 #define preempt_enable_no_resched() barrier()
86132+#define raw_preempt_enable_no_resched() barrier()
86133 #define preempt_enable() barrier()
86134 #define preempt_check_resched() do { } while (0)
86135
86136@@ -128,11 +147,13 @@ do { \
86137 /*
86138 * Modules have no business playing preemption tricks.
86139 */
86140+#ifndef CONFIG_PAX_KERNEXEC
86141 #undef sched_preempt_enable_no_resched
86142 #undef preempt_enable_no_resched
86143 #undef preempt_enable_no_resched_notrace
86144 #undef preempt_check_resched
86145 #endif
86146+#endif
86147
86148 #define preempt_set_need_resched() \
86149 do { \
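
The raw_ variants mirror preempt_disable()/preempt_enable_no_resched() but go straight to __preempt_count_add()/__preempt_count_sub(), bypassing the instrumented accounting path; note also the CONFIG_PAX_KERNEXEC guard, which keeps the no_resched macros visible to modules that stock kernels hide them from. A hedged sketch of the intended pattern (the per-cpu counter is hypothetical):

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, fixup_count);

/* A helper that must not enter the traced preempt accounting: the
 * raw_ variants only adjust the counter and emit the barrier. */
static void do_percpu_fixup(void)
{
	raw_preempt_disable();
	__this_cpu_inc(fixup_count);		/* safe: preemption is off */
	raw_preempt_enable_no_resched();	/* no resched check on exit */
}
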
86150diff --git a/include/linux/printk.h b/include/linux/printk.h
86151index 319ff7e..608849a 100644
86152--- a/include/linux/printk.h
86153+++ b/include/linux/printk.h
86154@@ -121,6 +121,8 @@ static inline __printf(1, 2) __cold
86155 void early_printk(const char *s, ...) { }
86156 #endif
86157
86158+extern int kptr_restrict;
86159+
86160 #ifdef CONFIG_PRINTK
86161 asmlinkage __printf(5, 0)
86162 int vprintk_emit(int facility, int level,
86163@@ -155,7 +157,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
86164
86165 extern int printk_delay_msec;
86166 extern int dmesg_restrict;
86167-extern int kptr_restrict;
86168
86169 extern void wake_up_klogd(void);
86170
86171diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
86172index 9d117f6..d832b31 100644
86173--- a/include/linux/proc_fs.h
86174+++ b/include/linux/proc_fs.h
86175@@ -17,8 +17,11 @@ extern void proc_flush_task(struct task_struct *);
86176 extern struct proc_dir_entry *proc_symlink(const char *,
86177 struct proc_dir_entry *, const char *);
86178 extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
86179+extern struct proc_dir_entry *proc_mkdir_restrict(const char *, struct proc_dir_entry *);
86180 extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
86181 struct proc_dir_entry *, void *);
86182+extern struct proc_dir_entry *proc_mkdir_data_restrict(const char *, umode_t,
86183+ struct proc_dir_entry *, void *);
86184 extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
86185 struct proc_dir_entry *);
86186
86187@@ -34,6 +37,19 @@ static inline struct proc_dir_entry *proc_create(
86188 return proc_create_data(name, mode, parent, proc_fops, NULL);
86189 }
86190
86191+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
86192+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
86193+{
86194+#ifdef CONFIG_GRKERNSEC_PROC_USER
86195+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
86196+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
86197+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
86198+#else
86199+ return proc_create_data(name, mode, parent, proc_fops, NULL);
86200+#endif
86201+}
86202+
86203+
86204 extern void proc_set_size(struct proc_dir_entry *, loff_t);
86205 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
86206 extern void *PDE_DATA(const struct inode *);
86207@@ -56,8 +72,12 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
86208 struct proc_dir_entry *parent,const char *dest) { return NULL;}
86209 static inline struct proc_dir_entry *proc_mkdir(const char *name,
86210 struct proc_dir_entry *parent) {return NULL;}
86211+static inline struct proc_dir_entry *proc_mkdir_restrict(const char *name,
86212+ struct proc_dir_entry *parent) { return NULL; }
86213 static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
86214 umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
86215+static inline struct proc_dir_entry *proc_mkdir_data_restrict(const char *name,
86216+ umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
86217 static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
86218 umode_t mode, struct proc_dir_entry *parent) { return NULL; }
86219 #define proc_create(name, mode, parent, proc_fops) ({NULL;})
86220@@ -77,7 +97,7 @@ static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *p
86221 static inline struct proc_dir_entry *proc_net_mkdir(
86222 struct net *net, const char *name, struct proc_dir_entry *parent)
86223 {
86224- return proc_mkdir_data(name, 0, parent, net);
86225+ return proc_mkdir_data_restrict(name, 0, parent, net);
86226 }
86227
86228 #endif /* _LINUX_PROC_FS_H */
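
proc_create_grsec() silently narrows the mode of a /proc entry according to the GRKERNSEC_PROC_USER/USERGROUP options, so callers can keep a nominally world-readable mode in the source. A hedged sketch of a caller, assuming a grsecurity-patched tree (names are hypothetical):

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int my_stats_show(struct seq_file *m, void *v)
{
	seq_puts(m, "example\n");
	return 0;
}

static int my_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, my_stats_show, NULL);
}

static const struct file_operations my_stats_fops = {
	.owner		= THIS_MODULE,
	.open		= my_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init my_proc_init(void)
{
	/* Nominally 0444; proc_create_grsec() tightens this to 0400
	 * (GRKERNSEC_PROC_USER) or 0440 (GRKERNSEC_PROC_USERGROUP). */
	if (!proc_create_grsec("my_stats", 0444, NULL, &my_stats_fops))
		return -ENOMEM;
	return 0;
}
module_init(my_proc_init);
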
86229diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
86230index 34a1e10..70f6bde 100644
86231--- a/include/linux/proc_ns.h
86232+++ b/include/linux/proc_ns.h
86233@@ -14,7 +14,7 @@ struct proc_ns_operations {
86234 void (*put)(void *ns);
86235 int (*install)(struct nsproxy *nsproxy, void *ns);
86236 unsigned int (*inum)(void *ns);
86237-};
86238+} __do_const __randomize_layout;
86239
86240 struct proc_ns {
86241 void *ns;
86242diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h
86243index 7dfed71..1dc420b 100644
86244--- a/include/linux/ptp_classify.h
86245+++ b/include/linux/ptp_classify.h
86246@@ -23,8 +23,15 @@
86247 #ifndef _PTP_CLASSIFY_H_
86248 #define _PTP_CLASSIFY_H_
86249
86250+#include <linux/if_ether.h>
86251+#include <linux/if_vlan.h>
86252 #include <linux/ip.h>
86253-#include <linux/skbuff.h>
86254+#include <linux/filter.h>
86255+#ifdef __KERNEL__
86256+#include <linux/in.h>
86257+#else
86258+#include <netinet/in.h>
86259+#endif
86260
86261 #define PTP_CLASS_NONE 0x00 /* not a PTP event message */
86262 #define PTP_CLASS_V1 0x01 /* protocol version 1 */
86263@@ -37,7 +44,7 @@
86264 #define PTP_CLASS_PMASK 0xf0 /* mask for the packet type field */
86265
86266 #define PTP_CLASS_V1_IPV4 (PTP_CLASS_V1 | PTP_CLASS_IPV4)
86267-#define PTP_CLASS_V1_IPV6 (PTP_CLASS_V1 | PTP_CLASS_IPV6) /* probably DNE */
86268+#define PTP_CLASS_V1_IPV6 (PTP_CLASS_V1 | PTP_CLASS_IPV6) /*probably DNE*/
86269 #define PTP_CLASS_V2_IPV4 (PTP_CLASS_V2 | PTP_CLASS_IPV4)
86270 #define PTP_CLASS_V2_IPV6 (PTP_CLASS_V2 | PTP_CLASS_IPV6)
86271 #define PTP_CLASS_V2_L2 (PTP_CLASS_V2 | PTP_CLASS_L2)
86272@@ -46,34 +53,88 @@
86273 #define PTP_EV_PORT 319
86274 #define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */
86275
86276+#define OFF_ETYPE 12
86277+#define OFF_IHL 14
86278+#define OFF_FRAG 20
86279+#define OFF_PROTO4 23
86280+#define OFF_NEXT 6
86281+#define OFF_UDP_DST 2
86282+
86283 #define OFF_PTP_SOURCE_UUID 22 /* PTPv1 only */
86284 #define OFF_PTP_SEQUENCE_ID 30
86285 #define OFF_PTP_CONTROL 32 /* PTPv1 only */
86286
86287-/* Below defines should actually be removed at some point in time. */
86288+#define IPV4_HLEN(data) (((struct iphdr *)(data + OFF_IHL))->ihl << 2)
86289+
86290 #define IP6_HLEN 40
86291 #define UDP_HLEN 8
86292-#define OFF_IHL 14
86293+
86294+#define RELOFF_DST4 (ETH_HLEN + OFF_UDP_DST)
86295+#define OFF_DST6 (ETH_HLEN + IP6_HLEN + OFF_UDP_DST)
86296 #define OFF_PTP6 (ETH_HLEN + IP6_HLEN + UDP_HLEN)
86297-#define IPV4_HLEN(data) (((struct iphdr *)(data + OFF_IHL))->ihl << 2)
86298
86299-#if defined(CONFIG_NET_PTP_CLASSIFY)
86300-/**
86301- * ptp_classify_raw - classify a PTP packet
86302- * @skb: buffer
86303- *
86304- * Runs a minimal BPF dissector to classify a network packet to
86305- * determine the PTP class. In case the skb does not contain any
86306- * PTP protocol data, PTP_CLASS_NONE will be returned, otherwise
86307- * PTP_CLASS_V1_IPV{4,6}, PTP_CLASS_V2_IPV{4,6} or
86308- * PTP_CLASS_V2_{L2,VLAN}, depending on the packet content.
86309- */
86310-unsigned int ptp_classify_raw(const struct sk_buff *skb);
86311+#define OP_AND (BPF_ALU | BPF_AND | BPF_K)
86312+#define OP_JEQ (BPF_JMP | BPF_JEQ | BPF_K)
86313+#define OP_JSET (BPF_JMP | BPF_JSET | BPF_K)
86314+#define OP_LDB (BPF_LD | BPF_B | BPF_ABS)
86315+#define OP_LDH (BPF_LD | BPF_H | BPF_ABS)
86316+#define OP_LDHI (BPF_LD | BPF_H | BPF_IND)
86317+#define OP_LDX (BPF_LDX | BPF_B | BPF_MSH)
86318+#define OP_OR (BPF_ALU | BPF_OR | BPF_K)
86319+#define OP_RETA (BPF_RET | BPF_A)
86320+#define OP_RETK (BPF_RET | BPF_K)
86321
86322-void __init ptp_classifier_init(void);
86323-#else
86324-static inline void ptp_classifier_init(void)
86325+static inline int ptp_filter_init(struct sock_filter *f, int len)
86326 {
86327+ if (OP_LDH == f[0].code)
86328+ return sk_chk_filter(f, len);
86329+ else
86330+ return 0;
86331 }
86332+
86333+#define PTP_FILTER \
86334+ {OP_LDH, 0, 0, OFF_ETYPE }, /* */ \
86335+ {OP_JEQ, 0, 12, ETH_P_IP }, /* f goto L20 */ \
86336+ {OP_LDB, 0, 0, OFF_PROTO4 }, /* */ \
86337+ {OP_JEQ, 0, 9, IPPROTO_UDP }, /* f goto L10 */ \
86338+ {OP_LDH, 0, 0, OFF_FRAG }, /* */ \
86339+ {OP_JSET, 7, 0, 0x1fff }, /* t goto L11 */ \
86340+ {OP_LDX, 0, 0, OFF_IHL }, /* */ \
86341+ {OP_LDHI, 0, 0, RELOFF_DST4 }, /* */ \
86342+ {OP_JEQ, 0, 4, PTP_EV_PORT }, /* f goto L12 */ \
86343+ {OP_LDHI, 0, 0, ETH_HLEN + UDP_HLEN }, /* */ \
86344+ {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \
86345+ {OP_OR, 0, 0, PTP_CLASS_IPV4 }, /* */ \
86346+ {OP_RETA, 0, 0, 0 }, /* */ \
86347+/*L1x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE }, /* */ \
86348+/*L20*/ {OP_JEQ, 0, 9, ETH_P_IPV6 }, /* f goto L40 */ \
86349+ {OP_LDB, 0, 0, ETH_HLEN + OFF_NEXT }, /* */ \
86350+ {OP_JEQ, 0, 6, IPPROTO_UDP }, /* f goto L30 */ \
86351+ {OP_LDH, 0, 0, OFF_DST6 }, /* */ \
86352+ {OP_JEQ, 0, 4, PTP_EV_PORT }, /* f goto L31 */ \
86353+ {OP_LDH, 0, 0, OFF_PTP6 }, /* */ \
86354+ {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \
86355+ {OP_OR, 0, 0, PTP_CLASS_IPV6 }, /* */ \
86356+ {OP_RETA, 0, 0, 0 }, /* */ \
86357+/*L3x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE }, /* */ \
86358+/*L40*/ {OP_JEQ, 0, 9, ETH_P_8021Q }, /* f goto L50 */ \
86359+ {OP_LDH, 0, 0, OFF_ETYPE + 4 }, /* */ \
86360+ {OP_JEQ, 0, 15, ETH_P_1588 }, /* f goto L60 */ \
86361+ {OP_LDB, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \
86362+ {OP_AND, 0, 0, PTP_GEN_BIT }, /* */ \
86363+ {OP_JEQ, 0, 12, 0 }, /* f goto L6x */ \
86364+ {OP_LDH, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \
86365+ {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \
86366+ {OP_OR, 0, 0, PTP_CLASS_VLAN }, /* */ \
86367+ {OP_RETA, 0, 0, 0 }, /* */ \
86368+/*L50*/ {OP_JEQ, 0, 7, ETH_P_1588 }, /* f goto L61 */ \
86369+ {OP_LDB, 0, 0, ETH_HLEN }, /* */ \
86370+ {OP_AND, 0, 0, PTP_GEN_BIT }, /* */ \
86371+ {OP_JEQ, 0, 4, 0 }, /* f goto L6x */ \
86372+ {OP_LDH, 0, 0, ETH_HLEN }, /* */ \
86373+ {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \
86374+ {OP_OR, 0, 0, PTP_CLASS_L2 }, /* */ \
86375+ {OP_RETA, 0, 0, 0 }, /* */ \
86376+/*L6x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE },
86377+
86378 #endif
86379-#endif /* _PTP_CLASSIFY_H_ */
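
This hunk reverts the 3.16 centralized classifier (ptp_classify_raw()/ptp_classifier_init()) back to the older scheme in which each PHC driver embeds the classic-BPF program PTP_FILTER and validates it once through ptp_filter_init(), a thin wrapper over sk_chk_filter(). A sketch of the driver-side pattern, hedged:

#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/ptp_classify.h>

/* Expanded from PTP_FILTER: classifies a raw ethernet frame as
 * PTP_CLASS_V1_IPV4, PTP_CLASS_V2_L2, PTP_CLASS_NONE, etc. */
static struct sock_filter ptp_filter[] = { PTP_FILTER };

static int __init my_phc_init(void)
{
	/* Checks opcodes and jump targets once; 0 on success. */
	return ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter));
}
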
86380diff --git a/include/linux/quota.h b/include/linux/quota.h
86381index 0f3c5d3..bc559e3 100644
86382--- a/include/linux/quota.h
86383+++ b/include/linux/quota.h
86384@@ -70,7 +70,7 @@ struct kqid { /* Type in which we store the quota identifier */
86385
86386 extern bool qid_eq(struct kqid left, struct kqid right);
86387 extern bool qid_lt(struct kqid left, struct kqid right);
86388-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
86389+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
86390 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
86391 extern bool qid_valid(struct kqid qid);
86392
86393diff --git a/include/linux/random.h b/include/linux/random.h
86394index 57fbbff..2170304 100644
86395--- a/include/linux/random.h
86396+++ b/include/linux/random.h
86397@@ -9,9 +9,19 @@
86398 #include <uapi/linux/random.h>
86399
86400 extern void add_device_randomness(const void *, unsigned int);
86401+
86402+static inline void add_latent_entropy(void)
86403+{
86404+
86405+#ifdef LATENT_ENTROPY_PLUGIN
86406+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
86407+#endif
86408+
86409+}
86410+
86411 extern void add_input_randomness(unsigned int type, unsigned int code,
86412- unsigned int value);
86413-extern void add_interrupt_randomness(int irq, int irq_flags);
86414+ unsigned int value) __latent_entropy;
86415+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
86416
86417 extern void get_random_bytes(void *buf, int nbytes);
86418 extern void get_random_bytes_arch(void *buf, int nbytes);
86419@@ -22,10 +32,10 @@ extern int random_int_secret_init(void);
86420 extern const struct file_operations random_fops, urandom_fops;
86421 #endif
86422
86423-unsigned int get_random_int(void);
86424+unsigned int __intentional_overflow(-1) get_random_int(void);
86425 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
86426
86427-u32 prandom_u32(void);
86428+u32 prandom_u32(void) __intentional_overflow(-1);
86429 void prandom_bytes(void *buf, int nbytes);
86430 void prandom_seed(u32 seed);
86431 void prandom_reseed_late(void);
86432@@ -37,6 +47,11 @@ struct rnd_state {
86433 u32 prandom_u32_state(struct rnd_state *state);
86434 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
86435
86436+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
86437+{
86438+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
86439+}
86440+
86441 /**
86442 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
86443 * @ep_ro: right open interval endpoint
86444@@ -49,7 +64,7 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
86445 *
86446 * Returns: pseudo-random number in interval [0, ep_ro)
86447 */
86448-static inline u32 prandom_u32_max(u32 ep_ro)
86449+static inline u32 __intentional_overflow(-1) prandom_u32_max(u32 ep_ro)
86450 {
86451 return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
86452 }
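
pax_get_random_long() widens prandom_u32() to a full long: on 64-bit it splices two independent 32-bit draws, while on 32-bit the shift arm is compiled out. The __intentional_overflow(-1) marker tells the size_overflow plugin that the wrap-around arithmetic in callers is deliberate. A hypothetical use, hedged (range is assumed to be a power-of-two multiple of PAGE_SIZE):

#include <linux/mm.h>
#include <linux/random.h>

/* Hypothetical ASLR-style helper: a page-aligned offset within range. */
static unsigned long random_mmap_offset(unsigned long range)
{
	return pax_get_random_long() & (range - 1) & PAGE_MASK;
}
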
86453diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
86454index fea49b5..2ac22bb 100644
86455--- a/include/linux/rbtree_augmented.h
86456+++ b/include/linux/rbtree_augmented.h
86457@@ -80,7 +80,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
86458 old->rbaugmented = rbcompute(old); \
86459 } \
86460 rbstatic const struct rb_augment_callbacks rbname = { \
86461- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
86462+ .propagate = rbname ## _propagate, \
86463+ .copy = rbname ## _copy, \
86464+ .rotate = rbname ## _rotate \
86465 };
86466
86467
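
Switching the rb_augment_callbacks initializer to designated form is a prerequisite for the constify/randstruct plugins used throughout this patch: positional initializers bind by field order, which breaks (or is rejected) once a plugin may reorder members. Illustration, hedged (my_propagate/my_copy/my_rotate are hypothetical no-ops):

#include <linux/rbtree_augmented.h>

static void my_propagate(struct rb_node *rb, struct rb_node *stop) { }
static void my_copy(struct rb_node *old, struct rb_node *new) { }
static void my_rotate(struct rb_node *old, struct rb_node *new) { }

/* Positional form -- { my_propagate, my_copy, my_rotate } -- depends
 * on field order; the designated form below is order-independent. */
static const struct rb_augment_callbacks my_cbs = {
	.propagate	= my_propagate,
	.copy		= my_copy,
	.rotate		= my_rotate,
};
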
86468diff --git a/include/linux/rculist.h b/include/linux/rculist.h
86469index 8183b46..a388711 100644
86470--- a/include/linux/rculist.h
86471+++ b/include/linux/rculist.h
86472@@ -29,8 +29,8 @@
86473 */
86474 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
86475 {
86476- ACCESS_ONCE(list->next) = list;
86477- ACCESS_ONCE(list->prev) = list;
86478+ ACCESS_ONCE_RW(list->next) = list;
86479+ ACCESS_ONCE_RW(list->prev) = list;
86480 }
86481
86482 /*
86483@@ -59,6 +59,9 @@ void __list_add_rcu(struct list_head *new,
86484 struct list_head *prev, struct list_head *next);
86485 #endif
86486
86487+void __pax_list_add_rcu(struct list_head *new,
86488+ struct list_head *prev, struct list_head *next);
86489+
86490 /**
86491 * list_add_rcu - add a new entry to rcu-protected list
86492 * @new: new entry to be added
86493@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
86494 __list_add_rcu(new, head, head->next);
86495 }
86496
86497+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
86498+{
86499+ __pax_list_add_rcu(new, head, head->next);
86500+}
86501+
86502 /**
86503 * list_add_tail_rcu - add a new entry to rcu-protected list
86504 * @new: new entry to be added
86505@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
86506 __list_add_rcu(new, head->prev, head);
86507 }
86508
86509+static inline void pax_list_add_tail_rcu(struct list_head *new,
86510+ struct list_head *head)
86511+{
86512+ __pax_list_add_rcu(new, head->prev, head);
86513+}
86514+
86515 /**
86516 * list_del_rcu - deletes entry from list without re-initialization
86517 * @entry: the element to delete from the list.
86518@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
86519 entry->prev = LIST_POISON2;
86520 }
86521
86522+extern void pax_list_del_rcu(struct list_head *entry);
86523+
86524 /**
86525 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
86526 * @n: the element to delete from the hash list.
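
The pax_list_* variants are RCU list primitives for nodes embedded in read-only (KERNEXEC-protected, constified) objects: their out-of-line implementations briefly open the write window before updating the pointers, which the inline list_add_rcu()/list_del_rcu() cannot do. A hedged sketch of the call pattern (the handler list is hypothetical):

#include <linux/rculist.h>
#include <linux/rcupdate.h>

static LIST_HEAD(handlers);	/* list head in ordinary writable data */

/* node is assumed to live in a constified object that a plain
 * list_add_rcu() could not write to. */
static void register_handler(struct list_head *node)
{
	pax_list_add_rcu(node, &handlers);
}

static void unregister_handler(struct list_head *node)
{
	pax_list_del_rcu(node);
	synchronize_rcu();	/* readers finish before node is reused */
}
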
86527diff --git a/include/linux/reboot.h b/include/linux/reboot.h
86528index 48bf152..d38b785 100644
86529--- a/include/linux/reboot.h
86530+++ b/include/linux/reboot.h
86531@@ -44,9 +44,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
86532 */
86533
86534 extern void migrate_to_reboot_cpu(void);
86535-extern void machine_restart(char *cmd);
86536-extern void machine_halt(void);
86537-extern void machine_power_off(void);
86538+extern void machine_restart(char *cmd) __noreturn;
86539+extern void machine_halt(void) __noreturn;
86540+extern void machine_power_off(void) __noreturn;
86541
86542 extern void machine_shutdown(void);
86543 struct pt_regs;
86544@@ -57,9 +57,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
86545 */
86546
86547 extern void kernel_restart_prepare(char *cmd);
86548-extern void kernel_restart(char *cmd);
86549-extern void kernel_halt(void);
86550-extern void kernel_power_off(void);
86551+extern void kernel_restart(char *cmd) __noreturn;
86552+extern void kernel_halt(void) __noreturn;
86553+extern void kernel_power_off(void) __noreturn;
86554
86555 extern int C_A_D; /* for sysctl */
86556 void ctrl_alt_del(void);
86557@@ -73,7 +73,7 @@ extern int orderly_poweroff(bool force);
86558 * Emergency restart, callable from an interrupt handler.
86559 */
86560
86561-extern void emergency_restart(void);
86562+extern void emergency_restart(void) __noreturn;
86563 #include <asm/emergency-restart.h>
86564
86565 #endif /* _LINUX_REBOOT_H */
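
Annotating the halt/restart entry points __noreturn documents that they never come back, letting the compiler prune dead fall-through code and drop "control reaches end of non-void function" warnings in callers. Example of the effect, hedged (fail_hard is hypothetical):

#include <linux/printk.h>
#include <linux/reboot.h>

/* Because kernel_power_off() is now __noreturn, the compiler accepts
 * the missing return statement and discards everything after the call. */
static int fail_hard(void)
{
	pr_emerg("unrecoverable hardware fault, powering off\n");
	kernel_power_off();
}
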
86566diff --git a/include/linux/regset.h b/include/linux/regset.h
86567index 8e0c9fe..ac4d221 100644
86568--- a/include/linux/regset.h
86569+++ b/include/linux/regset.h
86570@@ -161,7 +161,8 @@ struct user_regset {
86571 unsigned int align;
86572 unsigned int bias;
86573 unsigned int core_note_type;
86574-};
86575+} __do_const;
86576+typedef struct user_regset __no_const user_regset_no_const;
86577
86578 /**
86579 * struct user_regset_view - available regsets
86580diff --git a/include/linux/relay.h b/include/linux/relay.h
86581index d7c8359..818daf5 100644
86582--- a/include/linux/relay.h
86583+++ b/include/linux/relay.h
86584@@ -157,7 +157,7 @@ struct rchan_callbacks
86585 * The callback should return 0 if successful, negative if not.
86586 */
86587 int (*remove_buf_file)(struct dentry *dentry);
86588-};
86589+} __no_const;
86590
86591 /*
86592 * CONFIG_RELAY kernel API, kernel/relay.c
86593diff --git a/include/linux/rio.h b/include/linux/rio.h
86594index 6bda06f..bf39a9b 100644
86595--- a/include/linux/rio.h
86596+++ b/include/linux/rio.h
86597@@ -358,7 +358,7 @@ struct rio_ops {
86598 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
86599 u64 rstart, u32 size, u32 flags);
86600 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
86601-};
86602+} __no_const;
86603
86604 #define RIO_RESOURCE_MEM 0x00000100
86605 #define RIO_RESOURCE_DOORBELL 0x00000200
86606diff --git a/include/linux/rmap.h b/include/linux/rmap.h
86607index be57450..31cf65e 100644
86608--- a/include/linux/rmap.h
86609+++ b/include/linux/rmap.h
86610@@ -144,8 +144,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
86611 void anon_vma_init(void); /* create anon_vma_cachep */
86612 int anon_vma_prepare(struct vm_area_struct *);
86613 void unlink_anon_vmas(struct vm_area_struct *);
86614-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
86615-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
86616+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
86617+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
86618
86619 static inline void anon_vma_merge(struct vm_area_struct *vma,
86620 struct vm_area_struct *next)
86621diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
86622index a964f72..b475afb 100644
86623--- a/include/linux/scatterlist.h
86624+++ b/include/linux/scatterlist.h
86625@@ -1,6 +1,7 @@
86626 #ifndef _LINUX_SCATTERLIST_H
86627 #define _LINUX_SCATTERLIST_H
86628
86629+#include <linux/sched.h>
86630 #include <linux/string.h>
86631 #include <linux/bug.h>
86632 #include <linux/mm.h>
86633@@ -114,6 +115,12 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
86634 #ifdef CONFIG_DEBUG_SG
86635 BUG_ON(!virt_addr_valid(buf));
86636 #endif
86637+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
86638+ if (object_starts_on_stack(buf)) {
86639+ void *adjbuf = buf - current->stack + current->lowmem_stack;
86640+ sg_set_page(sg, virt_to_page(adjbuf), buflen, offset_in_page(adjbuf));
86641+ } else
86642+#endif
86643 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
86644 }
86645
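
Under GRKERNSEC_KSTACKOVERFLOW the task stack is vmalloc-backed, so virt_to_page() on a stack address is invalid; the patched sg_set_buf() therefore redirects stack buffers into the task's physically contiguous lowmem_stack shadow before taking the page. A hedged sketch of the pattern this services:

#include <linux/scatterlist.h>

/* An on-stack buffer handed to the SG API: with a vmalloc'd stack,
 * virt_to_page(buf) would be bogus, so the patched sg_set_buf()
 * rewrites buf into the lowmem_stack shadow first. */
static void fill_one_sg(struct scatterlist *sg)
{
	char buf[64];

	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, sizeof(buf));
	/* buf must stay live, and sg must be consumed synchronously,
	 * before this function returns. */
}
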
86646diff --git a/include/linux/sched.h b/include/linux/sched.h
86647index 0376b05..82054c2 100644
86648--- a/include/linux/sched.h
86649+++ b/include/linux/sched.h
86650@@ -131,6 +131,7 @@ struct fs_struct;
86651 struct perf_event_context;
86652 struct blk_plug;
86653 struct filename;
86654+struct linux_binprm;
86655
86656 #define VMACACHE_BITS 2
86657 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
86658@@ -374,7 +375,7 @@ extern char __sched_text_start[], __sched_text_end[];
86659 extern int in_sched_functions(unsigned long addr);
86660
86661 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
86662-extern signed long schedule_timeout(signed long timeout);
86663+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
86664 extern signed long schedule_timeout_interruptible(signed long timeout);
86665 extern signed long schedule_timeout_killable(signed long timeout);
86666 extern signed long schedule_timeout_uninterruptible(signed long timeout);
86667@@ -385,6 +386,19 @@ struct nsproxy;
86668 struct user_namespace;
86669
86670 #ifdef CONFIG_MMU
86671+
86672+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
86673+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
86674+#else
86675+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
86676+{
86677+ return 0;
86678+}
86679+#endif
86680+
86681+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
86682+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
86683+
86684 extern void arch_pick_mmap_layout(struct mm_struct *mm);
86685 extern unsigned long
86686 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
86687@@ -682,6 +696,17 @@ struct signal_struct {
86688 #ifdef CONFIG_TASKSTATS
86689 struct taskstats *stats;
86690 #endif
86691+
86692+#ifdef CONFIG_GRKERNSEC
86693+ u32 curr_ip;
86694+ u32 saved_ip;
86695+ u32 gr_saddr;
86696+ u32 gr_daddr;
86697+ u16 gr_sport;
86698+ u16 gr_dport;
86699+ u8 used_accept:1;
86700+#endif
86701+
86702 #ifdef CONFIG_AUDIT
86703 unsigned audit_tty;
86704 unsigned audit_tty_log_passwd;
86705@@ -708,7 +733,7 @@ struct signal_struct {
86706 struct mutex cred_guard_mutex; /* guard against foreign influences on
86707 * credential calculations
86708 * (notably. ptrace) */
86709-};
86710+} __randomize_layout;
86711
86712 /*
86713 * Bits in flags field of signal_struct.
86714@@ -761,6 +786,14 @@ struct user_struct {
86715 struct key *session_keyring; /* UID's default session keyring */
86716 #endif
86717
86718+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
86719+ unsigned char kernel_banned;
86720+#endif
86721+#ifdef CONFIG_GRKERNSEC_BRUTE
86722+ unsigned char suid_banned;
86723+ unsigned long suid_ban_expires;
86724+#endif
86725+
86726 /* Hash table maintenance information */
86727 struct hlist_node uidhash_node;
86728 kuid_t uid;
86729@@ -768,7 +801,7 @@ struct user_struct {
86730 #ifdef CONFIG_PERF_EVENTS
86731 atomic_long_t locked_vm;
86732 #endif
86733-};
86734+} __randomize_layout;
86735
86736 extern int uids_sysfs_init(void);
86737
86738@@ -1224,6 +1257,9 @@ enum perf_event_task_context {
86739 struct task_struct {
86740 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
86741 void *stack;
86742+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
86743+ void *lowmem_stack;
86744+#endif
86745 atomic_t usage;
86746 unsigned int flags; /* per process flags, defined below */
86747 unsigned int ptrace;
86748@@ -1349,8 +1385,8 @@ struct task_struct {
86749 struct list_head thread_node;
86750
86751 struct completion *vfork_done; /* for vfork() */
86752- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
86753- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
86754+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
86755+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
86756
86757 cputime_t utime, stime, utimescaled, stimescaled;
86758 cputime_t gtime;
86759@@ -1375,11 +1411,6 @@ struct task_struct {
86760 struct task_cputime cputime_expires;
86761 struct list_head cpu_timers[3];
86762
86763-/* process credentials */
86764- const struct cred __rcu *real_cred; /* objective and real subjective task
86765- * credentials (COW) */
86766- const struct cred __rcu *cred; /* effective (overridable) subjective task
86767- * credentials (COW) */
86768 char comm[TASK_COMM_LEN]; /* executable name excluding path
86769 - access with [gs]et_task_comm (which lock
86770 it with task_lock())
86771@@ -1396,6 +1427,10 @@ struct task_struct {
86772 #endif
86773 /* CPU-specific state of this task */
86774 struct thread_struct thread;
86775+/* thread_info moved to task_struct */
86776+#ifdef CONFIG_X86
86777+ struct thread_info tinfo;
86778+#endif
86779 /* filesystem information */
86780 struct fs_struct *fs;
86781 /* open file information */
86782@@ -1472,6 +1507,10 @@ struct task_struct {
86783 gfp_t lockdep_reclaim_gfp;
86784 #endif
86785
86786+/* process credentials */
86787+ const struct cred __rcu *real_cred; /* objective and real subjective task
86788+ * credentials (COW) */
86789+
86790 /* journalling filesystem info */
86791 void *journal_info;
86792
86793@@ -1510,6 +1549,10 @@ struct task_struct {
86794 /* cg_list protected by css_set_lock and tsk->alloc_lock */
86795 struct list_head cg_list;
86796 #endif
86797+
86798+ const struct cred __rcu *cred; /* effective (overridable) subjective task
86799+ * credentials (COW) */
86800+
86801 #ifdef CONFIG_FUTEX
86802 struct robust_list_head __user *robust_list;
86803 #ifdef CONFIG_COMPAT
86804@@ -1655,7 +1698,78 @@ struct task_struct {
86805 unsigned int sequential_io;
86806 unsigned int sequential_io_avg;
86807 #endif
86808-};
86809+
86810+#ifdef CONFIG_GRKERNSEC
86811+ /* grsecurity */
86812+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
86813+ u64 exec_id;
86814+#endif
86815+#ifdef CONFIG_GRKERNSEC_SETXID
86816+ const struct cred *delayed_cred;
86817+#endif
86818+ struct dentry *gr_chroot_dentry;
86819+ struct acl_subject_label *acl;
86820+ struct acl_subject_label *tmpacl;
86821+ struct acl_role_label *role;
86822+ struct file *exec_file;
86823+ unsigned long brute_expires;
86824+ u16 acl_role_id;
86825+ u8 inherited;
86826+ /* is this the task that authenticated to the special role */
86827+ u8 acl_sp_role;
86828+ u8 is_writable;
86829+ u8 brute;
86830+ u8 gr_is_chrooted;
86831+#endif
86832+
86833+} __randomize_layout;
86834+
86835+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
86836+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
86837+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
86838+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
86839+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
86840+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
86841+
86842+#ifdef CONFIG_PAX_SOFTMODE
86843+extern int pax_softmode;
86844+#endif
86845+
86846+extern int pax_check_flags(unsigned long *);
86847+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
86848+
86849+/* if tsk != current then task_lock must be held on it */
86850+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
86851+static inline unsigned long pax_get_flags(struct task_struct *tsk)
86852+{
86853+ if (likely(tsk->mm))
86854+ return tsk->mm->pax_flags;
86855+ else
86856+ return 0UL;
86857+}
86858+
86859+/* if tsk != current then task_lock must be held on it */
86860+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
86861+{
86862+ if (likely(tsk->mm)) {
86863+ tsk->mm->pax_flags = flags;
86864+ return 0;
86865+ }
86866+ return -EINVAL;
86867+}
86868+#endif
86869+
86870+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
86871+extern void pax_set_initial_flags(struct linux_binprm *bprm);
86872+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
86873+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
86874+#endif
86875+
86876+struct path;
86877+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
86878+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
86879+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
86880+extern void pax_report_refcount_overflow(struct pt_regs *regs);
86881
86882 /* Future-safe accessor for struct task_struct's cpus_allowed. */
86883 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
86884@@ -1737,7 +1851,7 @@ struct pid_namespace;
86885 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
86886 struct pid_namespace *ns);
86887
86888-static inline pid_t task_pid_nr(struct task_struct *tsk)
86889+static inline pid_t task_pid_nr(const struct task_struct *tsk)
86890 {
86891 return tsk->pid;
86892 }
86893@@ -2084,6 +2198,25 @@ extern u64 sched_clock_cpu(int cpu);
86894
86895 extern void sched_clock_init(void);
86896
86897+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
86898+static inline void populate_stack(void)
86899+{
86900+ struct task_struct *curtask = current;
86901+ int c;
86902+ int *ptr = curtask->stack;
86903+ int *end = curtask->stack + THREAD_SIZE;
86904+
86905+ while (ptr < end) {
86906+ c = *(volatile int *)ptr;
86907+ ptr += PAGE_SIZE/sizeof(int);
86908+ }
86909+}
86910+#else
86911+static inline void populate_stack(void)
86912+{
86913+}
86914+#endif
86915+
86916 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
86917 static inline void sched_clock_tick(void)
86918 {
86919@@ -2217,7 +2350,9 @@ void yield(void);
86920 extern struct exec_domain default_exec_domain;
86921
86922 union thread_union {
86923+#ifndef CONFIG_X86
86924 struct thread_info thread_info;
86925+#endif
86926 unsigned long stack[THREAD_SIZE/sizeof(long)];
86927 };
86928
86929@@ -2250,6 +2385,7 @@ extern struct pid_namespace init_pid_ns;
86930 */
86931
86932 extern struct task_struct *find_task_by_vpid(pid_t nr);
86933+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
86934 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
86935 struct pid_namespace *ns);
86936
86937@@ -2412,7 +2548,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
86938 extern void exit_itimers(struct signal_struct *);
86939 extern void flush_itimer_signals(void);
86940
86941-extern void do_group_exit(int);
86942+extern __noreturn void do_group_exit(int);
86943
86944 extern int do_execve(struct filename *,
86945 const char __user * const __user *,
86946@@ -2614,9 +2750,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
86947
86948 #endif
86949
86950-static inline int object_is_on_stack(void *obj)
86951+static inline int object_starts_on_stack(const void *obj)
86952 {
86953- void *stack = task_stack_page(current);
86954+ const void *stack = task_stack_page(current);
86955
86956 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
86957 }
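
Two details in this sched.h hunk are worth unpacking: object_is_on_stack() becomes object_starts_on_stack() (it only tests the start address, which is exactly what the sg_set_buf() hunk above relies on), and populate_stack() pre-faults the vmalloc-backed kernel stack by issuing one volatile read per page. A worked walk, assuming THREAD_SIZE = 8192 and PAGE_SIZE = 4096:

/* populate_stack() stride:
 *
 *   ptr = stack + 0     -> volatile read faults in stack page 0
 *   ptr = stack + 4096  -> volatile read faults in stack page 1
 *   ptr = stack + 8192  == end, loop terminates
 *
 * Afterwards, code that cannot tolerate a vmalloc fault (e.g. early
 * entry paths) runs with the whole stack guaranteed resident. */
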
86958diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
86959index 596a0e0..bea77ec 100644
86960--- a/include/linux/sched/sysctl.h
86961+++ b/include/linux/sched/sysctl.h
86962@@ -34,6 +34,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
86963 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
86964
86965 extern int sysctl_max_map_count;
86966+extern unsigned long sysctl_heap_stack_gap;
86967
86968 extern unsigned int sysctl_sched_latency;
86969 extern unsigned int sysctl_sched_min_granularity;
86970diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
86971index 4054b09..6f19cfd 100644
86972--- a/include/linux/seccomp.h
86973+++ b/include/linux/seccomp.h
86974@@ -76,6 +76,7 @@ static inline int seccomp_mode(struct seccomp *s)
86975 #ifdef CONFIG_SECCOMP_FILTER
86976 extern void put_seccomp_filter(struct task_struct *tsk);
86977 extern void get_seccomp_filter(struct task_struct *tsk);
86978+extern u32 seccomp_bpf_load(int off);
86979 #else /* CONFIG_SECCOMP_FILTER */
86980 static inline void put_seccomp_filter(struct task_struct *tsk)
86981 {
86982diff --git a/include/linux/security.h b/include/linux/security.h
86983index 9c6b972..7e7c704 100644
86984--- a/include/linux/security.h
86985+++ b/include/linux/security.h
86986@@ -27,6 +27,7 @@
86987 #include <linux/slab.h>
86988 #include <linux/err.h>
86989 #include <linux/string.h>
86990+#include <linux/grsecurity.h>
86991
86992 struct linux_binprm;
86993 struct cred;
86994@@ -116,8 +117,6 @@ struct seq_file;
86995
86996 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
86997
86998-void reset_security_ops(void);
86999-
87000 #ifdef CONFIG_MMU
87001 extern unsigned long mmap_min_addr;
87002 extern unsigned long dac_mmap_min_addr;
87003@@ -1719,7 +1718,7 @@ struct security_operations {
87004 struct audit_context *actx);
87005 void (*audit_rule_free) (void *lsmrule);
87006 #endif /* CONFIG_AUDIT */
87007-};
87008+} __randomize_layout;
87009
87010 /* prototypes */
87011 extern int security_init(void);
87012diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
87013index dc368b8..e895209 100644
87014--- a/include/linux/semaphore.h
87015+++ b/include/linux/semaphore.h
87016@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
87017 }
87018
87019 extern void down(struct semaphore *sem);
87020-extern int __must_check down_interruptible(struct semaphore *sem);
87021+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
87022 extern int __must_check down_killable(struct semaphore *sem);
87023 extern int __must_check down_trylock(struct semaphore *sem);
87024 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
87025diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
87026index 52e0097..383f21d 100644
87027--- a/include/linux/seq_file.h
87028+++ b/include/linux/seq_file.h
87029@@ -27,6 +27,9 @@ struct seq_file {
87030 struct mutex lock;
87031 const struct seq_operations *op;
87032 int poll_event;
87033+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
87034+ u64 exec_id;
87035+#endif
87036 #ifdef CONFIG_USER_NS
87037 struct user_namespace *user_ns;
87038 #endif
87039@@ -39,6 +42,7 @@ struct seq_operations {
87040 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
87041 int (*show) (struct seq_file *m, void *v);
87042 };
87043+typedef struct seq_operations __no_const seq_operations_no_const;
87044
87045 #define SEQ_SKIP 1
87046
87047@@ -96,6 +100,7 @@ void seq_pad(struct seq_file *m, char c);
87048
87049 char *mangle_path(char *s, const char *p, const char *esc);
87050 int seq_open(struct file *, const struct seq_operations *);
87051+int seq_open_restrict(struct file *, const struct seq_operations *);
87052 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
87053 loff_t seq_lseek(struct file *, loff_t, int);
87054 int seq_release(struct inode *, struct file *);
87055@@ -138,6 +143,7 @@ static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
87056 }
87057
87058 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
87059+int single_open_restrict(struct file *, int (*)(struct seq_file *, void *), void *);
87060 int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
87061 int single_release(struct inode *, struct file *);
87062 void *__seq_open_private(struct file *, const struct seq_operations *, int);
87063diff --git a/include/linux/shm.h b/include/linux/shm.h
87064index 57d7770..0936af6 100644
87065--- a/include/linux/shm.h
87066+++ b/include/linux/shm.h
87067@@ -20,6 +20,10 @@ struct shmid_kernel /* private to the kernel */
87068
87069 /* The task created the shm object. NULL if the task is dead. */
87070 struct task_struct *shm_creator;
87071+#ifdef CONFIG_GRKERNSEC
87072+ time_t shm_createtime;
87073+ pid_t shm_lapid;
87074+#endif
87075 };
87076
87077 /* shm_mode upper byte flags */
87078diff --git a/include/linux/signal.h b/include/linux/signal.h
87079index c9e6536..923b302 100644
87080--- a/include/linux/signal.h
87081+++ b/include/linux/signal.h
87082@@ -293,7 +293,7 @@ static inline void allow_signal(int sig)
87083 * know it'll be handled, so that they don't get converted to
87084 * SIGKILL or just silently dropped.
87085 */
87086- kernel_sigaction(sig, (__force __sighandler_t)2);
87087+ kernel_sigaction(sig, (__force_user __sighandler_t)2);
87088 }
87089
87090 static inline void disallow_signal(int sig)
87091diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
87092index ec89301..4fd29a6 100644
87093--- a/include/linux/skbuff.h
87094+++ b/include/linux/skbuff.h
87095@@ -725,7 +725,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
87096 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
87097 int node);
87098 struct sk_buff *build_skb(void *data, unsigned int frag_size);
87099-static inline struct sk_buff *alloc_skb(unsigned int size,
87100+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
87101 gfp_t priority)
87102 {
87103 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
87104@@ -1839,7 +1839,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
87105 return skb->inner_transport_header - skb->inner_network_header;
87106 }
87107
87108-static inline int skb_network_offset(const struct sk_buff *skb)
87109+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
87110 {
87111 return skb_network_header(skb) - skb->data;
87112 }
87113@@ -1911,7 +1911,7 @@ static inline void skb_pop_rcv_encapsulation(struct sk_buff *skb)
87114 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
87115 */
87116 #ifndef NET_SKB_PAD
87117-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
87118+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
87119 #endif
87120
87121 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
87122@@ -2518,7 +2518,7 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
87123 int *err);
87124 unsigned int datagram_poll(struct file *file, struct socket *sock,
87125 struct poll_table_struct *wait);
87126-int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
87127+int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
87128 struct iovec *to, int size);
87129 int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
87130 struct iovec *iov);
87131@@ -2664,6 +2664,8 @@ static inline ktime_t net_invalid_timestamp(void)
87132 return ktime_set(0, 0);
87133 }
87134
87135+void skb_timestamping_init(void);
87136+
87137 #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
87138
87139 void skb_clone_tx_timestamp(struct sk_buff *skb);
87140@@ -2907,6 +2909,9 @@ static inline void nf_reset(struct sk_buff *skb)
87141 nf_bridge_put(skb->nf_bridge);
87142 skb->nf_bridge = NULL;
87143 #endif
87144+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
87145+ skb->nf_trace = 0;
87146+#endif
87147 }
87148
87149 static inline void nf_reset_trace(struct sk_buff *skb)
87150diff --git a/include/linux/slab.h b/include/linux/slab.h
87151index 1d9abb7..b1e8b10 100644
87152--- a/include/linux/slab.h
87153+++ b/include/linux/slab.h
87154@@ -14,15 +14,29 @@
87155 #include <linux/gfp.h>
87156 #include <linux/types.h>
87157 #include <linux/workqueue.h>
87158-
87159+#include <linux/err.h>
87160
87161 /*
87162 * Flags to pass to kmem_cache_create().
87163 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
87164 */
87165 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
87166+
87167+#ifdef CONFIG_PAX_USERCOPY_SLABS
87168+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
87169+#else
87170+#define SLAB_USERCOPY 0x00000000UL
87171+#endif
87172+
87173 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
87174 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
87175+
87176+#ifdef CONFIG_PAX_MEMORY_SANITIZE
87177+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
87178+#else
87179+#define SLAB_NO_SANITIZE 0x00000000UL
87180+#endif
87181+
87182 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
87183 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
87184 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
87185@@ -98,10 +112,13 @@
87186 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
87187 * Both make kfree a no-op.
87188 */
87189-#define ZERO_SIZE_PTR ((void *)16)
87190+#define ZERO_SIZE_PTR \
87191+({ \
87192+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
87193+ (void *)(-MAX_ERRNO-1L); \
87194+})
87195
87196-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
87197- (unsigned long)ZERO_SIZE_PTR)
87198+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
87199
87200 #include <linux/kmemleak.h>
87201
87202@@ -144,6 +161,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
87203 void kfree(const void *);
87204 void kzfree(const void *);
87205 size_t ksize(const void *);
87206+const char *check_heap_object(const void *ptr, unsigned long n);
87207+bool is_usercopy_object(const void *ptr);
87208
87209 /*
87210 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
87211@@ -176,7 +195,7 @@ struct kmem_cache {
87212 unsigned int align; /* Alignment as calculated */
87213 unsigned long flags; /* Active flags on the slab */
87214 const char *name; /* Slab name for sysfs */
87215- int refcount; /* Use counter */
87216+ atomic_t refcount; /* Use counter */
87217 void (*ctor)(void *); /* Called on object slot creation */
87218 struct list_head list; /* List of all slab caches on the system */
87219 };
87220@@ -261,6 +280,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
87221 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
87222 #endif
87223
87224+#ifdef CONFIG_PAX_USERCOPY_SLABS
87225+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
87226+#endif
87227+
87228 /*
87229 * Figure out which kmalloc slab an allocation of a certain size
87230 * belongs to.
87231@@ -269,7 +292,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
87232 * 2 = 120 .. 192 bytes
87233 * n = 2^(n-1) .. 2^n -1
87234 */
87235-static __always_inline int kmalloc_index(size_t size)
87236+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
87237 {
87238 if (!size)
87239 return 0;
87240@@ -312,11 +335,11 @@ static __always_inline int kmalloc_index(size_t size)
87241 }
87242 #endif /* !CONFIG_SLOB */
87243
87244-void *__kmalloc(size_t size, gfp_t flags);
87245+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
87246 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
87247
87248 #ifdef CONFIG_NUMA
87249-void *__kmalloc_node(size_t size, gfp_t flags, int node);
87250+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
87251 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
87252 #else
87253 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
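
Moving ZERO_SIZE_PTR from 16 up to -MAX_ERRNO-1 parks it directly below the error-pointer range, and the rewritten ZERO_OR_NULL_PTR() collapses to one unsigned compare that is true for NULL, ZERO_SIZE_PTR, and everything above it, i.e. all IS_ERR() values as well. A worked check, hedged (64-bit, MAX_ERRNO = 4095):

/*   ZERO_SIZE_PTR        = (void *)(-4096) = 0xfffffffffffff000
 *   ZERO_OR_NULL_PTR(p)  = ((unsigned long)p - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
 *
 *   p = NULL             : 0 - 1 wraps to ULONG_MAX   -> true
 *   p = ZERO_SIZE_PTR    : -4097, equals the bound    -> true
 *   p = ERR_PTR(-EIO)    : -5 - 1 = -6                -> true
 *   p = real slab object : far below the bound        -> false
 *
 * So kfree() stays a no-op for NULL, zero-size, and ERR_PTR values
 * alike, while the BUILD_BUG_ON in ZERO_SIZE_PTR sanity-checks the
 * chosen constant against PAGE_MASK at compile time. */
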
87254diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
87255index 8235dfb..47ce586 100644
87256--- a/include/linux/slab_def.h
87257+++ b/include/linux/slab_def.h
87258@@ -38,7 +38,7 @@ struct kmem_cache {
87259 /* 4) cache creation/removal */
87260 const char *name;
87261 struct list_head list;
87262- int refcount;
87263+ atomic_t refcount;
87264 int object_size;
87265 int align;
87266
87267@@ -54,10 +54,14 @@ struct kmem_cache {
87268 unsigned long node_allocs;
87269 unsigned long node_frees;
87270 unsigned long node_overflow;
87271- atomic_t allochit;
87272- atomic_t allocmiss;
87273- atomic_t freehit;
87274- atomic_t freemiss;
87275+ atomic_unchecked_t allochit;
87276+ atomic_unchecked_t allocmiss;
87277+ atomic_unchecked_t freehit;
87278+ atomic_unchecked_t freemiss;
87279+#ifdef CONFIG_PAX_MEMORY_SANITIZE
87280+ atomic_unchecked_t sanitized;
87281+ atomic_unchecked_t not_sanitized;
87282+#endif
87283
87284 /*
87285 * If debugging is enabled, then the allocator can add additional
87286diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
87287index d82abd4..408c3a0 100644
87288--- a/include/linux/slub_def.h
87289+++ b/include/linux/slub_def.h
87290@@ -74,7 +74,7 @@ struct kmem_cache {
87291 struct kmem_cache_order_objects max;
87292 struct kmem_cache_order_objects min;
87293 gfp_t allocflags; /* gfp flags to use on each alloc */
87294- int refcount; /* Refcount for slab cache destroy */
87295+ atomic_t refcount; /* Refcount for slab cache destroy */
87296 void (*ctor)(void *);
87297 int inuse; /* Offset to metadata */
87298 int align; /* Alignment */
87299diff --git a/include/linux/smp.h b/include/linux/smp.h
87300index 34347f2..8739978 100644
87301--- a/include/linux/smp.h
87302+++ b/include/linux/smp.h
87303@@ -174,7 +174,9 @@ static inline void kick_all_cpus_sync(void) { }
87304 #endif
87305
87306 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
87307+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
87308 #define put_cpu() preempt_enable()
87309+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
87310
87311 /*
87312 * Callback to arch code if there's nosmp or maxcpus=0 on the
87313diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
87314index 46cca4c..3323536 100644
87315--- a/include/linux/sock_diag.h
87316+++ b/include/linux/sock_diag.h
87317@@ -11,7 +11,7 @@ struct sock;
87318 struct sock_diag_handler {
87319 __u8 family;
87320 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
87321-};
87322+} __do_const;
87323
87324 int sock_diag_register(const struct sock_diag_handler *h);
87325 void sock_diag_unregister(const struct sock_diag_handler *h);
87326diff --git a/include/linux/sonet.h b/include/linux/sonet.h
87327index 680f9a3..f13aeb0 100644
87328--- a/include/linux/sonet.h
87329+++ b/include/linux/sonet.h
87330@@ -7,7 +7,7 @@
87331 #include <uapi/linux/sonet.h>
87332
87333 struct k_sonet_stats {
87334-#define __HANDLE_ITEM(i) atomic_t i
87335+#define __HANDLE_ITEM(i) atomic_unchecked_t i
87336 __SONET_ITEMS
87337 #undef __HANDLE_ITEM
87338 };
87339diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
87340index 07d8e53..dc934c9 100644
87341--- a/include/linux/sunrpc/addr.h
87342+++ b/include/linux/sunrpc/addr.h
87343@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
87344 {
87345 switch (sap->sa_family) {
87346 case AF_INET:
87347- return ntohs(((struct sockaddr_in *)sap)->sin_port);
87348+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
87349 case AF_INET6:
87350- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
87351+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
87352 }
87353 return 0;
87354 }
87355@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
87356 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
87357 const struct sockaddr *src)
87358 {
87359- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
87360+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
87361 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
87362
87363 dsin->sin_family = ssin->sin_family;
87364@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
87365 if (sa->sa_family != AF_INET6)
87366 return 0;
87367
87368- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
87369+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
87370 }
87371
87372 #endif /* _LINUX_SUNRPC_ADDR_H */
87373diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
87374index 70736b9..37f33db 100644
87375--- a/include/linux/sunrpc/clnt.h
87376+++ b/include/linux/sunrpc/clnt.h
87377@@ -97,7 +97,7 @@ struct rpc_procinfo {
87378 unsigned int p_timer; /* Which RTT timer to use */
87379 u32 p_statidx; /* Which procedure to account */
87380 const char * p_name; /* name of procedure */
87381-};
87382+} __do_const;
87383
87384 #ifdef __KERNEL__
87385
87386diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
87387index 1bc7cd0..7912dc2 100644
87388--- a/include/linux/sunrpc/svc.h
87389+++ b/include/linux/sunrpc/svc.h
87390@@ -417,7 +417,7 @@ struct svc_procedure {
87391 unsigned int pc_count; /* call count */
87392 unsigned int pc_cachetype; /* cache info (NFS) */
87393 unsigned int pc_xdrressize; /* maximum size of XDR reply */
87394-};
87395+} __do_const;
87396
87397 /*
87398 * Function prototypes.
87399diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
87400index 5cf99a0..c0a1b98 100644
87401--- a/include/linux/sunrpc/svc_rdma.h
87402+++ b/include/linux/sunrpc/svc_rdma.h
87403@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
87404 extern unsigned int svcrdma_max_requests;
87405 extern unsigned int svcrdma_max_req_size;
87406
87407-extern atomic_t rdma_stat_recv;
87408-extern atomic_t rdma_stat_read;
87409-extern atomic_t rdma_stat_write;
87410-extern atomic_t rdma_stat_sq_starve;
87411-extern atomic_t rdma_stat_rq_starve;
87412-extern atomic_t rdma_stat_rq_poll;
87413-extern atomic_t rdma_stat_rq_prod;
87414-extern atomic_t rdma_stat_sq_poll;
87415-extern atomic_t rdma_stat_sq_prod;
87416+extern atomic_unchecked_t rdma_stat_recv;
87417+extern atomic_unchecked_t rdma_stat_read;
87418+extern atomic_unchecked_t rdma_stat_write;
87419+extern atomic_unchecked_t rdma_stat_sq_starve;
87420+extern atomic_unchecked_t rdma_stat_rq_starve;
87421+extern atomic_unchecked_t rdma_stat_rq_poll;
87422+extern atomic_unchecked_t rdma_stat_rq_prod;
87423+extern atomic_unchecked_t rdma_stat_sq_poll;
87424+extern atomic_unchecked_t rdma_stat_sq_prod;
87425
87426 #define RPCRDMA_VERSION 1
87427
87428diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
87429index 8d71d65..f79586e 100644
87430--- a/include/linux/sunrpc/svcauth.h
87431+++ b/include/linux/sunrpc/svcauth.h
87432@@ -120,7 +120,7 @@ struct auth_ops {
87433 int (*release)(struct svc_rqst *rq);
87434 void (*domain_release)(struct auth_domain *);
87435 int (*set_client)(struct svc_rqst *rq);
87436-};
87437+} __do_const;
87438
87439 #define SVC_GARBAGE 1
87440 #define SVC_SYSERR 2
87441diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
87442index e7a018e..49f8b17 100644
87443--- a/include/linux/swiotlb.h
87444+++ b/include/linux/swiotlb.h
87445@@ -60,7 +60,8 @@ extern void
87446
87447 extern void
87448 swiotlb_free_coherent(struct device *hwdev, size_t size,
87449- void *vaddr, dma_addr_t dma_handle);
87450+ void *vaddr, dma_addr_t dma_handle,
87451+ struct dma_attrs *attrs);
87452
87453 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
87454 unsigned long offset, size_t size,
87455diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
87456index b0881a0..559a440 100644
87457--- a/include/linux/syscalls.h
87458+++ b/include/linux/syscalls.h
87459@@ -98,10 +98,16 @@ struct sigaltstack;
87460 #define __MAP(n,...) __MAP##n(__VA_ARGS__)
87461
87462 #define __SC_DECL(t, a) t a
87463+#define __TYPE_IS_U(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
87464 #define __TYPE_IS_L(t) (__same_type((t)0, 0L))
87465 #define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
87466 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
87467-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
87468+#define __SC_LONG(t, a) __typeof( \
87469+ __builtin_choose_expr( \
87470+ sizeof(t) > sizeof(int), \
87471+ (t) 0, \
87472+ __builtin_choose_expr(__TYPE_IS_U(t), 0UL, 0L) \
87473+ )) a
87474 #define __SC_CAST(t, a) (t) a
87475 #define __SC_ARGS(t, a) a
87476 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
87477@@ -379,11 +385,11 @@ asmlinkage long sys_sync(void);
87478 asmlinkage long sys_fsync(unsigned int fd);
87479 asmlinkage long sys_fdatasync(unsigned int fd);
87480 asmlinkage long sys_bdflush(int func, long data);
87481-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
87482- char __user *type, unsigned long flags,
87483+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
87484+ const char __user *type, unsigned long flags,
87485 void __user *data);
87486-asmlinkage long sys_umount(char __user *name, int flags);
87487-asmlinkage long sys_oldumount(char __user *name);
87488+asmlinkage long sys_umount(const char __user *name, int flags);
87489+asmlinkage long sys_oldumount(const char __user *name);
87490 asmlinkage long sys_truncate(const char __user *path, long length);
87491 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
87492 asmlinkage long sys_stat(const char __user *filename,
87493@@ -595,7 +601,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
87494 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
87495 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
87496 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
87497- struct sockaddr __user *, int);
87498+ struct sockaddr __user *, int) __intentional_overflow(0);
87499 asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
87500 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
87501 unsigned int vlen, unsigned flags);
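The __SC_LONG rewrite fixes the signedness of syscall argument widening: any type wider than int is kept as-is, and int-or-narrower unsigned types (matched by the new __TYPE_IS_U) widen to unsigned long instead of being sign-extended through plain long. What the macro now yields for common cases:

        __SC_LONG(int, fd)              /* long fd            (as before)     */
        __SC_LONG(unsigned int, len)    /* unsigned long len  (was: long len) */
        __SC_LONG(loff_t, off)          /* loff_t off         (sizeof > int)  */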
87502diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
87503index 27b3b0b..e093dd9 100644
87504--- a/include/linux/syscore_ops.h
87505+++ b/include/linux/syscore_ops.h
87506@@ -16,7 +16,7 @@ struct syscore_ops {
87507 int (*suspend)(void);
87508 void (*resume)(void);
87509 void (*shutdown)(void);
87510-};
87511+} __do_const;
87512
87513 extern void register_syscore_ops(struct syscore_ops *ops);
87514 extern void unregister_syscore_ops(struct syscore_ops *ops);
87515diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
87516index 14a8ff2..fa95f3a 100644
87517--- a/include/linux/sysctl.h
87518+++ b/include/linux/sysctl.h
87519@@ -34,13 +34,13 @@ struct ctl_table_root;
87520 struct ctl_table_header;
87521 struct ctl_dir;
87522
87523-typedef struct ctl_table ctl_table;
87524-
87525 typedef int proc_handler (struct ctl_table *ctl, int write,
87526 void __user *buffer, size_t *lenp, loff_t *ppos);
87527
87528 extern int proc_dostring(struct ctl_table *, int,
87529 void __user *, size_t *, loff_t *);
87530+extern int proc_dostring_modpriv(struct ctl_table *, int,
87531+ void __user *, size_t *, loff_t *);
87532 extern int proc_dointvec(struct ctl_table *, int,
87533 void __user *, size_t *, loff_t *);
87534 extern int proc_dointvec_minmax(struct ctl_table *, int,
87535@@ -115,7 +115,9 @@ struct ctl_table
87536 struct ctl_table_poll *poll;
87537 void *extra1;
87538 void *extra2;
87539-};
87540+} __do_const __randomize_layout;
87541+typedef struct ctl_table __no_const ctl_table_no_const;
87542+typedef struct ctl_table ctl_table;
87543
87544 struct ctl_node {
87545 struct rb_node node;
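With struct ctl_table force-constified, code that must build or patch a table at runtime goes through the ctl_table_no_const typedef added here; the per-netns hunks later in this section (conntrack, ipvs) adopt exactly that type for their writable pointers. The usual pattern, sketched with an illustrative tunable:

        ctl_table_no_const *tbl;                /* inside some per-net init */

        tbl = kmemdup(template_table, sizeof(template_table), GFP_KERNEL);
        if (tbl)
                tbl[0].data = &net->my_tunable; /* legal: tbl is not constified */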
87546diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
87547index f97d0db..c1187dc 100644
87548--- a/include/linux/sysfs.h
87549+++ b/include/linux/sysfs.h
87550@@ -34,7 +34,8 @@ struct attribute {
87551 struct lock_class_key *key;
87552 struct lock_class_key skey;
87553 #endif
87554-};
87555+} __do_const;
87556+typedef struct attribute __no_const attribute_no_const;
87557
87558 /**
87559 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
87560@@ -63,7 +64,8 @@ struct attribute_group {
87561 struct attribute *, int);
87562 struct attribute **attrs;
87563 struct bin_attribute **bin_attrs;
87564-};
87565+} __do_const;
87566+typedef struct attribute_group __no_const attribute_group_no_const;
87567
87568 /**
87569 * Use these macros to make defining attributes easier. See include/linux/device.h
87570@@ -128,7 +130,8 @@ struct bin_attribute {
87571 char *, loff_t, size_t);
87572 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
87573 struct vm_area_struct *vma);
87574-};
87575+} __do_const;
87576+typedef struct bin_attribute __no_const bin_attribute_no_const;
87577
87578 /**
87579 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
87580diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
87581index 387fa7d..3fcde6b 100644
87582--- a/include/linux/sysrq.h
87583+++ b/include/linux/sysrq.h
87584@@ -16,6 +16,7 @@
87585
87586 #include <linux/errno.h>
87587 #include <linux/types.h>
87588+#include <linux/compiler.h>
87589
87590 /* Possible values of bitmask for enabling sysrq functions */
87591 /* 0x0001 is reserved for enable everything */
87592@@ -33,7 +34,7 @@ struct sysrq_key_op {
87593 char *help_msg;
87594 char *action_msg;
87595 int enable_mask;
87596-};
87597+} __do_const;
87598
87599 #ifdef CONFIG_MAGIC_SYSRQ
87600
87601diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
87602index ff307b5..f1a4468 100644
87603--- a/include/linux/thread_info.h
87604+++ b/include/linux/thread_info.h
87605@@ -145,6 +145,13 @@ static inline bool test_and_clear_restore_sigmask(void)
87606 #error "no set_restore_sigmask() provided and default one won't work"
87607 #endif
87608
87609+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size);
87610+
87611+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
87612+{
87613+ __check_object_size(ptr, n, to_user, __builtin_constant_p(n));
87614+}
87615+
87616 #endif /* __KERNEL__ */
87617
87618 #endif /* _LINUX_THREAD_INFO_H */
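check_object_size() is the PAX_USERCOPY entry point: every user-copy crossing has its kernel-side buffer vetted (slab object bounds, stack frame bounds) before any bytes move. The arch usercopy hunks in this patch invoke it roughly like this (a sketch, not the exact x86 code):

        static inline unsigned long
        copy_from_user(void *to, const void __user *from, unsigned long n)
        {
                check_object_size(to, n, false); /* false: data flows into the kernel */
                return _copy_from_user(to, from, n);
        }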
87619diff --git a/include/linux/tty.h b/include/linux/tty.h
87620index 1c3316a..ae83b9f 100644
87621--- a/include/linux/tty.h
87622+++ b/include/linux/tty.h
87623@@ -202,7 +202,7 @@ struct tty_port {
87624 const struct tty_port_operations *ops; /* Port operations */
87625 spinlock_t lock; /* Lock protecting tty field */
87626 int blocked_open; /* Waiting to open */
87627- int count; /* Usage count */
87628+ atomic_t count; /* Usage count */
87629 wait_queue_head_t open_wait; /* Open waiters */
87630 wait_queue_head_t close_wait; /* Close waiters */
87631 wait_queue_head_t delta_msr_wait; /* Modem status change */
87632@@ -284,7 +284,7 @@ struct tty_struct {
87633 /* If the tty has a pending do_SAK, queue it here - akpm */
87634 struct work_struct SAK_work;
87635 struct tty_port *port;
87636-};
87637+} __randomize_layout;
87638
87639 /* Each of a tty's open files has private_data pointing to tty_file_private */
87640 struct tty_file_private {
87641@@ -550,7 +550,7 @@ extern int tty_port_open(struct tty_port *port,
87642 struct tty_struct *tty, struct file *filp);
87643 static inline int tty_port_users(struct tty_port *port)
87644 {
87645- return port->count + port->blocked_open;
87646+ return atomic_read(&port->count) + port->blocked_open;
87647 }
87648
87649 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
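Making tty_port.count an atomic_t forces every open/close path that did bare increments to be converted in the driver hunks elsewhere in this patch; tty_port_users() above already reads it via atomic_read(). The call-site conversion looks like (sketch; the teardown callee is hypothetical):

        /* was: port->count++; */
        atomic_inc(&port->count);

        /* was: if (--port->count == 0) */
        if (atomic_dec_return(&port->count) == 0)
                do_port_shutdown(port);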
87650diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
87651index 756a609..89db85e 100644
87652--- a/include/linux/tty_driver.h
87653+++ b/include/linux/tty_driver.h
87654@@ -285,7 +285,7 @@ struct tty_operations {
87655 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
87656 #endif
87657 const struct file_operations *proc_fops;
87658-};
87659+} __do_const __randomize_layout;
87660
87661 struct tty_driver {
87662 int magic; /* magic number for this structure */
87663@@ -319,7 +319,7 @@ struct tty_driver {
87664
87665 const struct tty_operations *ops;
87666 struct list_head tty_drivers;
87667-};
87668+} __randomize_layout;
87669
87670 extern struct list_head tty_drivers;
87671
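__randomize_layout hands these structures to the RANDSTRUCT plugin, which shuffles member order per build, seeded by the hash that vermagic.h below bakes into the module magic. The practical constraint is that randomized structs only accept designated initializers, which the kernel's ops tables already use (my_open/my_close are illustrative):

        static const struct tty_operations my_ops = {
                .open  = my_open,       /* named fields survive reordering */
                .close = my_close,
        };
        /* a positional { my_open, my_close, ... } initializer is
           rejected at build time once the layout is randomized */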
87672diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
87673index 00c9d68..bc0188b 100644
87674--- a/include/linux/tty_ldisc.h
87675+++ b/include/linux/tty_ldisc.h
87676@@ -215,7 +215,7 @@ struct tty_ldisc_ops {
87677
87678 struct module *owner;
87679
87680- int refcount;
87681+ atomic_t refcount;
87682 };
87683
87684 struct tty_ldisc {
87685diff --git a/include/linux/types.h b/include/linux/types.h
87686index a0bb704..f511c77 100644
87687--- a/include/linux/types.h
87688+++ b/include/linux/types.h
87689@@ -177,10 +177,26 @@ typedef struct {
87690 int counter;
87691 } atomic_t;
87692
87693+#ifdef CONFIG_PAX_REFCOUNT
87694+typedef struct {
87695+ int counter;
87696+} atomic_unchecked_t;
87697+#else
87698+typedef atomic_t atomic_unchecked_t;
87699+#endif
87700+
87701 #ifdef CONFIG_64BIT
87702 typedef struct {
87703 long counter;
87704 } atomic64_t;
87705+
87706+#ifdef CONFIG_PAX_REFCOUNT
87707+typedef struct {
87708+ long counter;
87709+} atomic64_unchecked_t;
87710+#else
87711+typedef atomic64_t atomic64_unchecked_t;
87712+#endif
87713 #endif
87714
87715 struct list_head {
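This is the core of PAX_REFCOUNT: atomic_t keeps its layout but its operations gain overflow instrumentation (the arch hunks in this patch add the trapping checks), while atomic_unchecked_t and the *_unchecked ops retain plain wrapping semantics for counters that are statistics rather than object lifetimes; most conversions in this section are of that kind. In short:

        atomic_t refs = ATOMIC_INIT(1);           /* lifetime: overflow must trap */
        atomic_unchecked_t pkts = ATOMIC_INIT(0); /* statistic: wrap is harmless  */

        atomic_inc(&refs);            /* checked under CONFIG_PAX_REFCOUNT */
        atomic_inc_unchecked(&pkts);  /* never instrumented */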
87716diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
87717index ecd3319..8a36ded 100644
87718--- a/include/linux/uaccess.h
87719+++ b/include/linux/uaccess.h
87720@@ -75,11 +75,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
87721 long ret; \
87722 mm_segment_t old_fs = get_fs(); \
87723 \
87724- set_fs(KERNEL_DS); \
87725 pagefault_disable(); \
87726- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
87727- pagefault_enable(); \
87728+ set_fs(KERNEL_DS); \
87729+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
87730 set_fs(old_fs); \
87731+ pagefault_enable(); \
87732 ret; \
87733 })
87734
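The reorder in probe_kernel_address() nests the KERNEL_DS window strictly inside the pagefault-disabled region (disable faults, widen the limit, copy, restore the limit, re-enable faults), so the widened address limit is never live while faulting is still possible; the cast also picks up the __force_user sparse annotation. Callers are unaffected; an x86-flavoured example:

        unsigned long insn;

        /* peek at a possibly-unmapped kernel address without oopsing */
        if (probe_kernel_address((unsigned long *)regs->ip, insn))
                return;         /* the read faulted */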
87735diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
87736index 2d1f9b6..d7a9fce 100644
87737--- a/include/linux/uidgid.h
87738+++ b/include/linux/uidgid.h
87739@@ -175,4 +175,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
87740
87741 #endif /* CONFIG_USER_NS */
87742
87743+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
87744+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
87745+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
87746+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
87747+
87748 #endif /* _LINUX_UIDGID_H */
87749diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
87750index 99c1b4d..562e6f3 100644
87751--- a/include/linux/unaligned/access_ok.h
87752+++ b/include/linux/unaligned/access_ok.h
87753@@ -4,34 +4,34 @@
87754 #include <linux/kernel.h>
87755 #include <asm/byteorder.h>
87756
87757-static inline u16 get_unaligned_le16(const void *p)
87758+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
87759 {
87760- return le16_to_cpup((__le16 *)p);
87761+ return le16_to_cpup((const __le16 *)p);
87762 }
87763
87764-static inline u32 get_unaligned_le32(const void *p)
87765+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
87766 {
87767- return le32_to_cpup((__le32 *)p);
87768+ return le32_to_cpup((const __le32 *)p);
87769 }
87770
87771-static inline u64 get_unaligned_le64(const void *p)
87772+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
87773 {
87774- return le64_to_cpup((__le64 *)p);
87775+ return le64_to_cpup((const __le64 *)p);
87776 }
87777
87778-static inline u16 get_unaligned_be16(const void *p)
87779+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
87780 {
87781- return be16_to_cpup((__be16 *)p);
87782+ return be16_to_cpup((const __be16 *)p);
87783 }
87784
87785-static inline u32 get_unaligned_be32(const void *p)
87786+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
87787 {
87788- return be32_to_cpup((__be32 *)p);
87789+ return be32_to_cpup((const __be32 *)p);
87790 }
87791
87792-static inline u64 get_unaligned_be64(const void *p)
87793+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
87794 {
87795- return be64_to_cpup((__be64 *)p);
87796+ return be64_to_cpup((const __be64 *)p);
87797 }
87798
87799 static inline void put_unaligned_le16(u16 val, void *p)
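__intentional_overflow() is the size_overflow plugin's whitelist annotation for arithmetic that wraps by design: -1 covers the whole function, while non-negative indices single out specific values (parameters or the return value on functions, compare __intentional_overflow(3) on tcp_fragment later in this section, or the member itself when applied to struct fields, as in tcp.h below). Declaration shape, with a hypothetical helper:

        /* hash folding wraps by design; keep the plugin quiet about it */
        extern u32 fold_hash(u32 hash, int bits) __intentional_overflow(-1);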
87800diff --git a/include/linux/usb.h b/include/linux/usb.h
87801index d2465bc..5256de4 100644
87802--- a/include/linux/usb.h
87803+++ b/include/linux/usb.h
87804@@ -571,7 +571,7 @@ struct usb_device {
87805 int maxchild;
87806
87807 u32 quirks;
87808- atomic_t urbnum;
87809+ atomic_unchecked_t urbnum;
87810
87811 unsigned long active_duration;
87812
87813@@ -1655,7 +1655,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
87814
87815 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
87816 __u8 request, __u8 requesttype, __u16 value, __u16 index,
87817- void *data, __u16 size, int timeout);
87818+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
87819 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
87820 void *data, int len, int *actual_length, int timeout);
87821 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
87822diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
87823index e452ba6..78f8e80 100644
87824--- a/include/linux/usb/renesas_usbhs.h
87825+++ b/include/linux/usb/renesas_usbhs.h
87826@@ -39,7 +39,7 @@ enum {
87827 */
87828 struct renesas_usbhs_driver_callback {
87829 int (*notify_hotplug)(struct platform_device *pdev);
87830-};
87831+} __no_const;
87832
87833 /*
87834 * callback functions for platform
87835diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
87836index 4836ba3..603f6ee 100644
87837--- a/include/linux/user_namespace.h
87838+++ b/include/linux/user_namespace.h
87839@@ -33,7 +33,7 @@ struct user_namespace {
87840 struct key *persistent_keyring_register;
87841 struct rw_semaphore persistent_keyring_register_sem;
87842 #endif
87843-};
87844+} __randomize_layout;
87845
87846 extern struct user_namespace init_user_ns;
87847
87848diff --git a/include/linux/utsname.h b/include/linux/utsname.h
87849index 239e277..22a5cf5 100644
87850--- a/include/linux/utsname.h
87851+++ b/include/linux/utsname.h
87852@@ -24,7 +24,7 @@ struct uts_namespace {
87853 struct new_utsname name;
87854 struct user_namespace *user_ns;
87855 unsigned int proc_inum;
87856-};
87857+} __randomize_layout;
87858 extern struct uts_namespace init_uts_ns;
87859
87860 #ifdef CONFIG_UTS_NS
87861diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
87862index 6f8fbcf..4efc177 100644
87863--- a/include/linux/vermagic.h
87864+++ b/include/linux/vermagic.h
87865@@ -25,9 +25,42 @@
87866 #define MODULE_ARCH_VERMAGIC ""
87867 #endif
87868
87869+#ifdef CONFIG_PAX_REFCOUNT
87870+#define MODULE_PAX_REFCOUNT "REFCOUNT "
87871+#else
87872+#define MODULE_PAX_REFCOUNT ""
87873+#endif
87874+
87875+#ifdef CONSTIFY_PLUGIN
87876+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
87877+#else
87878+#define MODULE_CONSTIFY_PLUGIN ""
87879+#endif
87880+
87881+#ifdef STACKLEAK_PLUGIN
87882+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
87883+#else
87884+#define MODULE_STACKLEAK_PLUGIN ""
87885+#endif
87886+
87887+#ifdef RANDSTRUCT_PLUGIN
87888+#include <generated/randomize_layout_hash.h>
87889+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
87890+#else
87891+#define MODULE_RANDSTRUCT_PLUGIN
87892+#endif
87893+
87894+#ifdef CONFIG_GRKERNSEC
87895+#define MODULE_GRSEC "GRSEC "
87896+#else
87897+#define MODULE_GRSEC ""
87898+#endif
87899+
87900 #define VERMAGIC_STRING \
87901 UTS_RELEASE " " \
87902 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
87903 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
87904- MODULE_ARCH_VERMAGIC
87905+ MODULE_ARCH_VERMAGIC \
87906+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
87907+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
87908
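Folding the hardening options into vermagic makes module loading fail closed: a module built without REFCOUNT, or with a different RANDSTRUCT seed, simply no longer matches the running kernel. On a fully enabled build the string expands along these lines (illustrative values):

        /* "3.16.2 SMP mod_unload REFCOUNT CONSTIFY_PLUGIN STACKLEAK_PLUGIN "
           "GRSEC RANDSTRUCT_PLUGIN_<hashed-seed>" */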
87909diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
87910index 502073a..a7de024 100644
87911--- a/include/linux/vga_switcheroo.h
87912+++ b/include/linux/vga_switcheroo.h
87913@@ -63,8 +63,8 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
87914
87915 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
87916
87917-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
87918-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
87919+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
87920+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
87921 #else
87922
87923 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
87924@@ -81,8 +81,8 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
87925
87926 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
87927
87928-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
87929-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
87930+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
87931+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
87932
87933 #endif
87934 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
87935diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
87936index 4b8a891..e9a2863 100644
87937--- a/include/linux/vmalloc.h
87938+++ b/include/linux/vmalloc.h
87939@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
87940 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
87941 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
87942 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
87943+
87944+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
87945+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
87946+#endif
87947+
87948 /* bits [20..32] reserved for arch specific ioremap internals */
87949
87950 /*
87951@@ -82,6 +87,10 @@ extern void *vmap(struct page **pages, unsigned int count,
87952 unsigned long flags, pgprot_t prot);
87953 extern void vunmap(const void *addr);
87954
87955+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
87956+extern void unmap_process_stacks(struct task_struct *task);
87957+#endif
87958+
87959 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
87960 unsigned long uaddr, void *kaddr,
87961 unsigned long size);
87962@@ -142,7 +151,7 @@ extern void free_vm_area(struct vm_struct *area);
87963
87964 /* for /dev/kmem */
87965 extern long vread(char *buf, char *addr, unsigned long count);
87966-extern long vwrite(char *buf, char *addr, unsigned long count);
87967+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
87968
87969 /*
87970 * Internals. Dont't use..
87971diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
87972index 82e7db7..f8ce3d0 100644
87973--- a/include/linux/vmstat.h
87974+++ b/include/linux/vmstat.h
87975@@ -108,18 +108,18 @@ static inline void vm_events_fold_cpu(int cpu)
87976 /*
87977 * Zone based page accounting with per cpu differentials.
87978 */
87979-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
87980+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
87981
87982 static inline void zone_page_state_add(long x, struct zone *zone,
87983 enum zone_stat_item item)
87984 {
87985- atomic_long_add(x, &zone->vm_stat[item]);
87986- atomic_long_add(x, &vm_stat[item]);
87987+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
87988+ atomic_long_add_unchecked(x, &vm_stat[item]);
87989 }
87990
87991-static inline unsigned long global_page_state(enum zone_stat_item item)
87992+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
87993 {
87994- long x = atomic_long_read(&vm_stat[item]);
87995+ long x = atomic_long_read_unchecked(&vm_stat[item]);
87996 #ifdef CONFIG_SMP
87997 if (x < 0)
87998 x = 0;
87999@@ -127,10 +127,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
88000 return x;
88001 }
88002
88003-static inline unsigned long zone_page_state(struct zone *zone,
88004+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
88005 enum zone_stat_item item)
88006 {
88007- long x = atomic_long_read(&zone->vm_stat[item]);
88008+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
88009 #ifdef CONFIG_SMP
88010 if (x < 0)
88011 x = 0;
88012@@ -147,7 +147,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
88013 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
88014 enum zone_stat_item item)
88015 {
88016- long x = atomic_long_read(&zone->vm_stat[item]);
88017+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
88018
88019 #ifdef CONFIG_SMP
88020 int cpu;
88021@@ -234,14 +234,14 @@ static inline void __mod_zone_page_state(struct zone *zone,
88022
88023 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
88024 {
88025- atomic_long_inc(&zone->vm_stat[item]);
88026- atomic_long_inc(&vm_stat[item]);
88027+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
88028+ atomic_long_inc_unchecked(&vm_stat[item]);
88029 }
88030
88031 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
88032 {
88033- atomic_long_dec(&zone->vm_stat[item]);
88034- atomic_long_dec(&vm_stat[item]);
88035+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
88036+ atomic_long_dec_unchecked(&vm_stat[item]);
88037 }
88038
88039 static inline void __inc_zone_page_state(struct page *page,
88040diff --git a/include/linux/xattr.h b/include/linux/xattr.h
88041index 91b0a68..0e9adf6 100644
88042--- a/include/linux/xattr.h
88043+++ b/include/linux/xattr.h
88044@@ -28,7 +28,7 @@ struct xattr_handler {
88045 size_t size, int handler_flags);
88046 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
88047 size_t size, int flags, int handler_flags);
88048-};
88049+} __do_const;
88050
88051 struct xattr {
88052 const char *name;
88053@@ -37,6 +37,9 @@ struct xattr {
88054 };
88055
88056 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
88057+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
88058+ssize_t pax_getxattr(struct dentry *, void *, size_t);
88059+#endif
88060 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
88061 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
88062 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
88063diff --git a/include/linux/zlib.h b/include/linux/zlib.h
88064index 9c5a6b4..09c9438 100644
88065--- a/include/linux/zlib.h
88066+++ b/include/linux/zlib.h
88067@@ -31,6 +31,7 @@
88068 #define _ZLIB_H
88069
88070 #include <linux/zconf.h>
88071+#include <linux/compiler.h>
88072
88073 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
88074 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
88075@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
88076
88077 /* basic functions */
88078
88079-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
88080+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
88081 /*
88082 Returns the number of bytes that needs to be allocated for a per-
88083 stream workspace with the specified parameters. A pointer to this
88084diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
88085index eec6e46..82d5641 100644
88086--- a/include/media/v4l2-dev.h
88087+++ b/include/media/v4l2-dev.h
88088@@ -77,7 +77,7 @@ struct v4l2_file_operations {
88089 int (*mmap) (struct file *, struct vm_area_struct *);
88090 int (*open) (struct file *);
88091 int (*release) (struct file *);
88092-};
88093+} __do_const;
88094
88095 /*
88096 * Newer version of video_device, handled by videodev2.c
88097diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
88098index ffb69da..040393e 100644
88099--- a/include/media/v4l2-device.h
88100+++ b/include/media/v4l2-device.h
88101@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
88102 this function returns 0. If the name ends with a digit (e.g. cx18),
88103 then the name will be set to cx18-0 since cx180 looks really odd. */
88104 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
88105- atomic_t *instance);
88106+ atomic_unchecked_t *instance);
88107
88108 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
88109 Since the parent disappears this ensures that v4l2_dev doesn't have an
88110diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
88111index d9fa68f..45c88d1 100644
88112--- a/include/net/9p/transport.h
88113+++ b/include/net/9p/transport.h
88114@@ -63,7 +63,7 @@ struct p9_trans_module {
88115 int (*cancelled)(struct p9_client *, struct p9_req_t *req);
88116 int (*zc_request)(struct p9_client *, struct p9_req_t *,
88117 char *, char *, int , int, int, int);
88118-};
88119+} __do_const;
88120
88121 void v9fs_register_trans(struct p9_trans_module *m);
88122 void v9fs_unregister_trans(struct p9_trans_module *m);
88123diff --git a/include/net/af_unix.h b/include/net/af_unix.h
88124index a175ba4..196eb82 100644
88125--- a/include/net/af_unix.h
88126+++ b/include/net/af_unix.h
88127@@ -36,7 +36,7 @@ struct unix_skb_parms {
88128 u32 secid; /* Security ID */
88129 #endif
88130 u32 consumed;
88131-};
88132+} __randomize_layout;
88133
88134 #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
88135 #define UNIXSID(skb) (&UNIXCB((skb)).secid)
88136diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
88137index 4abdcb2..945c5cc 100644
88138--- a/include/net/bluetooth/l2cap.h
88139+++ b/include/net/bluetooth/l2cap.h
88140@@ -601,7 +601,7 @@ struct l2cap_ops {
88141 long (*get_sndtimeo) (struct l2cap_chan *chan);
88142 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
88143 unsigned long len, int nb);
88144-};
88145+} __do_const;
88146
88147 struct l2cap_conn {
88148 struct hci_conn *hcon;
88149diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
88150index f2ae33d..c457cf0 100644
88151--- a/include/net/caif/cfctrl.h
88152+++ b/include/net/caif/cfctrl.h
88153@@ -52,7 +52,7 @@ struct cfctrl_rsp {
88154 void (*radioset_rsp)(void);
88155 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
88156 struct cflayer *client_layer);
88157-};
88158+} __no_const;
88159
88160 /* Link Setup Parameters for CAIF-Links. */
88161 struct cfctrl_link_param {
88162@@ -101,8 +101,8 @@ struct cfctrl_request_info {
88163 struct cfctrl {
88164 struct cfsrvl serv;
88165 struct cfctrl_rsp res;
88166- atomic_t req_seq_no;
88167- atomic_t rsp_seq_no;
88168+ atomic_unchecked_t req_seq_no;
88169+ atomic_unchecked_t rsp_seq_no;
88170 struct list_head list;
88171 /* Protects from simultaneous access to first_req list */
88172 spinlock_t info_list_lock;
88173diff --git a/include/net/flow.h b/include/net/flow.h
88174index 8109a15..504466d 100644
88175--- a/include/net/flow.h
88176+++ b/include/net/flow.h
88177@@ -231,6 +231,6 @@ void flow_cache_fini(struct net *net);
88178
88179 void flow_cache_flush(struct net *net);
88180 void flow_cache_flush_deferred(struct net *net);
88181-extern atomic_t flow_cache_genid;
88182+extern atomic_unchecked_t flow_cache_genid;
88183
88184 #endif
88185diff --git a/include/net/genetlink.h b/include/net/genetlink.h
88186index 93695f0..766d71c 100644
88187--- a/include/net/genetlink.h
88188+++ b/include/net/genetlink.h
88189@@ -120,7 +120,7 @@ struct genl_ops {
88190 u8 cmd;
88191 u8 internal_flags;
88192 u8 flags;
88193-};
88194+} __do_const;
88195
88196 int __genl_register_family(struct genl_family *family);
88197
88198diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
88199index 734d9b5..48a9a4b 100644
88200--- a/include/net/gro_cells.h
88201+++ b/include/net/gro_cells.h
88202@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
88203 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
88204
88205 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
88206- atomic_long_inc(&dev->rx_dropped);
88207+ atomic_long_inc_unchecked(&dev->rx_dropped);
88208 kfree_skb(skb);
88209 return;
88210 }
88211diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
88212index 7a43138..bc76865 100644
88213--- a/include/net/inet_connection_sock.h
88214+++ b/include/net/inet_connection_sock.h
88215@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
88216 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
88217 int (*bind_conflict)(const struct sock *sk,
88218 const struct inet_bind_bucket *tb, bool relax);
88219-};
88220+} __do_const;
88221
88222 /** inet_connection_sock - INET connection oriented sock
88223 *
88224diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
88225index 01d590e..f69c61d 100644
88226--- a/include/net/inetpeer.h
88227+++ b/include/net/inetpeer.h
88228@@ -47,7 +47,7 @@ struct inet_peer {
88229 */
88230 union {
88231 struct {
88232- atomic_t rid; /* Frag reception counter */
88233+ atomic_unchecked_t rid; /* Frag reception counter */
88234 };
88235 struct rcu_head rcu;
88236 struct inet_peer *gc_next;
88237diff --git a/include/net/ip.h b/include/net/ip.h
88238index 7596eb2..f7f5fad 100644
88239--- a/include/net/ip.h
88240+++ b/include/net/ip.h
88241@@ -309,7 +309,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
88242 }
88243 }
88244
88245-u32 ip_idents_reserve(u32 hash, int segs);
88246+u32 ip_idents_reserve(u32 hash, int segs) __intentional_overflow(-1);
88247 void __ip_select_ident(struct iphdr *iph, int segs);
88248
88249 static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
88250diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
88251index 9922093..a1755d6 100644
88252--- a/include/net/ip_fib.h
88253+++ b/include/net/ip_fib.h
88254@@ -169,7 +169,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
88255
88256 #define FIB_RES_SADDR(net, res) \
88257 ((FIB_RES_NH(res).nh_saddr_genid == \
88258- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
88259+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
88260 FIB_RES_NH(res).nh_saddr : \
88261 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
88262 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
88263diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
88264index 624a8a5..b1e2a24 100644
88265--- a/include/net/ip_vs.h
88266+++ b/include/net/ip_vs.h
88267@@ -558,7 +558,7 @@ struct ip_vs_conn {
88268 struct ip_vs_conn *control; /* Master control connection */
88269 atomic_t n_control; /* Number of controlled ones */
88270 struct ip_vs_dest *dest; /* real server */
88271- atomic_t in_pkts; /* incoming packet counter */
88272+ atomic_unchecked_t in_pkts; /* incoming packet counter */
88273
88274 /* packet transmitter for different forwarding methods. If it
88275 mangles the packet, it must return NF_DROP or better NF_STOLEN,
88276@@ -705,7 +705,7 @@ struct ip_vs_dest {
88277 __be16 port; /* port number of the server */
88278 union nf_inet_addr addr; /* IP address of the server */
88279 volatile unsigned int flags; /* dest status flags */
88280- atomic_t conn_flags; /* flags to copy to conn */
88281+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
88282 atomic_t weight; /* server weight */
88283
88284 atomic_t refcnt; /* reference counter */
88285@@ -960,11 +960,11 @@ struct netns_ipvs {
88286 /* ip_vs_lblc */
88287 int sysctl_lblc_expiration;
88288 struct ctl_table_header *lblc_ctl_header;
88289- struct ctl_table *lblc_ctl_table;
88290+ ctl_table_no_const *lblc_ctl_table;
88291 /* ip_vs_lblcr */
88292 int sysctl_lblcr_expiration;
88293 struct ctl_table_header *lblcr_ctl_header;
88294- struct ctl_table *lblcr_ctl_table;
88295+ ctl_table_no_const *lblcr_ctl_table;
88296 /* ip_vs_est */
88297 struct list_head est_list; /* estimator list */
88298 spinlock_t est_lock;
88299diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
88300index 8d4f588..2e37ad2 100644
88301--- a/include/net/irda/ircomm_tty.h
88302+++ b/include/net/irda/ircomm_tty.h
88303@@ -33,6 +33,7 @@
88304 #include <linux/termios.h>
88305 #include <linux/timer.h>
88306 #include <linux/tty.h> /* struct tty_struct */
88307+#include <asm/local.h>
88308
88309 #include <net/irda/irias_object.h>
88310 #include <net/irda/ircomm_core.h>
88311diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
88312index 714cc9a..ea05f3e 100644
88313--- a/include/net/iucv/af_iucv.h
88314+++ b/include/net/iucv/af_iucv.h
88315@@ -149,7 +149,7 @@ struct iucv_skb_cb {
88316 struct iucv_sock_list {
88317 struct hlist_head head;
88318 rwlock_t lock;
88319- atomic_t autobind_name;
88320+ atomic_unchecked_t autobind_name;
88321 };
88322
88323 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
88324diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
88325index f3be818..bf46196 100644
88326--- a/include/net/llc_c_ac.h
88327+++ b/include/net/llc_c_ac.h
88328@@ -87,7 +87,7 @@
88329 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
88330 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
88331
88332-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
88333+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
88334
88335 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
88336 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
88337diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
88338index 3948cf1..83b28c4 100644
88339--- a/include/net/llc_c_ev.h
88340+++ b/include/net/llc_c_ev.h
88341@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
88342 return (struct llc_conn_state_ev *)skb->cb;
88343 }
88344
88345-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
88346-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
88347+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
88348+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
88349
88350 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
88351 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
88352diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
88353index 0e79cfb..f46db31 100644
88354--- a/include/net/llc_c_st.h
88355+++ b/include/net/llc_c_st.h
88356@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
88357 u8 next_state;
88358 llc_conn_ev_qfyr_t *ev_qualifiers;
88359 llc_conn_action_t *ev_actions;
88360-};
88361+} __do_const;
88362
88363 struct llc_conn_state {
88364 u8 current_state;
88365diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
88366index a61b98c..aade1eb 100644
88367--- a/include/net/llc_s_ac.h
88368+++ b/include/net/llc_s_ac.h
88369@@ -23,7 +23,7 @@
88370 #define SAP_ACT_TEST_IND 9
88371
88372 /* All action functions must look like this */
88373-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
88374+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
88375
88376 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
88377 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
88378diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
88379index 567c681..cd73ac02 100644
88380--- a/include/net/llc_s_st.h
88381+++ b/include/net/llc_s_st.h
88382@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
88383 llc_sap_ev_t ev;
88384 u8 next_state;
88385 llc_sap_action_t *ev_actions;
88386-};
88387+} __do_const;
88388
88389 struct llc_sap_state {
88390 u8 curr_state;
88391diff --git a/include/net/mac80211.h b/include/net/mac80211.h
88392index 421b6ec..5a03729 100644
88393--- a/include/net/mac80211.h
88394+++ b/include/net/mac80211.h
88395@@ -4588,7 +4588,7 @@ struct rate_control_ops {
88396 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
88397
88398 u32 (*get_expected_throughput)(void *priv_sta);
88399-};
88400+} __do_const;
88401
88402 static inline int rate_supported(struct ieee80211_sta *sta,
88403 enum ieee80211_band band,
88404diff --git a/include/net/neighbour.h b/include/net/neighbour.h
88405index 47f4254..fd095bc 100644
88406--- a/include/net/neighbour.h
88407+++ b/include/net/neighbour.h
88408@@ -163,7 +163,7 @@ struct neigh_ops {
88409 void (*error_report)(struct neighbour *, struct sk_buff *);
88410 int (*output)(struct neighbour *, struct sk_buff *);
88411 int (*connected_output)(struct neighbour *, struct sk_buff *);
88412-};
88413+} __do_const;
88414
88415 struct pneigh_entry {
88416 struct pneigh_entry *next;
88417@@ -217,7 +217,7 @@ struct neigh_table {
88418 struct neigh_statistics __percpu *stats;
88419 struct neigh_hash_table __rcu *nht;
88420 struct pneigh_entry **phash_buckets;
88421-};
88422+} __randomize_layout;
88423
88424 static inline int neigh_parms_family(struct neigh_parms *p)
88425 {
88426diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
88427index 361d260..903d15f 100644
88428--- a/include/net/net_namespace.h
88429+++ b/include/net/net_namespace.h
88430@@ -129,8 +129,8 @@ struct net {
88431 struct netns_ipvs *ipvs;
88432 #endif
88433 struct sock *diag_nlsk;
88434- atomic_t fnhe_genid;
88435-};
88436+ atomic_unchecked_t fnhe_genid;
88437+} __randomize_layout;
88438
88439 #include <linux/seq_file_net.h>
88440
88441@@ -286,7 +286,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
88442 #define __net_init __init
88443 #define __net_exit __exit_refok
88444 #define __net_initdata __initdata
88445+#ifdef CONSTIFY_PLUGIN
88446 #define __net_initconst __initconst
88447+#else
88448+#define __net_initconst __initdata
88449+#endif
88450 #endif
88451
88452 struct pernet_operations {
88453@@ -296,7 +300,7 @@ struct pernet_operations {
88454 void (*exit_batch)(struct list_head *net_exit_list);
88455 int *id;
88456 size_t size;
88457-};
88458+} __do_const;
88459
88460 /*
88461 * Use these carefully. If you implement a network device and it
88462@@ -344,23 +348,23 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
88463
88464 static inline int rt_genid_ipv4(struct net *net)
88465 {
88466- return atomic_read(&net->ipv4.rt_genid);
88467+ return atomic_read_unchecked(&net->ipv4.rt_genid);
88468 }
88469
88470 static inline void rt_genid_bump_ipv4(struct net *net)
88471 {
88472- atomic_inc(&net->ipv4.rt_genid);
88473+ atomic_inc_unchecked(&net->ipv4.rt_genid);
88474 }
88475
88476 #if IS_ENABLED(CONFIG_IPV6)
88477 static inline int rt_genid_ipv6(struct net *net)
88478 {
88479- return atomic_read(&net->ipv6.rt_genid);
88480+ return atomic_read_unchecked(&net->ipv6.rt_genid);
88481 }
88482
88483 static inline void rt_genid_bump_ipv6(struct net *net)
88484 {
88485- atomic_inc(&net->ipv6.rt_genid);
88486+ atomic_inc_unchecked(&net->ipv6.rt_genid);
88487 }
88488 #else
88489 static inline int rt_genid_ipv6(struct net *net)
88490@@ -390,12 +394,12 @@ static inline void rt_genid_bump_all(struct net *net)
88491
88492 static inline int fnhe_genid(struct net *net)
88493 {
88494- return atomic_read(&net->fnhe_genid);
88495+ return atomic_read_unchecked(&net->fnhe_genid);
88496 }
88497
88498 static inline void fnhe_genid_bump(struct net *net)
88499 {
88500- atomic_inc(&net->fnhe_genid);
88501+ atomic_inc_unchecked(&net->fnhe_genid);
88502 }
88503
88504 #endif /* __NET_NET_NAMESPACE_H */
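The rt_genid/fnhe_genid counters switch to the unchecked flavour because generation IDs are compared, never counted: a wrap just looks like one more invalidation, so overflow trapping would only add false positives. The consumer pattern these accessors serve (sketch):

        int genid = rt_genid_ipv4(net);

        /* ... build or validate a cached route ... */

        if (genid != rt_genid_ipv4(net))
                goto retry;     /* rt_genid_bump_ipv4() ran meanwhile */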
88505diff --git a/include/net/netdma.h b/include/net/netdma.h
88506index 8ba8ce2..99b7fff 100644
88507--- a/include/net/netdma.h
88508+++ b/include/net/netdma.h
88509@@ -24,7 +24,7 @@
88510 #include <linux/dmaengine.h>
88511 #include <linux/skbuff.h>
88512
88513-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
88514+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
88515 struct sk_buff *skb, int offset, struct iovec *to,
88516 size_t len, struct dma_pinned_list *pinned_list);
88517
88518diff --git a/include/net/netlink.h b/include/net/netlink.h
88519index 2b47eaa..6d5bcc2 100644
88520--- a/include/net/netlink.h
88521+++ b/include/net/netlink.h
88522@@ -521,7 +521,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
88523 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
88524 {
88525 if (mark)
88526- skb_trim(skb, (unsigned char *) mark - skb->data);
88527+ skb_trim(skb, (const unsigned char *) mark - skb->data);
88528 }
88529
88530 /**
88531diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
88532index 773cce3..6a11852 100644
88533--- a/include/net/netns/conntrack.h
88534+++ b/include/net/netns/conntrack.h
88535@@ -13,10 +13,10 @@ struct nf_conntrack_ecache;
88536 struct nf_proto_net {
88537 #ifdef CONFIG_SYSCTL
88538 struct ctl_table_header *ctl_table_header;
88539- struct ctl_table *ctl_table;
88540+ ctl_table_no_const *ctl_table;
88541 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
88542 struct ctl_table_header *ctl_compat_header;
88543- struct ctl_table *ctl_compat_table;
88544+ ctl_table_no_const *ctl_compat_table;
88545 #endif
88546 #endif
88547 unsigned int users;
88548@@ -59,7 +59,7 @@ struct nf_ip_net {
88549 struct nf_icmp_net icmpv6;
88550 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
88551 struct ctl_table_header *ctl_table_header;
88552- struct ctl_table *ctl_table;
88553+ ctl_table_no_const *ctl_table;
88554 #endif
88555 };
88556
88557diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
88558index aec5e12..807233f 100644
88559--- a/include/net/netns/ipv4.h
88560+++ b/include/net/netns/ipv4.h
88561@@ -82,7 +82,7 @@ struct netns_ipv4 {
88562
88563 struct ping_group_range ping_group_range;
88564
88565- atomic_t dev_addr_genid;
88566+ atomic_unchecked_t dev_addr_genid;
88567
88568 #ifdef CONFIG_SYSCTL
88569 unsigned long *sysctl_local_reserved_ports;
88570@@ -96,6 +96,6 @@ struct netns_ipv4 {
88571 struct fib_rules_ops *mr_rules_ops;
88572 #endif
88573 #endif
88574- atomic_t rt_genid;
88575+ atomic_unchecked_t rt_genid;
88576 };
88577 #endif
88578diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
88579index 19d3446..3c87195 100644
88580--- a/include/net/netns/ipv6.h
88581+++ b/include/net/netns/ipv6.h
88582@@ -74,8 +74,8 @@ struct netns_ipv6 {
88583 struct fib_rules_ops *mr6_rules_ops;
88584 #endif
88585 #endif
88586- atomic_t dev_addr_genid;
88587- atomic_t rt_genid;
88588+ atomic_unchecked_t dev_addr_genid;
88589+ atomic_unchecked_t rt_genid;
88590 };
88591
88592 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
88593diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
88594index 3492434..209f58c 100644
88595--- a/include/net/netns/xfrm.h
88596+++ b/include/net/netns/xfrm.h
88597@@ -64,7 +64,7 @@ struct netns_xfrm {
88598
88599 /* flow cache part */
88600 struct flow_cache flow_cache_global;
88601- atomic_t flow_cache_genid;
88602+ atomic_unchecked_t flow_cache_genid;
88603 struct list_head flow_cache_gc_list;
88604 spinlock_t flow_cache_gc_lock;
88605 struct work_struct flow_cache_gc_work;
88606diff --git a/include/net/ping.h b/include/net/ping.h
88607index 026479b..d9b2829 100644
88608--- a/include/net/ping.h
88609+++ b/include/net/ping.h
88610@@ -54,7 +54,7 @@ struct ping_iter_state {
88611
88612 extern struct proto ping_prot;
88613 #if IS_ENABLED(CONFIG_IPV6)
88614-extern struct pingv6_ops pingv6_ops;
88615+extern struct pingv6_ops *pingv6_ops;
88616 #endif
88617
88618 struct pingfakehdr {
88619diff --git a/include/net/protocol.h b/include/net/protocol.h
88620index d6fcc1f..ca277058 100644
88621--- a/include/net/protocol.h
88622+++ b/include/net/protocol.h
88623@@ -49,7 +49,7 @@ struct net_protocol {
88624 * socket lookup?
88625 */
88626 icmp_strict_tag_validation:1;
88627-};
88628+} __do_const;
88629
88630 #if IS_ENABLED(CONFIG_IPV6)
88631 struct inet6_protocol {
88632@@ -62,7 +62,7 @@ struct inet6_protocol {
88633 u8 type, u8 code, int offset,
88634 __be32 info);
88635 unsigned int flags; /* INET6_PROTO_xxx */
88636-};
88637+} __do_const;
88638
88639 #define INET6_PROTO_NOPOLICY 0x1
88640 #define INET6_PROTO_FINAL 0x2
88641diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
88642index 72240e5..8c14bef 100644
88643--- a/include/net/rtnetlink.h
88644+++ b/include/net/rtnetlink.h
88645@@ -93,7 +93,7 @@ struct rtnl_link_ops {
88646 int (*fill_slave_info)(struct sk_buff *skb,
88647 const struct net_device *dev,
88648 const struct net_device *slave_dev);
88649-};
88650+} __do_const;
88651
88652 int __rtnl_link_register(struct rtnl_link_ops *ops);
88653 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
88654diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
88655index 4a5b9a3..ca27d73 100644
88656--- a/include/net/sctp/checksum.h
88657+++ b/include/net/sctp/checksum.h
88658@@ -61,8 +61,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
88659 unsigned int offset)
88660 {
88661 struct sctphdr *sh = sctp_hdr(skb);
88662- __le32 ret, old = sh->checksum;
88663- const struct skb_checksum_ops ops = {
88664+ __le32 ret, old = sh->checksum;
88665+ static const struct skb_checksum_ops ops = {
88666 .update = sctp_csum_update,
88667 .combine = sctp_csum_combine,
88668 };
88669diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
88670index 7f4eeb3..37e8fe1 100644
88671--- a/include/net/sctp/sm.h
88672+++ b/include/net/sctp/sm.h
88673@@ -80,7 +80,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
88674 typedef struct {
88675 sctp_state_fn_t *fn;
88676 const char *name;
88677-} sctp_sm_table_entry_t;
88678+} __do_const sctp_sm_table_entry_t;
88679
88680 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
88681 * currently in use.
88682@@ -292,7 +292,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
88683 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
88684
88685 /* Extern declarations for major data structures. */
88686-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
88687+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
88688
88689
88690 /* Get the size of a DATA chunk payload. */
88691diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
88692index f38588bf..94c1795 100644
88693--- a/include/net/sctp/structs.h
88694+++ b/include/net/sctp/structs.h
88695@@ -507,7 +507,7 @@ struct sctp_pf {
88696 struct sctp_association *asoc);
88697 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
88698 struct sctp_af *af;
88699-};
88700+} __do_const;
88701
88702
88703 /* Structure to track chunk fragments that have been acked, but peer
88704diff --git a/include/net/sock.h b/include/net/sock.h
88705index 1563507..20d5d0e 100644
88706--- a/include/net/sock.h
88707+++ b/include/net/sock.h
88708@@ -349,7 +349,7 @@ struct sock {
88709 unsigned int sk_napi_id;
88710 unsigned int sk_ll_usec;
88711 #endif
88712- atomic_t sk_drops;
88713+ atomic_unchecked_t sk_drops;
88714 int sk_rcvbuf;
88715
88716 struct sk_filter __rcu *sk_filter;
88717@@ -1038,7 +1038,7 @@ struct proto {
88718 void (*destroy_cgroup)(struct mem_cgroup *memcg);
88719 struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
88720 #endif
88721-};
88722+} __randomize_layout;
88723
88724 /*
88725 * Bits in struct cg_proto.flags
88726@@ -1225,7 +1225,7 @@ static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
88727 return ret >> PAGE_SHIFT;
88728 }
88729
88730-static inline long
88731+static inline long __intentional_overflow(-1)
88732 sk_memory_allocated(const struct sock *sk)
88733 {
88734 struct proto *prot = sk->sk_prot;
88735@@ -1370,7 +1370,7 @@ struct sock_iocb {
88736 struct scm_cookie *scm;
88737 struct msghdr *msg, async_msg;
88738 struct kiocb *kiocb;
88739-};
88740+} __randomize_layout;
88741
88742 static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
88743 {
88744@@ -1623,6 +1623,33 @@ void sk_common_release(struct sock *sk);
88745 /* Initialise core socket variables */
88746 void sock_init_data(struct socket *sock, struct sock *sk);
88747
88748+void sk_filter_release_rcu(struct rcu_head *rcu);
88749+
88750+/**
88751+ * sk_filter_release - release a socket filter
88752+ * @fp: filter to remove
88753+ *
88754+ * Remove a filter from a socket and release its resources.
88755+ */
88756+
88757+static inline void sk_filter_release(struct sk_filter *fp)
88758+{
88759+ if (atomic_dec_and_test(&fp->refcnt))
88760+ call_rcu(&fp->rcu, sk_filter_release_rcu);
88761+}
88762+
88763+static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
88764+{
88765+ atomic_sub(sk_filter_size(fp->len), &sk->sk_omem_alloc);
88766+ sk_filter_release(fp);
88767+}
88768+
88769+static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
88770+{
88771+ atomic_inc(&fp->refcnt);
88772+ atomic_add(sk_filter_size(fp->len), &sk->sk_omem_alloc);
88773+}
88774+
88775 /*
88776 * Socket reference counting postulates.
88777 *
88778@@ -1805,7 +1832,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
88779 }
88780
88781 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
88782- char __user *from, char *to,
88783+ char __user *from, unsigned char *to,
88784 int copy, int offset)
88785 {
88786 if (skb->ip_summed == CHECKSUM_NONE) {
88787@@ -2067,7 +2094,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
88788 }
88789 }
88790
88791-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
88792+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
88793
88794 /**
88795 * sk_page_frag - return an appropriate page_frag
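The sk_filter helpers added above spell out the filter's lifetime at this layer: sk_filter_charge() takes a reference and bills the filter's size to sk_omem_alloc, sk_filter_uncharge() reverses both, and the final release defers the free through RCU so packet-path readers never see a dangling filter. Sketched against the socket-clone case (illustrative):

        rcu_read_lock();
        fp = rcu_dereference(sk->sk_filter);
        if (fp)
                sk_filter_charge(newsk, fp);    /* clone inherits a reference */
        rcu_read_unlock();

        /* ... later, on teardown ... */
        sk_filter_uncharge(newsk, fp);          /* drop it; last ref -> call_rcu */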
88796diff --git a/include/net/tcp.h b/include/net/tcp.h
88797index 7286db8..f1aa7dc 100644
88798--- a/include/net/tcp.h
88799+++ b/include/net/tcp.h
88800@@ -535,7 +535,7 @@ void tcp_retransmit_timer(struct sock *sk);
88801 void tcp_xmit_retransmit_queue(struct sock *);
88802 void tcp_simple_retransmit(struct sock *);
88803 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
88804-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
88805+int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
88806
88807 void tcp_send_probe0(struct sock *);
88808 void tcp_send_partial(struct sock *);
88809@@ -708,8 +708,8 @@ struct tcp_skb_cb {
88810 struct inet6_skb_parm h6;
88811 #endif
88812 } header; /* For incoming frames */
88813- __u32 seq; /* Starting sequence number */
88814- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
88815+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
88816+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
88817 __u32 when; /* used to compute rtt's */
88818 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
88819
88820@@ -723,7 +723,7 @@ struct tcp_skb_cb {
88821
88822 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
88823 /* 1 byte hole */
88824- __u32 ack_seq; /* Sequence number ACK'd */
88825+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
88826 };
88827
88828 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
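seq, end_seq and ack_seq get the __intentional_overflow marking because TCP sequence space is modulo 2^32 by design; the kernel's own comparison helpers depend on wrap-around, e.g. the classic before() from this same header:

        static inline bool before(__u32 seq1, __u32 seq2)
        {
                return (__s32)(seq1 - seq2) < 0;        /* correct across wrap */
        }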
88829diff --git a/include/net/xfrm.h b/include/net/xfrm.h
88830index 721e9c3b..3c81bbf 100644
88831--- a/include/net/xfrm.h
88832+++ b/include/net/xfrm.h
88833@@ -285,7 +285,6 @@ struct xfrm_dst;
88834 struct xfrm_policy_afinfo {
88835 unsigned short family;
88836 struct dst_ops *dst_ops;
88837- void (*garbage_collect)(struct net *net);
88838 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
88839 const xfrm_address_t *saddr,
88840 const xfrm_address_t *daddr);
88841@@ -303,7 +302,7 @@ struct xfrm_policy_afinfo {
88842 struct net_device *dev,
88843 const struct flowi *fl);
88844 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
88845-};
88846+} __do_const;
88847
88848 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
88849 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
88850@@ -342,7 +341,7 @@ struct xfrm_state_afinfo {
88851 int (*transport_finish)(struct sk_buff *skb,
88852 int async);
88853 void (*local_error)(struct sk_buff *skb, u32 mtu);
88854-};
88855+} __do_const;
88856
88857 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
88858 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
88859@@ -437,7 +436,7 @@ struct xfrm_mode {
88860 struct module *owner;
88861 unsigned int encap;
88862 int flags;
88863-};
88864+} __do_const;
88865
88866 /* Flags for xfrm_mode. */
88867 enum {
88868@@ -534,7 +533,7 @@ struct xfrm_policy {
88869 struct timer_list timer;
88870
88871 struct flow_cache_object flo;
88872- atomic_t genid;
88873+ atomic_unchecked_t genid;
88874 u32 priority;
88875 u32 index;
88876 struct xfrm_mark mark;
88877@@ -1167,6 +1166,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
88878 }
88879
88880 void xfrm_garbage_collect(struct net *net);
88881+void xfrm_garbage_collect_deferred(struct net *net);
88882
88883 #else
88884
88885@@ -1205,6 +1205,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
88886 static inline void xfrm_garbage_collect(struct net *net)
88887 {
88888 }
88889+static inline void xfrm_garbage_collect_deferred(struct net *net)
88890+{
88891+}
88892 #endif
88893
88894 static __inline__
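Annotation: __do_const is consumed by the constify plugin, which moves these ops tables into read-only memory. A sketch of the consequence, using the real xfrm4_policy_afinfo name as an illustration: the table must be complete at build time, and since nothing can assign a ->garbage_collect hook into a const struct after registration, the hook is dropped in favor of the exported xfrm_garbage_collect_deferred(). My reading of the change, not stated in the hunk.

static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
	.family = AF_INET,
	/* every hook set statically; a runtime write would fault in .rodata */
};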
88895diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
88896index 1017e0b..227aa4d 100644
88897--- a/include/rdma/iw_cm.h
88898+++ b/include/rdma/iw_cm.h
88899@@ -122,7 +122,7 @@ struct iw_cm_verbs {
88900 int backlog);
88901
88902 int (*destroy_listen)(struct iw_cm_id *cm_id);
88903-};
88904+} __no_const;
88905
88906 /**
88907 * iw_create_cm_id - Create an IW CM identifier.
88908diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
88909index 52beadf..598734c 100644
88910--- a/include/scsi/libfc.h
88911+++ b/include/scsi/libfc.h
88912@@ -771,6 +771,7 @@ struct libfc_function_template {
88913 */
88914 void (*disc_stop_final) (struct fc_lport *);
88915 };
88916+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
88917
88918 /**
88919 * struct fc_disc - Discovery context
88920@@ -875,7 +876,7 @@ struct fc_lport {
88921 struct fc_vport *vport;
88922
88923 /* Operational Information */
88924- struct libfc_function_template tt;
88925+ libfc_function_template_no_const tt;
88926 u8 link_up;
88927 u8 qfull;
88928 enum fc_lport_state state;
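Annotation: the typedef above is the escape hatch for constify, needed because struct fc_lport fills in its embedded template at runtime. Assumed shape of the attribute, a no-op without the plugin:

#ifdef CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))
#else
#define __no_const
#endif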
88929diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
88930index 27ab310..60dc245 100644
88931--- a/include/scsi/scsi_device.h
88932+++ b/include/scsi/scsi_device.h
88933@@ -187,9 +187,9 @@ struct scsi_device {
88934 unsigned int max_device_blocked; /* what device_blocked counts down from */
88935 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
88936
88937- atomic_t iorequest_cnt;
88938- atomic_t iodone_cnt;
88939- atomic_t ioerr_cnt;
88940+ atomic_unchecked_t iorequest_cnt;
88941+ atomic_unchecked_t iodone_cnt;
88942+ atomic_unchecked_t ioerr_cnt;
88943
88944 struct device sdev_gendev,
88945 sdev_dev;
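Annotation: under PAX_REFCOUNT, atomic_t increments trap on signed overflow to catch refcount bugs; atomic_unchecked_t keeps plain wrapping semantics for counters such as iodone_cnt where wraparound is harmless statistics, not a lifetime bug. A sketch of the split, x86 flavor, assuming PAX_REFCOUNT; the real definitions differ per architecture.

typedef struct {
	int counter;
} atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* plain wrapping increment: no overflow trap after the add */
	asm volatile("lock; incl %0" : "+m" (v->counter));
}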
88946diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
88947index 8c79980..723f6f9 100644
88948--- a/include/scsi/scsi_transport_fc.h
88949+++ b/include/scsi/scsi_transport_fc.h
88950@@ -752,7 +752,8 @@ struct fc_function_template {
88951 unsigned long show_host_system_hostname:1;
88952
88953 unsigned long disable_target_scan:1;
88954-};
88955+} __do_const;
88956+typedef struct fc_function_template __no_const fc_function_template_no_const;
88957
88958
88959 /**
88960diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
88961index ae6c3b8..fd748ac 100644
88962--- a/include/sound/compress_driver.h
88963+++ b/include/sound/compress_driver.h
88964@@ -128,7 +128,7 @@ struct snd_compr_ops {
88965 struct snd_compr_caps *caps);
88966 int (*get_codec_caps) (struct snd_compr_stream *stream,
88967 struct snd_compr_codec_caps *codec);
88968-};
88969+} __no_const;
88970
88971 /**
88972 * struct snd_compr: Compressed device
88973diff --git a/include/sound/soc.h b/include/sound/soc.h
88974index ed9e2d7..aad0887 100644
88975--- a/include/sound/soc.h
88976+++ b/include/sound/soc.h
88977@@ -798,7 +798,7 @@ struct snd_soc_codec_driver {
88978 /* probe ordering - for components with runtime dependencies */
88979 int probe_order;
88980 int remove_order;
88981-};
88982+} __do_const;
88983
88984 /* SoC platform interface */
88985 struct snd_soc_platform_driver {
88986@@ -845,7 +845,7 @@ struct snd_soc_platform_driver {
88987 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
88988 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
88989 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
88990-};
88991+} __do_const;
88992
88993 struct snd_soc_platform {
88994 const char *name;
88995diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
88996index 9ec9864..e2ee1ee 100644
88997--- a/include/target/target_core_base.h
88998+++ b/include/target/target_core_base.h
88999@@ -761,7 +761,7 @@ struct se_device {
89000 atomic_long_t write_bytes;
89001 /* Active commands on this virtual SE device */
89002 atomic_t simple_cmds;
89003- atomic_t dev_ordered_id;
89004+ atomic_unchecked_t dev_ordered_id;
89005 atomic_t dev_ordered_sync;
89006 atomic_t dev_qf_count;
89007 int export_count;
89008diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
89009new file mode 100644
89010index 0000000..fb634b7
89011--- /dev/null
89012+++ b/include/trace/events/fs.h
89013@@ -0,0 +1,53 @@
89014+#undef TRACE_SYSTEM
89015+#define TRACE_SYSTEM fs
89016+
89017+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
89018+#define _TRACE_FS_H
89019+
89020+#include <linux/fs.h>
89021+#include <linux/tracepoint.h>
89022+
89023+TRACE_EVENT(do_sys_open,
89024+
89025+ TP_PROTO(const char *filename, int flags, int mode),
89026+
89027+ TP_ARGS(filename, flags, mode),
89028+
89029+ TP_STRUCT__entry(
89030+ __string( filename, filename )
89031+ __field( int, flags )
89032+ __field( int, mode )
89033+ ),
89034+
89035+ TP_fast_assign(
89036+ __assign_str(filename, filename);
89037+ __entry->flags = flags;
89038+ __entry->mode = mode;
89039+ ),
89040+
89041+ TP_printk("\"%s\" %x %o",
89042+ __get_str(filename), __entry->flags, __entry->mode)
89043+);
89044+
89045+TRACE_EVENT(open_exec,
89046+
89047+ TP_PROTO(const char *filename),
89048+
89049+ TP_ARGS(filename),
89050+
89051+ TP_STRUCT__entry(
89052+ __string( filename, filename )
89053+ ),
89054+
89055+ TP_fast_assign(
89056+ __assign_str(filename, filename);
89057+ ),
89058+
89059+ TP_printk("\"%s\"",
89060+ __get_str(filename))
89061+);
89062+
89063+#endif /* _TRACE_FS_H */
89064+
89065+/* This part must be outside protection */
89066+#include <trace/define_trace.h>
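Usage sketch for the new header (the actual call sites live elsewhere in the patch and are assumed here): exactly one .c file defines CREATE_TRACE_POINTS before including the header to expand the event definitions, then fires the generated tracepoint.

#define CREATE_TRACE_POINTS
#include <trace/events/fs.h>

static void report_open(const char *name, int flags, int mode)
{
	trace_do_sys_open(name, flags, mode); /* generated from TRACE_EVENT(do_sys_open) */
}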
89067diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
89068index 1c09820..7f5ec79 100644
89069--- a/include/trace/events/irq.h
89070+++ b/include/trace/events/irq.h
89071@@ -36,7 +36,7 @@ struct softirq_action;
89072 */
89073 TRACE_EVENT(irq_handler_entry,
89074
89075- TP_PROTO(int irq, struct irqaction *action),
89076+ TP_PROTO(int irq, const struct irqaction *action),
89077
89078 TP_ARGS(irq, action),
89079
89080@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
89081 */
89082 TRACE_EVENT(irq_handler_exit,
89083
89084- TP_PROTO(int irq, struct irqaction *action, int ret),
89085+ TP_PROTO(int irq, const struct irqaction *action, int ret),
89086
89087 TP_ARGS(irq, action, ret),
89088
89089diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
89090index 7caf44c..23c6f27 100644
89091--- a/include/uapi/linux/a.out.h
89092+++ b/include/uapi/linux/a.out.h
89093@@ -39,6 +39,14 @@ enum machine_type {
89094 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
89095 };
89096
89097+/* Constants for the N_FLAGS field */
89098+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
89099+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
89100+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
89101+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
89102+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
89103+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
89104+
89105 #if !defined (N_MAGIC)
89106 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
89107 #endif
89108diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
89109index 22b6ad3..aeba37e 100644
89110--- a/include/uapi/linux/bcache.h
89111+++ b/include/uapi/linux/bcache.h
89112@@ -5,6 +5,7 @@
89113 * Bcache on disk data structures
89114 */
89115
89116+#include <linux/compiler.h>
89117 #include <asm/types.h>
89118
89119 #define BITMASK(name, type, field, offset, size) \
89120@@ -20,8 +21,8 @@ static inline void SET_##name(type *k, __u64 v) \
89121 /* Btree keys - all units are in sectors */
89122
89123 struct bkey {
89124- __u64 high;
89125- __u64 low;
89126+ __u64 high __intentional_overflow(-1);
89127+ __u64 low __intentional_overflow(-1);
89128 __u64 ptr[];
89129 };
89130
89131diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
89132index d876736..ccce5c0 100644
89133--- a/include/uapi/linux/byteorder/little_endian.h
89134+++ b/include/uapi/linux/byteorder/little_endian.h
89135@@ -42,51 +42,51 @@
89136
89137 static inline __le64 __cpu_to_le64p(const __u64 *p)
89138 {
89139- return (__force __le64)*p;
89140+ return (__force const __le64)*p;
89141 }
89142-static inline __u64 __le64_to_cpup(const __le64 *p)
89143+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
89144 {
89145- return (__force __u64)*p;
89146+ return (__force const __u64)*p;
89147 }
89148 static inline __le32 __cpu_to_le32p(const __u32 *p)
89149 {
89150- return (__force __le32)*p;
89151+ return (__force const __le32)*p;
89152 }
89153 static inline __u32 __le32_to_cpup(const __le32 *p)
89154 {
89155- return (__force __u32)*p;
89156+ return (__force const __u32)*p;
89157 }
89158 static inline __le16 __cpu_to_le16p(const __u16 *p)
89159 {
89160- return (__force __le16)*p;
89161+ return (__force const __le16)*p;
89162 }
89163 static inline __u16 __le16_to_cpup(const __le16 *p)
89164 {
89165- return (__force __u16)*p;
89166+ return (__force const __u16)*p;
89167 }
89168 static inline __be64 __cpu_to_be64p(const __u64 *p)
89169 {
89170- return (__force __be64)__swab64p(p);
89171+ return (__force const __be64)__swab64p(p);
89172 }
89173 static inline __u64 __be64_to_cpup(const __be64 *p)
89174 {
89175- return __swab64p((__u64 *)p);
89176+ return __swab64p((const __u64 *)p);
89177 }
89178 static inline __be32 __cpu_to_be32p(const __u32 *p)
89179 {
89180- return (__force __be32)__swab32p(p);
89181+ return (__force const __be32)__swab32p(p);
89182 }
89183-static inline __u32 __be32_to_cpup(const __be32 *p)
89184+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
89185 {
89186- return __swab32p((__u32 *)p);
89187+ return __swab32p((const __u32 *)p);
89188 }
89189 static inline __be16 __cpu_to_be16p(const __u16 *p)
89190 {
89191- return (__force __be16)__swab16p(p);
89192+ return (__force const __be16)__swab16p(p);
89193 }
89194 static inline __u16 __be16_to_cpup(const __be16 *p)
89195 {
89196- return __swab16p((__u16 *)p);
89197+ return __swab16p((const __u16 *)p);
89198 }
89199 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
89200 #define __le64_to_cpus(x) do { (void)(x); } while (0)
89201diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
89202index ef6103b..d4e65dd 100644
89203--- a/include/uapi/linux/elf.h
89204+++ b/include/uapi/linux/elf.h
89205@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
89206 #define PT_GNU_EH_FRAME 0x6474e550
89207
89208 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
89209+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
89210+
89211+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
89212+
89213+/* Constants for the e_flags field */
89214+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
89215+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
89216+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
89217+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
89218+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
89219+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
89220
89221 /*
89222 * Extended Numbering
89223@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
89224 #define DT_DEBUG 21
89225 #define DT_TEXTREL 22
89226 #define DT_JMPREL 23
89227+#define DT_FLAGS 30
89228+ #define DF_TEXTREL 0x00000004
89229 #define DT_ENCODING 32
89230 #define OLD_DT_LOOS 0x60000000
89231 #define DT_LOOS 0x6000000d
89232@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
89233 #define PF_W 0x2
89234 #define PF_X 0x1
89235
89236+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
89237+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
89238+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
89239+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
89240+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
89241+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
89242+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
89243+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
89244+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
89245+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
89246+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
89247+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
89248+
89249 typedef struct elf32_phdr{
89250 Elf32_Word p_type;
89251 Elf32_Off p_offset;
89252@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
89253 #define EI_OSABI 7
89254 #define EI_PAD 8
89255
89256+#define EI_PAX 14
89257+
89258 #define ELFMAG0 0x7f /* EI_MAG */
89259 #define ELFMAG1 'E'
89260 #define ELFMAG2 'L'
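Annotation: a hedged sketch of how an ELF loader can honor the new PT_PAX_FLAGS program header. Only the PT_/PF_ constants come from the hunk above; the function name and the MF_PAX_MPROTECT per-mm flag are illustrative assumptions.

static unsigned long pax_flags_from_phdrs(const struct elf_phdr *phdr, unsigned int nr)
{
	unsigned long mf = MF_PAX_MPROTECT; /* assumed default-on policy */
	unsigned int i;

	for (i = 0; i < nr; i++) {
		if (phdr[i].p_type != PT_PAX_FLAGS)
			continue;
		if (phdr[i].p_flags & PF_NOMPROTECT) /* explicit opt-out wins */
			mf &= ~MF_PAX_MPROTECT;
	}
	return mf;
}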
89261diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
89262index aa169c4..6a2771d 100644
89263--- a/include/uapi/linux/personality.h
89264+++ b/include/uapi/linux/personality.h
89265@@ -30,6 +30,7 @@ enum {
89266 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
89267 ADDR_NO_RANDOMIZE | \
89268 ADDR_COMPAT_LAYOUT | \
89269+ ADDR_LIMIT_3GB | \
89270 MMAP_PAGE_ZERO)
89271
89272 /*
89273diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
89274index 7530e74..e714828 100644
89275--- a/include/uapi/linux/screen_info.h
89276+++ b/include/uapi/linux/screen_info.h
89277@@ -43,7 +43,8 @@ struct screen_info {
89278 __u16 pages; /* 0x32 */
89279 __u16 vesa_attributes; /* 0x34 */
89280 __u32 capabilities; /* 0x36 */
89281- __u8 _reserved[6]; /* 0x3a */
89282+ __u16 vesapm_size; /* 0x3a */
89283+ __u8 _reserved[4]; /* 0x3c */
89284 } __attribute__((packed));
89285
89286 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
89287diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
89288index 0e011eb..82681b1 100644
89289--- a/include/uapi/linux/swab.h
89290+++ b/include/uapi/linux/swab.h
89291@@ -43,7 +43,7 @@
89292 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
89293 */
89294
89295-static inline __attribute_const__ __u16 __fswab16(__u16 val)
89296+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
89297 {
89298 #ifdef __HAVE_BUILTIN_BSWAP16__
89299 return __builtin_bswap16(val);
89300@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
89301 #endif
89302 }
89303
89304-static inline __attribute_const__ __u32 __fswab32(__u32 val)
89305+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
89306 {
89307 #ifdef __HAVE_BUILTIN_BSWAP32__
89308 return __builtin_bswap32(val);
89309@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
89310 #endif
89311 }
89312
89313-static inline __attribute_const__ __u64 __fswab64(__u64 val)
89314+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
89315 {
89316 #ifdef __HAVE_BUILTIN_BSWAP64__
89317 return __builtin_bswap64(val);
89318diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
89319index 6d67213..552fdd9 100644
89320--- a/include/uapi/linux/sysctl.h
89321+++ b/include/uapi/linux/sysctl.h
89322@@ -155,8 +155,6 @@ enum
89323 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
89324 };
89325
89326-
89327-
89328 /* CTL_VM names: */
89329 enum
89330 {
89331diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
89332index 168ff50..a921df2 100644
89333--- a/include/uapi/linux/videodev2.h
89334+++ b/include/uapi/linux/videodev2.h
89335@@ -1253,7 +1253,7 @@ struct v4l2_ext_control {
89336 union {
89337 __s32 value;
89338 __s64 value64;
89339- char *string;
89340+ char __user *string;
89341 };
89342 } __attribute__ ((packed));
89343
89344diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
89345index c38355c..17a57bc 100644
89346--- a/include/uapi/linux/xattr.h
89347+++ b/include/uapi/linux/xattr.h
89348@@ -73,5 +73,9 @@
89349 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
89350 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
89351
89352+/* User namespace */
89353+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
89354+#define XATTR_PAX_FLAGS_SUFFIX "flags"
89355+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
89356
89357 #endif /* _UAPI_LINUX_XATTR_H */
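Annotation: the name built above expands to "user.pax.flags", so a binary can be marked from userspace with setfattr or setxattr(2). Sketch of the userspace side; the flag-string value and its letter encoding follow the PaX userland tools and are an assumption here, not defined by this header.

#include <string.h>
#include <sys/xattr.h>

int pax_set_flags(const char *path, const char *flags)
{
	/* e.g. flags = "em"; the encoding is the PaX tools' convention */
	return setxattr(path, "user.pax.flags", flags, strlen(flags), 0);
}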
89358diff --git a/include/video/udlfb.h b/include/video/udlfb.h
89359index f9466fa..f4e2b81 100644
89360--- a/include/video/udlfb.h
89361+++ b/include/video/udlfb.h
89362@@ -53,10 +53,10 @@ struct dlfb_data {
89363 u32 pseudo_palette[256];
89364 int blank_mode; /*one of FB_BLANK_ */
89365 /* blit-only rendering path metrics, exposed through sysfs */
89366- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
89367- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
89368- atomic_t bytes_sent; /* to usb, after compression including overhead */
89369- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
89370+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
89371+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
89372+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
89373+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
89374 };
89375
89376 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
89377diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
89378index 30f5362..8ed8ac9 100644
89379--- a/include/video/uvesafb.h
89380+++ b/include/video/uvesafb.h
89381@@ -122,6 +122,7 @@ struct uvesafb_par {
89382 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
89383 u8 pmi_setpal; /* PMI for palette changes */
89384 u16 *pmi_base; /* protected mode interface location */
89385+ u8 *pmi_code; /* protected mode code location */
89386 void *pmi_start;
89387 void *pmi_pal;
89388 u8 *vbe_state_orig; /*
89389diff --git a/init/Kconfig b/init/Kconfig
89390index 9d76b99..d378b1e 100644
89391--- a/init/Kconfig
89392+++ b/init/Kconfig
89393@@ -1105,6 +1105,7 @@ endif # CGROUPS
89394
89395 config CHECKPOINT_RESTORE
89396 bool "Checkpoint/restore support" if EXPERT
89397+ depends on !GRKERNSEC
89398 default n
89399 help
89400 Enables additional kernel features in a sake of checkpoint/restore.
89400 Enables additional kernel features for the sake of checkpoint/restore.
89401@@ -1589,7 +1590,7 @@ config SLUB_DEBUG
89402
89403 config COMPAT_BRK
89404 bool "Disable heap randomization"
89405- default y
89406+ default n
89407 help
89408 Randomizing heap placement makes heap exploits harder, but it
89409 also breaks ancient binaries (including anything libc5 based).
89410@@ -1877,7 +1878,7 @@ config INIT_ALL_POSSIBLE
89411 config STOP_MACHINE
89412 bool
89413 default y
89414- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
89415+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
89416 help
89417 Need stop_machine() primitive.
89418
89419diff --git a/init/Makefile b/init/Makefile
89420index 7bc47ee..6da2dc7 100644
89421--- a/init/Makefile
89422+++ b/init/Makefile
89423@@ -2,6 +2,9 @@
89424 # Makefile for the linux kernel.
89425 #
89426
89427+ccflags-y := $(GCC_PLUGINS_CFLAGS)
89428+asflags-y := $(GCC_PLUGINS_AFLAGS)
89429+
89430 obj-y := main.o version.o mounts.o
89431 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
89432 obj-y += noinitramfs.o
89433diff --git a/init/do_mounts.c b/init/do_mounts.c
89434index 82f2288..ea1430a 100644
89435--- a/init/do_mounts.c
89436+++ b/init/do_mounts.c
89437@@ -359,11 +359,11 @@ static void __init get_fs_names(char *page)
89438 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
89439 {
89440 struct super_block *s;
89441- int err = sys_mount(name, "/root", fs, flags, data);
89442+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
89443 if (err)
89444 return err;
89445
89446- sys_chdir("/root");
89447+ sys_chdir((const char __force_user *)"/root");
89448 s = current->fs->pwd.dentry->d_sb;
89449 ROOT_DEV = s->s_dev;
89450 printk(KERN_INFO
89451@@ -484,18 +484,18 @@ void __init change_floppy(char *fmt, ...)
89452 va_start(args, fmt);
89453 vsprintf(buf, fmt, args);
89454 va_end(args);
89455- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
89456+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
89457 if (fd >= 0) {
89458 sys_ioctl(fd, FDEJECT, 0);
89459 sys_close(fd);
89460 }
89461 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
89462- fd = sys_open("/dev/console", O_RDWR, 0);
89463+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
89464 if (fd >= 0) {
89465 sys_ioctl(fd, TCGETS, (long)&termios);
89466 termios.c_lflag &= ~ICANON;
89467 sys_ioctl(fd, TCSETSF, (long)&termios);
89468- sys_read(fd, &c, 1);
89469+ sys_read(fd, (char __user *)&c, 1);
89470 termios.c_lflag |= ICANON;
89471 sys_ioctl(fd, TCSETSF, (long)&termios);
89472 sys_close(fd);
89473@@ -589,8 +589,8 @@ void __init prepare_namespace(void)
89474 mount_root();
89475 out:
89476 devtmpfs_mount("dev");
89477- sys_mount(".", "/", NULL, MS_MOVE, NULL);
89478- sys_chroot(".");
89479+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
89480+ sys_chroot((const char __force_user *)".");
89481 }
89482
89483 static bool is_tmpfs;
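Annotation: the pattern behind all the casts above. Boot-time code runs with a kernel address limit, so handing kernel strings to syscall entry points is legal; under UDEREF the __user and kernel address spaces become distinct types to the checker, and __force_user marks the cross-space cast as deliberate rather than a missed copy_from_user(). Illustrative call only:

static int __init mount_sketch(void)
{
	return sys_mount((char __force_user *)"/dev/root",
			 (char __force_user *)"/root",
			 (char __force_user *)"ext4", 0, NULL);
}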
89484diff --git a/init/do_mounts.h b/init/do_mounts.h
89485index f5b978a..69dbfe8 100644
89486--- a/init/do_mounts.h
89487+++ b/init/do_mounts.h
89488@@ -15,15 +15,15 @@ extern int root_mountflags;
89489
89490 static inline int create_dev(char *name, dev_t dev)
89491 {
89492- sys_unlink(name);
89493- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
89494+ sys_unlink((char __force_user *)name);
89495+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
89496 }
89497
89498 #if BITS_PER_LONG == 32
89499 static inline u32 bstat(char *name)
89500 {
89501 struct stat64 stat;
89502- if (sys_stat64(name, &stat) != 0)
89503+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
89504 return 0;
89505 if (!S_ISBLK(stat.st_mode))
89506 return 0;
89507@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
89508 static inline u32 bstat(char *name)
89509 {
89510 struct stat stat;
89511- if (sys_newstat(name, &stat) != 0)
89512+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
89513 return 0;
89514 if (!S_ISBLK(stat.st_mode))
89515 return 0;
89516diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
89517index 3e0878e..8a9d7a0 100644
89518--- a/init/do_mounts_initrd.c
89519+++ b/init/do_mounts_initrd.c
89520@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
89521 {
89522 sys_unshare(CLONE_FS | CLONE_FILES);
89523 /* stdin/stdout/stderr for /linuxrc */
89524- sys_open("/dev/console", O_RDWR, 0);
89525+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
89526 sys_dup(0);
89527 sys_dup(0);
89528 /* move initrd over / and chdir/chroot in initrd root */
89529- sys_chdir("/root");
89530- sys_mount(".", "/", NULL, MS_MOVE, NULL);
89531- sys_chroot(".");
89532+ sys_chdir((const char __force_user *)"/root");
89533+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
89534+ sys_chroot((const char __force_user *)".");
89535 sys_setsid();
89536 return 0;
89537 }
89538@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
89539 create_dev("/dev/root.old", Root_RAM0);
89540 /* mount initrd on rootfs' /root */
89541 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
89542- sys_mkdir("/old", 0700);
89543- sys_chdir("/old");
89544+ sys_mkdir((const char __force_user *)"/old", 0700);
89545+ sys_chdir((const char __force_user *)"/old");
89546
89547 /* try loading default modules from initrd */
89548 load_default_modules();
89549@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
89550 current->flags &= ~PF_FREEZER_SKIP;
89551
89552 /* move initrd to rootfs' /old */
89553- sys_mount("..", ".", NULL, MS_MOVE, NULL);
89554+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
89555 /* switch root and cwd back to / of rootfs */
89556- sys_chroot("..");
89557+ sys_chroot((const char __force_user *)"..");
89558
89559 if (new_decode_dev(real_root_dev) == Root_RAM0) {
89560- sys_chdir("/old");
89561+ sys_chdir((const char __force_user *)"/old");
89562 return;
89563 }
89564
89565- sys_chdir("/");
89566+ sys_chdir((const char __force_user *)"/");
89567 ROOT_DEV = new_decode_dev(real_root_dev);
89568 mount_root();
89569
89570 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
89571- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
89572+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
89573 if (!error)
89574 printk("okay\n");
89575 else {
89576- int fd = sys_open("/dev/root.old", O_RDWR, 0);
89577+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
89578 if (error == -ENOENT)
89579 printk("/initrd does not exist. Ignored.\n");
89580 else
89581 printk("failed\n");
89582 printk(KERN_NOTICE "Unmounting old root\n");
89583- sys_umount("/old", MNT_DETACH);
89584+ sys_umount((char __force_user *)"/old", MNT_DETACH);
89585 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
89586 if (fd < 0) {
89587 error = fd;
89588@@ -127,11 +127,11 @@ int __init initrd_load(void)
89589 * mounted in the normal path.
89590 */
89591 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
89592- sys_unlink("/initrd.image");
89593+ sys_unlink((const char __force_user *)"/initrd.image");
89594 handle_initrd();
89595 return 1;
89596 }
89597 }
89598- sys_unlink("/initrd.image");
89599+ sys_unlink((const char __force_user *)"/initrd.image");
89600 return 0;
89601 }
89602diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
89603index 8cb6db5..d729f50 100644
89604--- a/init/do_mounts_md.c
89605+++ b/init/do_mounts_md.c
89606@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
89607 partitioned ? "_d" : "", minor,
89608 md_setup_args[ent].device_names);
89609
89610- fd = sys_open(name, 0, 0);
89611+ fd = sys_open((char __force_user *)name, 0, 0);
89612 if (fd < 0) {
89613 printk(KERN_ERR "md: open failed - cannot start "
89614 "array %s\n", name);
89615@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
89616 * array without it
89617 */
89618 sys_close(fd);
89619- fd = sys_open(name, 0, 0);
89620+ fd = sys_open((char __force_user *)name, 0, 0);
89621 sys_ioctl(fd, BLKRRPART, 0);
89622 }
89623 sys_close(fd);
89624@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
89625
89626 wait_for_device_probe();
89627
89628- fd = sys_open("/dev/md0", 0, 0);
89629+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
89630 if (fd >= 0) {
89631 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
89632 sys_close(fd);
89633diff --git a/init/init_task.c b/init/init_task.c
89634index ba0a7f36..2bcf1d5 100644
89635--- a/init/init_task.c
89636+++ b/init/init_task.c
89637@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
89638 * Initial thread structure. Alignment of this is handled by a special
89639 * linker map entry.
89640 */
89641+#ifdef CONFIG_X86
89642+union thread_union init_thread_union __init_task_data;
89643+#else
89644 union thread_union init_thread_union __init_task_data =
89645 { INIT_THREAD_INFO(init_task) };
89646+#endif
89647diff --git a/init/initramfs.c b/init/initramfs.c
89648index a8497fa..35b3c90 100644
89649--- a/init/initramfs.c
89650+++ b/init/initramfs.c
89651@@ -84,7 +84,7 @@ static void __init free_hash(void)
89652 }
89653 }
89654
89655-static long __init do_utime(char *filename, time_t mtime)
89656+static long __init do_utime(char __force_user *filename, time_t mtime)
89657 {
89658 struct timespec t[2];
89659
89660@@ -119,7 +119,7 @@ static void __init dir_utime(void)
89661 struct dir_entry *de, *tmp;
89662 list_for_each_entry_safe(de, tmp, &dir_list, list) {
89663 list_del(&de->list);
89664- do_utime(de->name, de->mtime);
89665+ do_utime((char __force_user *)de->name, de->mtime);
89666 kfree(de->name);
89667 kfree(de);
89668 }
89669@@ -281,7 +281,7 @@ static int __init maybe_link(void)
89670 if (nlink >= 2) {
89671 char *old = find_link(major, minor, ino, mode, collected);
89672 if (old)
89673- return (sys_link(old, collected) < 0) ? -1 : 1;
89674+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
89675 }
89676 return 0;
89677 }
89678@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
89679 {
89680 struct stat st;
89681
89682- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
89683+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
89684 if (S_ISDIR(st.st_mode))
89685- sys_rmdir(path);
89686+ sys_rmdir((char __force_user *)path);
89687 else
89688- sys_unlink(path);
89689+ sys_unlink((char __force_user *)path);
89690 }
89691 }
89692
89693@@ -315,7 +315,7 @@ static int __init do_name(void)
89694 int openflags = O_WRONLY|O_CREAT;
89695 if (ml != 1)
89696 openflags |= O_TRUNC;
89697- wfd = sys_open(collected, openflags, mode);
89698+ wfd = sys_open((char __force_user *)collected, openflags, mode);
89699
89700 if (wfd >= 0) {
89701 sys_fchown(wfd, uid, gid);
89702@@ -327,17 +327,17 @@ static int __init do_name(void)
89703 }
89704 }
89705 } else if (S_ISDIR(mode)) {
89706- sys_mkdir(collected, mode);
89707- sys_chown(collected, uid, gid);
89708- sys_chmod(collected, mode);
89709+ sys_mkdir((char __force_user *)collected, mode);
89710+ sys_chown((char __force_user *)collected, uid, gid);
89711+ sys_chmod((char __force_user *)collected, mode);
89712 dir_add(collected, mtime);
89713 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
89714 S_ISFIFO(mode) || S_ISSOCK(mode)) {
89715 if (maybe_link() == 0) {
89716- sys_mknod(collected, mode, rdev);
89717- sys_chown(collected, uid, gid);
89718- sys_chmod(collected, mode);
89719- do_utime(collected, mtime);
89720+ sys_mknod((char __force_user *)collected, mode, rdev);
89721+ sys_chown((char __force_user *)collected, uid, gid);
89722+ sys_chmod((char __force_user *)collected, mode);
89723+ do_utime((char __force_user *)collected, mtime);
89724 }
89725 }
89726 return 0;
89727@@ -346,15 +346,15 @@ static int __init do_name(void)
89728 static int __init do_copy(void)
89729 {
89730 if (count >= body_len) {
89731- sys_write(wfd, victim, body_len);
89732+ sys_write(wfd, (char __force_user *)victim, body_len);
89733 sys_close(wfd);
89734- do_utime(vcollected, mtime);
89735+ do_utime((char __force_user *)vcollected, mtime);
89736 kfree(vcollected);
89737 eat(body_len);
89738 state = SkipIt;
89739 return 0;
89740 } else {
89741- sys_write(wfd, victim, count);
89742+ sys_write(wfd, (char __force_user *)victim, count);
89743 body_len -= count;
89744 eat(count);
89745 return 1;
89746@@ -365,9 +365,9 @@ static int __init do_symlink(void)
89747 {
89748 collected[N_ALIGN(name_len) + body_len] = '\0';
89749 clean_path(collected, 0);
89750- sys_symlink(collected + N_ALIGN(name_len), collected);
89751- sys_lchown(collected, uid, gid);
89752- do_utime(collected, mtime);
89753+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
89754+ sys_lchown((char __force_user *)collected, uid, gid);
89755+ do_utime((char __force_user *)collected, mtime);
89756 state = SkipIt;
89757 next_state = Reset;
89758 return 0;
89759diff --git a/init/main.c b/init/main.c
89760index e8ae1fe..f60f98c 100644
89761--- a/init/main.c
89762+++ b/init/main.c
89763@@ -98,6 +98,8 @@ extern void radix_tree_init(void);
89764 static inline void mark_rodata_ro(void) { }
89765 #endif
89766
89767+extern void grsecurity_init(void);
89768+
89769 /*
89770 * Debug helper: via this flag we know that we are in 'early bootup code'
89771 * where only the boot processor is running with IRQ disabled. This means
89772@@ -159,6 +161,75 @@ static int __init set_reset_devices(char *str)
89773
89774 __setup("reset_devices", set_reset_devices);
89775
89776+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
89777+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
89778+static int __init setup_grsec_proc_gid(char *str)
89779+{
89780+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
89781+ return 1;
89782+}
89783+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
89784+#endif
89785+
89786+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
89787+unsigned long pax_user_shadow_base __read_only;
89788+EXPORT_SYMBOL(pax_user_shadow_base);
89789+extern char pax_enter_kernel_user[];
89790+extern char pax_exit_kernel_user[];
89791+#endif
89792+
89793+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
89794+static int __init setup_pax_nouderef(char *str)
89795+{
89796+#ifdef CONFIG_X86_32
89797+ unsigned int cpu;
89798+ struct desc_struct *gdt;
89799+
89800+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
89801+ gdt = get_cpu_gdt_table(cpu);
89802+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
89803+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
89804+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
89805+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
89806+ }
89807+ loadsegment(ds, __KERNEL_DS);
89808+ loadsegment(es, __KERNEL_DS);
89809+ loadsegment(ss, __KERNEL_DS);
89810+#else
89811+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
89812+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
89813+ clone_pgd_mask = ~(pgdval_t)0UL;
89814+ pax_user_shadow_base = 0UL;
89815+ setup_clear_cpu_cap(X86_FEATURE_PCID);
89816+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
89817+#endif
89818+
89819+ return 0;
89820+}
89821+early_param("pax_nouderef", setup_pax_nouderef);
89822+
89823+#ifdef CONFIG_X86_64
89824+static int __init setup_pax_weakuderef(char *str)
89825+{
89826+ if (clone_pgd_mask != ~(pgdval_t)0UL)
89827+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
89828+ return 1;
89829+}
89830+__setup("pax_weakuderef", setup_pax_weakuderef);
89831+#endif
89832+#endif
89833+
89834+#ifdef CONFIG_PAX_SOFTMODE
89835+int pax_softmode;
89836+
89837+static int __init setup_pax_softmode(char *str)
89838+{
89839+ get_option(&str, &pax_softmode);
89840+ return 1;
89841+}
89842+__setup("pax_softmode=", setup_pax_softmode);
89843+#endif
89844+
89845 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
89846 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
89847 static const char *panic_later, *panic_param;
89848@@ -727,7 +798,7 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
89849 struct blacklist_entry *entry;
89850 char *fn_name;
89851
89852- fn_name = kasprintf(GFP_KERNEL, "%pf", fn);
89853+ fn_name = kasprintf(GFP_KERNEL, "%pX", fn);
89854 if (!fn_name)
89855 return false;
89856
89857@@ -779,7 +850,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
89858 {
89859 int count = preempt_count();
89860 int ret;
89861- char msgbuf[64];
89862+ const char *msg1 = "", *msg2 = "";
89863
89864 if (initcall_blacklisted(fn))
89865 return -EPERM;
89866@@ -789,18 +860,17 @@ int __init_or_module do_one_initcall(initcall_t fn)
89867 else
89868 ret = fn();
89869
89870- msgbuf[0] = 0;
89871-
89872 if (preempt_count() != count) {
89873- sprintf(msgbuf, "preemption imbalance ");
89874+ msg1 = " preemption imbalance";
89875 preempt_count_set(count);
89876 }
89877 if (irqs_disabled()) {
89878- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
89879+ msg2 = " disabled interrupts";
89880 local_irq_enable();
89881 }
89882- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
89883+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
89884
89885+ add_latent_entropy();
89886 return ret;
89887 }
89888
89889@@ -907,8 +977,8 @@ static int run_init_process(const char *init_filename)
89890 {
89891 argv_init[0] = init_filename;
89892 return do_execve(getname_kernel(init_filename),
89893- (const char __user *const __user *)argv_init,
89894- (const char __user *const __user *)envp_init);
89895+ (const char __user *const __force_user *)argv_init,
89896+ (const char __user *const __force_user *)envp_init);
89897 }
89898
89899 static int try_to_run_init_process(const char *init_filename)
89900@@ -925,6 +995,10 @@ static int try_to_run_init_process(const char *init_filename)
89901 return ret;
89902 }
89903
89904+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
89905+extern int gr_init_ran;
89906+#endif
89907+
89908 static noinline void __init kernel_init_freeable(void);
89909
89910 static int __ref kernel_init(void *unused)
89911@@ -949,6 +1023,11 @@ static int __ref kernel_init(void *unused)
89912 ramdisk_execute_command, ret);
89913 }
89914
89915+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
89916+ /* if no initrd was used, be extra sure we enforce chroot restrictions */
89917+ gr_init_ran = 1;
89918+#endif
89919+
89920 /*
89921 * We try each of these until one succeeds.
89922 *
89923@@ -1004,7 +1083,7 @@ static noinline void __init kernel_init_freeable(void)
89924 do_basic_setup();
89925
89926 /* Open the /dev/console on the rootfs, this should never fail */
89927- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
89928+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
89929 pr_err("Warning: unable to open an initial console.\n");
89930
89931 (void) sys_dup(0);
89932@@ -1017,11 +1096,13 @@ static noinline void __init kernel_init_freeable(void)
89933 if (!ramdisk_execute_command)
89934 ramdisk_execute_command = "/init";
89935
89936- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
89937+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
89938 ramdisk_execute_command = NULL;
89939 prepare_namespace();
89940 }
89941
89942+ grsecurity_init();
89943+
89944 /*
89945 * Ok, we have completed the initial bootup, and
89946 * we're essentially up and running. Get rid of the
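Annotation: the hunk adds several boot parameters through the standard hooks; a minimal sketch of the __setup() pattern it uses. Returning 1 tells the parser the option was consumed so it is not passed on to init's argv/envp; early_param() handlers differ in running earlier, during parse_early_param(), which is why the pax_nouderef hook above uses that variant.

static int __init parse_myflag(char *str)
{
	int val = 0;

	get_option(&str, &val); /* same helper setup_pax_softmode() uses */
	pr_info("myflag=%d\n", val);
	return 1;
}
__setup("myflag=", parse_myflag);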
89947diff --git a/ipc/compat.c b/ipc/compat.c
89948index b5ef4f7..ff31d87 100644
89949--- a/ipc/compat.c
89950+++ b/ipc/compat.c
89951@@ -396,7 +396,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
89952 COMPAT_SHMLBA);
89953 if (err < 0)
89954 return err;
89955- return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
89956+ return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
89957 }
89958 case SHMDT:
89959 return sys_shmdt(compat_ptr(ptr));
89960diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
89961index c3f0326..d4e0579 100644
89962--- a/ipc/ipc_sysctl.c
89963+++ b/ipc/ipc_sysctl.c
89964@@ -30,7 +30,7 @@ static void *get_ipc(struct ctl_table *table)
89965 static int proc_ipc_dointvec(struct ctl_table *table, int write,
89966 void __user *buffer, size_t *lenp, loff_t *ppos)
89967 {
89968- struct ctl_table ipc_table;
89969+ ctl_table_no_const ipc_table;
89970
89971 memcpy(&ipc_table, table, sizeof(ipc_table));
89972 ipc_table.data = get_ipc(table);
89973@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(struct ctl_table *table, int write,
89974 static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
89975 void __user *buffer, size_t *lenp, loff_t *ppos)
89976 {
89977- struct ctl_table ipc_table;
89978+ ctl_table_no_const ipc_table;
89979
89980 memcpy(&ipc_table, table, sizeof(ipc_table));
89981 ipc_table.data = get_ipc(table);
89982@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
89983 static int proc_ipc_callback_dointvec_minmax(struct ctl_table *table, int write,
89984 void __user *buffer, size_t *lenp, loff_t *ppos)
89985 {
89986- struct ctl_table ipc_table;
89987+ ctl_table_no_const ipc_table;
89988 size_t lenp_bef = *lenp;
89989 int rc;
89990
89991@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec_minmax(struct ctl_table *table, int write,
89992 static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
89993 void __user *buffer, size_t *lenp, loff_t *ppos)
89994 {
89995- struct ctl_table ipc_table;
89996+ ctl_table_no_const ipc_table;
89997 memcpy(&ipc_table, table, sizeof(ipc_table));
89998 ipc_table.data = get_ipc(table);
89999
90000@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
90001 static int proc_ipcauto_dointvec_minmax(struct ctl_table *table, int write,
90002 void __user *buffer, size_t *lenp, loff_t *ppos)
90003 {
90004- struct ctl_table ipc_table;
90005+ ctl_table_no_const ipc_table;
90006 size_t lenp_bef = *lenp;
90007 int oldval;
90008 int rc;
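Annotation: with the constify plugin, struct ctl_table instances become const, but these proc handlers build a scratch copy on the stack and repoint .data, so they need a writable twin of the type. Assumed definition behind ctl_table_no_const; in the full patch it lives in include/linux/sysctl.h.

typedef struct ctl_table __no_const ctl_table_no_const;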
90009diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
90010index 68d4e95..1477ded 100644
90011--- a/ipc/mq_sysctl.c
90012+++ b/ipc/mq_sysctl.c
90013@@ -25,7 +25,7 @@ static void *get_mq(struct ctl_table *table)
90014 static int proc_mq_dointvec(struct ctl_table *table, int write,
90015 void __user *buffer, size_t *lenp, loff_t *ppos)
90016 {
90017- struct ctl_table mq_table;
90018+ ctl_table_no_const mq_table;
90019 memcpy(&mq_table, table, sizeof(mq_table));
90020 mq_table.data = get_mq(table);
90021
90022@@ -35,7 +35,7 @@ static int proc_mq_dointvec(struct ctl_table *table, int write,
90023 static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
90024 void __user *buffer, size_t *lenp, loff_t *ppos)
90025 {
90026- struct ctl_table mq_table;
90027+ ctl_table_no_const mq_table;
90028 memcpy(&mq_table, table, sizeof(mq_table));
90029 mq_table.data = get_mq(table);
90030
90031diff --git a/ipc/mqueue.c b/ipc/mqueue.c
90032index 4fcf39a..d3cc2ec 100644
90033--- a/ipc/mqueue.c
90034+++ b/ipc/mqueue.c
90035@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
90036 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
90037 info->attr.mq_msgsize);
90038
90039+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
90040 spin_lock(&mq_lock);
90041 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
90042 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
90043diff --git a/ipc/shm.c b/ipc/shm.c
90044index 89fc354..cf56786 100644
90045--- a/ipc/shm.c
90046+++ b/ipc/shm.c
90047@@ -72,6 +72,14 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
90048 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
90049 #endif
90050
90051+#ifdef CONFIG_GRKERNSEC
90052+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
90053+ const time_t shm_createtime, const kuid_t cuid,
90054+ const int shmid);
90055+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
90056+ const time_t shm_createtime);
90057+#endif
90058+
90059 void shm_init_ns(struct ipc_namespace *ns)
90060 {
90061 ns->shm_ctlmax = SHMMAX;
90062@@ -557,6 +565,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
90063 shp->shm_lprid = 0;
90064 shp->shm_atim = shp->shm_dtim = 0;
90065 shp->shm_ctim = get_seconds();
90066+#ifdef CONFIG_GRKERNSEC
90067+ {
90068+ struct timespec timeval;
90069+ do_posix_clock_monotonic_gettime(&timeval);
90070+
90071+ shp->shm_createtime = timeval.tv_sec;
90072+ }
90073+#endif
90074 shp->shm_segsz = size;
90075 shp->shm_nattch = 0;
90076 shp->shm_file = file;
90077@@ -1092,6 +1108,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
90078 f_mode = FMODE_READ | FMODE_WRITE;
90079 }
90080 if (shmflg & SHM_EXEC) {
90081+
90082+#ifdef CONFIG_PAX_MPROTECT
90083+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
90084+ goto out;
90085+#endif
90086+
90087 prot |= PROT_EXEC;
90088 acc_mode |= S_IXUGO;
90089 }
90090@@ -1116,6 +1138,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
90091 if (err)
90092 goto out_unlock;
90093
90094+#ifdef CONFIG_GRKERNSEC
90095+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
90096+ shp->shm_perm.cuid, shmid) ||
90097+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
90098+ err = -EACCES;
90099+ goto out_unlock;
90100+ }
90101+#endif
90102+
90103 ipc_lock_object(&shp->shm_perm);
90104
90105 /* check if shm_destroy() is tearing down shp */
90106@@ -1128,6 +1159,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
90107 path = shp->shm_file->f_path;
90108 path_get(&path);
90109 shp->shm_nattch++;
90110+#ifdef CONFIG_GRKERNSEC
90111+ shp->shm_lapid = current->pid;
90112+#endif
90113 size = i_size_read(path.dentry->d_inode);
90114 ipc_unlock_object(&shp->shm_perm);
90115 rcu_read_unlock();
90116diff --git a/ipc/util.c b/ipc/util.c
90117index 27d74e6..8be0be2 100644
90118--- a/ipc/util.c
90119+++ b/ipc/util.c
90120@@ -71,6 +71,8 @@ struct ipc_proc_iface {
90121 int (*show)(struct seq_file *, void *);
90122 };
90123
90124+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
90125+
90126 static void ipc_memory_notifier(struct work_struct *work)
90127 {
90128 ipcns_notify(IPCNS_MEMCHANGED);
90129@@ -537,6 +539,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
90130 granted_mode >>= 6;
90131 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
90132 granted_mode >>= 3;
90133+
90134+ if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
90135+ return -1;
90136+
90137 /* is there some bit set in requested_mode but not in granted_mode? */
90138 if ((requested_mode & ~granted_mode & 0007) &&
90139 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
90140diff --git a/kernel/acct.c b/kernel/acct.c
90141index 808a86f..da69695 100644
90142--- a/kernel/acct.c
90143+++ b/kernel/acct.c
90144@@ -556,7 +556,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
90145 */
90146 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
90147 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
90148- file->f_op->write(file, (char *)&ac,
90149+ file->f_op->write(file, (char __force_user *)&ac,
90150 sizeof(acct_t), &file->f_pos);
90151 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
90152 set_fs(fs);
90153diff --git a/kernel/audit.c b/kernel/audit.c
90154index 3ef2e0e..8873765 100644
90155--- a/kernel/audit.c
90156+++ b/kernel/audit.c
90157@@ -122,7 +122,7 @@ u32 audit_sig_sid = 0;
90158 3) suppressed due to audit_rate_limit
90159 4) suppressed due to audit_backlog_limit
90160 */
90161-static atomic_t audit_lost = ATOMIC_INIT(0);
90162+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
90163
90164 /* The netlink socket. */
90165 static struct sock *audit_sock;
90166@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
90167 unsigned long now;
90168 int print;
90169
90170- atomic_inc(&audit_lost);
90171+ atomic_inc_unchecked(&audit_lost);
90172
90173 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
90174
90175@@ -273,7 +273,7 @@ void audit_log_lost(const char *message)
90176 if (print) {
90177 if (printk_ratelimit())
90178 pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n",
90179- atomic_read(&audit_lost),
90180+ atomic_read_unchecked(&audit_lost),
90181 audit_rate_limit,
90182 audit_backlog_limit);
90183 audit_panic(message);
90184@@ -840,7 +840,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
90185 s.pid = audit_pid;
90186 s.rate_limit = audit_rate_limit;
90187 s.backlog_limit = audit_backlog_limit;
90188- s.lost = atomic_read(&audit_lost);
90189+ s.lost = atomic_read_unchecked(&audit_lost);
90190 s.backlog = skb_queue_len(&audit_skb_queue);
90191 s.version = AUDIT_VERSION_LATEST;
90192 s.backlog_wait_time = audit_backlog_wait_time;
90193diff --git a/kernel/auditsc.c b/kernel/auditsc.c
90194index 21eae3c..66db239 100644
90195--- a/kernel/auditsc.c
90196+++ b/kernel/auditsc.c
90197@@ -2023,7 +2023,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
90198 }
90199
90200 /* global counter which is incremented every time something logs in */
90201-static atomic_t session_id = ATOMIC_INIT(0);
90202+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
90203
90204 static int audit_set_loginuid_perm(kuid_t loginuid)
90205 {
90206@@ -2090,7 +2090,7 @@ int audit_set_loginuid(kuid_t loginuid)
90207
90208 /* are we setting or clearing? */
90209 if (uid_valid(loginuid))
90210- sessionid = (unsigned int)atomic_inc_return(&session_id);
90211+ sessionid = (unsigned int)atomic_inc_return_unchecked(&session_id);
90212
90213 task->sessionid = sessionid;
90214 task->loginuid = loginuid;
90215diff --git a/kernel/capability.c b/kernel/capability.c
90216index a5cf13c..07a2647 100644
90217--- a/kernel/capability.c
90218+++ b/kernel/capability.c
90219@@ -192,6 +192,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
90220 * before modification is attempted and the application
90221 * fails.
90222 */
90223+ if (tocopy > ARRAY_SIZE(kdata))
90224+ return -EFAULT;
90225+
90226 if (copy_to_user(dataptr, kdata, tocopy
90227 * sizeof(struct __user_cap_data_struct))) {
90228 return -EFAULT;
90229@@ -293,10 +296,11 @@ bool has_ns_capability(struct task_struct *t,
90230 int ret;
90231
90232 rcu_read_lock();
90233- ret = security_capable(__task_cred(t), ns, cap);
90234+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
90235+ gr_task_is_capable(t, __task_cred(t), cap);
90236 rcu_read_unlock();
90237
90238- return (ret == 0);
90239+ return ret;
90240 }
90241
90242 /**
90243@@ -333,10 +337,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
90244 int ret;
90245
90246 rcu_read_lock();
90247- ret = security_capable_noaudit(__task_cred(t), ns, cap);
90248+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
90249 rcu_read_unlock();
90250
90251- return (ret == 0);
90252+ return ret;
90253 }
90254
90255 /**
90256@@ -374,7 +378,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
90257 BUG();
90258 }
90259
90260- if (security_capable(current_cred(), ns, cap) == 0) {
90261+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
90262 current->flags |= PF_SUPERPRIV;
90263 return true;
90264 }
90265@@ -382,6 +386,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
90266 }
90267 EXPORT_SYMBOL(ns_capable);
90268
90269+bool ns_capable_nolog(struct user_namespace *ns, int cap)
90270+{
90271+ if (unlikely(!cap_valid(cap))) {
90272+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
90273+ BUG();
90274+ }
90275+
90276+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
90277+ current->flags |= PF_SUPERPRIV;
90278+ return true;
90279+ }
90280+ return false;
90281+}
90282+EXPORT_SYMBOL(ns_capable_nolog);
90283+
90284 /**
90285 * file_ns_capable - Determine if the file's opener had a capability in effect
90286 * @file: The file we want to check
90287@@ -423,6 +442,12 @@ bool capable(int cap)
90288 }
90289 EXPORT_SYMBOL(capable);
90290
90291+bool capable_nolog(int cap)
90292+{
90293+ return ns_capable_nolog(&init_user_ns, cap);
90294+}
90295+EXPORT_SYMBOL(capable_nolog);
90296+
90297 /**
90298 * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
90299 * @inode: The inode in question
90300@@ -440,3 +465,12 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
90301 kgid_has_mapping(ns, inode->i_gid);
90302 }
90303 EXPORT_SYMBOL(capable_wrt_inode_uidgid);
90304+
90305+bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap)
90306+{
90307+ struct user_namespace *ns = current_user_ns();
90308+
90309+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
90310+ kgid_has_mapping(ns, inode->i_gid);
90311+}
90312+EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
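Annotation: grsecurity's internal policy checks probe capabilities speculatively, and the _nolog variants added above ride the noaudit LSM path so those probes do not flood the audit trail. A hypothetical caller, for illustration only:

static bool may_bypass_limit(void)
{
	return capable_nolog(CAP_SYS_RESOURCE);
}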
90313diff --git a/kernel/cgroup.c b/kernel/cgroup.c
90314index 70776ae..09c4988 100644
90315--- a/kernel/cgroup.c
90316+++ b/kernel/cgroup.c
90317@@ -5146,6 +5146,14 @@ static void cgroup_release_agent(struct work_struct *work)
90318 release_list);
90319 list_del_init(&cgrp->release_list);
90320 raw_spin_unlock(&release_list_lock);
90321+
90322+ /*
90323+ * don't bother calling call_usermodehelper if we haven't
90324+ * configured a binary to execute
90325+ */
90326+ if (cgrp->root->release_agent_path[0] == '\0')
90327+ goto continue_free;
90328+
90329 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
90330 if (!pathbuf)
90331 goto continue_free;
90332@@ -5336,7 +5344,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
90333 struct task_struct *task;
90334 int count = 0;
90335
90336- seq_printf(seq, "css_set %p\n", cset);
90337+ seq_printf(seq, "css_set %pK\n", cset);
90338
90339 list_for_each_entry(task, &cset->tasks, cg_list) {
90340 if (count++ > MAX_TASKS_SHOWN_PER_CSS)
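Annotation on the %p to %pK change above: %pK consults the kptr_restrict sysctl, so an unprivileged reader of this debug file sees a zeroed address instead of a raw kernel pointer. Equivalent standalone use:

static void dump_cset(const void *cset)
{
	/* prints 0000000000000000 to unprivileged readers under kptr_restrict=1 */
	printk(KERN_INFO "css_set %pK\n", cset);
}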
90341diff --git a/kernel/compat.c b/kernel/compat.c
90342index 633394f..bdfa969 100644
90343--- a/kernel/compat.c
90344+++ b/kernel/compat.c
90345@@ -13,6 +13,7 @@
90346
90347 #include <linux/linkage.h>
90348 #include <linux/compat.h>
90349+#include <linux/module.h>
90350 #include <linux/errno.h>
90351 #include <linux/time.h>
90352 #include <linux/signal.h>
90353@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
90354 mm_segment_t oldfs;
90355 long ret;
90356
90357- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
90358+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
90359 oldfs = get_fs();
90360 set_fs(KERNEL_DS);
90361 ret = hrtimer_nanosleep_restart(restart);
90362@@ -252,7 +253,7 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
90363 oldfs = get_fs();
90364 set_fs(KERNEL_DS);
90365 ret = hrtimer_nanosleep(&tu,
90366- rmtp ? (struct timespec __user *)&rmt : NULL,
90367+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
90368 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
90369 set_fs(oldfs);
90370
90371@@ -361,7 +362,7 @@ COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
90372 mm_segment_t old_fs = get_fs();
90373
90374 set_fs(KERNEL_DS);
90375- ret = sys_sigpending((old_sigset_t __user *) &s);
90376+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
90377 set_fs(old_fs);
90378 if (ret == 0)
90379 ret = put_user(s, set);
90380@@ -451,7 +452,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
90381 mm_segment_t old_fs = get_fs();
90382
90383 set_fs(KERNEL_DS);
90384- ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r);
90385+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
90386 set_fs(old_fs);
90387
90388 if (!ret) {
90389@@ -533,8 +534,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
90390 set_fs (KERNEL_DS);
90391 ret = sys_wait4(pid,
90392 (stat_addr ?
90393- (unsigned int __user *) &status : NULL),
90394- options, (struct rusage __user *) &r);
90395+ (unsigned int __force_user *) &status : NULL),
90396+ options, (struct rusage __force_user *) &r);
90397 set_fs (old_fs);
90398
90399 if (ret > 0) {
90400@@ -560,8 +561,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
90401 memset(&info, 0, sizeof(info));
90402
90403 set_fs(KERNEL_DS);
90404- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
90405- uru ? (struct rusage __user *)&ru : NULL);
90406+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
90407+ uru ? (struct rusage __force_user *)&ru : NULL);
90408 set_fs(old_fs);
90409
90410 if ((ret < 0) || (info.si_signo == 0))
90411@@ -695,8 +696,8 @@ COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
90412 oldfs = get_fs();
90413 set_fs(KERNEL_DS);
90414 err = sys_timer_settime(timer_id, flags,
90415- (struct itimerspec __user *) &newts,
90416- (struct itimerspec __user *) &oldts);
90417+ (struct itimerspec __force_user *) &newts,
90418+ (struct itimerspec __force_user *) &oldts);
90419 set_fs(oldfs);
90420 if (!err && old && put_compat_itimerspec(old, &oldts))
90421 return -EFAULT;
90422@@ -713,7 +714,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
90423 oldfs = get_fs();
90424 set_fs(KERNEL_DS);
90425 err = sys_timer_gettime(timer_id,
90426- (struct itimerspec __user *) &ts);
90427+ (struct itimerspec __force_user *) &ts);
90428 set_fs(oldfs);
90429 if (!err && put_compat_itimerspec(setting, &ts))
90430 return -EFAULT;
90431@@ -732,7 +733,7 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
90432 oldfs = get_fs();
90433 set_fs(KERNEL_DS);
90434 err = sys_clock_settime(which_clock,
90435- (struct timespec __user *) &ts);
90436+ (struct timespec __force_user *) &ts);
90437 set_fs(oldfs);
90438 return err;
90439 }
90440@@ -747,7 +748,7 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
90441 oldfs = get_fs();
90442 set_fs(KERNEL_DS);
90443 err = sys_clock_gettime(which_clock,
90444- (struct timespec __user *) &ts);
90445+ (struct timespec __force_user *) &ts);
90446 set_fs(oldfs);
90447 if (!err && compat_put_timespec(&ts, tp))
90448 return -EFAULT;
90449@@ -767,7 +768,7 @@ COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
90450
90451 oldfs = get_fs();
90452 set_fs(KERNEL_DS);
90453- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
90454+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
90455 set_fs(oldfs);
90456
90457 err = compat_put_timex(utp, &txc);
90458@@ -787,7 +788,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
90459 oldfs = get_fs();
90460 set_fs(KERNEL_DS);
90461 err = sys_clock_getres(which_clock,
90462- (struct timespec __user *) &ts);
90463+ (struct timespec __force_user *) &ts);
90464 set_fs(oldfs);
90465 if (!err && tp && compat_put_timespec(&ts, tp))
90466 return -EFAULT;
90467@@ -801,7 +802,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
90468 struct timespec tu;
90469 struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
90470
90471- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
90472+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
90473 oldfs = get_fs();
90474 set_fs(KERNEL_DS);
90475 err = clock_nanosleep_restart(restart);
90476@@ -833,8 +834,8 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
90477 oldfs = get_fs();
90478 set_fs(KERNEL_DS);
90479 err = sys_clock_nanosleep(which_clock, flags,
90480- (struct timespec __user *) &in,
90481- (struct timespec __user *) &out);
90482+ (struct timespec __force_user *) &in,
90483+ (struct timespec __force_user *) &out);
90484 set_fs(oldfs);
90485
90486 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
90487@@ -1128,7 +1129,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
90488 mm_segment_t old_fs = get_fs();
90489
90490 set_fs(KERNEL_DS);
90491- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
90492+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
90493 set_fs(old_fs);
90494 if (compat_put_timespec(&t, interval))
90495 return -EFAULT;
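
Every kernel/compat.c hunk above is the same annotation, not a behaviour change. The compat wrappers build a native struct on the kernel stack, lift the address limit with set_fs(KERNEL_DS) so the uaccess routines accept kernel addresses, and call the native syscall; casting the stack address to a plain __user pointer makes sparse-based user/kernel separation checking complain, so the casts become __force_user, a PaX annotation meaning roughly "__force __user". The shape of the idiom, sketched under the assumption of kernel context (it mirrors the clock_gettime hunk above):

static long compat_wrapper_sketch(struct compat_timespec __user *utp)
{
	struct timespec ts;
	mm_segment_t oldfs = get_fs();
	long err;

	set_fs(KERNEL_DS);	/* uaccess now accepts kernel addresses */
	err = sys_clock_gettime(CLOCK_MONOTONIC,
				(struct timespec __force_user *)&ts);
	set_fs(oldfs);

	if (!err && compat_put_timespec(&ts, utp))
		err = -EFAULT;
	return err;
}
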
90496diff --git a/kernel/configs.c b/kernel/configs.c
90497index c18b1f1..b9a0132 100644
90498--- a/kernel/configs.c
90499+++ b/kernel/configs.c
90500@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
90501 struct proc_dir_entry *entry;
90502
90503 /* create the current config file */
90504+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
90505+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
90506+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
90507+ &ikconfig_file_ops);
90508+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
90509+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
90510+ &ikconfig_file_ops);
90511+#endif
90512+#else
90513 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
90514 &ikconfig_file_ops);
90515+#endif
90516+
90517 if (!entry)
90518 return -ENOMEM;
90519
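
The Kconfig ladder above only changes the mode of /proc/config.gz: 0400 for the PROC_USER and HIDESYM cases, 0440 for PROC_USERGROUP, against the stock 0444. The result is easy to probe from userspace (plain C, runnable):

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/config.gz", O_RDONLY);

	if (fd < 0) {
		printf("config.gz not readable: %s\n", strerror(errno));
		return 1;
	}
	printf("config.gz readable\n");
	close(fd);
	return 0;
}
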
90520diff --git a/kernel/cred.c b/kernel/cred.c
90521index e0573a4..26c0fd3 100644
90522--- a/kernel/cred.c
90523+++ b/kernel/cred.c
90524@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
90525 validate_creds(cred);
90526 alter_cred_subscribers(cred, -1);
90527 put_cred(cred);
90528+
90529+#ifdef CONFIG_GRKERNSEC_SETXID
90530+ cred = (struct cred *) tsk->delayed_cred;
90531+ if (cred != NULL) {
90532+ tsk->delayed_cred = NULL;
90533+ validate_creds(cred);
90534+ alter_cred_subscribers(cred, -1);
90535+ put_cred(cred);
90536+ }
90537+#endif
90538 }
90539
90540 /**
90541@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
90542 * Always returns 0 thus allowing this function to be tail-called at the end
90543 * of, say, sys_setgid().
90544 */
90545-int commit_creds(struct cred *new)
90546+static int __commit_creds(struct cred *new)
90547 {
90548 struct task_struct *task = current;
90549 const struct cred *old = task->real_cred;
90550@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
90551
90552 get_cred(new); /* we will require a ref for the subj creds too */
90553
90554+ gr_set_role_label(task, new->uid, new->gid);
90555+
90556 /* dumpability changes */
90557 if (!uid_eq(old->euid, new->euid) ||
90558 !gid_eq(old->egid, new->egid) ||
90559@@ -479,6 +491,105 @@ int commit_creds(struct cred *new)
90560 put_cred(old);
90561 return 0;
90562 }
90563+#ifdef CONFIG_GRKERNSEC_SETXID
90564+extern int set_user(struct cred *new);
90565+
90566+void gr_delayed_cred_worker(void)
90567+{
90568+ const struct cred *new = current->delayed_cred;
90569+ struct cred *ncred;
90570+
90571+ current->delayed_cred = NULL;
90572+
90573+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
90574+ // from doing get_cred on it when queueing this
90575+ put_cred(new);
90576+ return;
90577+ } else if (new == NULL)
90578+ return;
90579+
90580+ ncred = prepare_creds();
90581+ if (!ncred)
90582+ goto die;
90583+ // uids
90584+ ncred->uid = new->uid;
90585+ ncred->euid = new->euid;
90586+ ncred->suid = new->suid;
90587+ ncred->fsuid = new->fsuid;
90588+ // gids
90589+ ncred->gid = new->gid;
90590+ ncred->egid = new->egid;
90591+ ncred->sgid = new->sgid;
90592+ ncred->fsgid = new->fsgid;
90593+ // groups
90594+ set_groups(ncred, new->group_info);
90595+ // caps
90596+ ncred->securebits = new->securebits;
90597+ ncred->cap_inheritable = new->cap_inheritable;
90598+ ncred->cap_permitted = new->cap_permitted;
90599+ ncred->cap_effective = new->cap_effective;
90600+ ncred->cap_bset = new->cap_bset;
90601+
90602+ if (set_user(ncred)) {
90603+ abort_creds(ncred);
90604+ goto die;
90605+ }
90606+
90607+ // from doing get_cred on it when queueing this
90608+ put_cred(new);
90609+
90610+ __commit_creds(ncred);
90611+ return;
90612+die:
90613+ // from doing get_cred on it when queueing this
90614+ put_cred(new);
90615+ do_group_exit(SIGKILL);
90616+}
90617+#endif
90618+
90619+int commit_creds(struct cred *new)
90620+{
90621+#ifdef CONFIG_GRKERNSEC_SETXID
90622+ int ret;
90623+ int schedule_it = 0;
90624+ struct task_struct *t;
90625+ unsigned oldsecurebits = current_cred()->securebits;
90626+
90627+ /* we won't get called with tasklist_lock held for writing
90628+ and interrupts disabled as the cred struct in that case is
90629+ init_cred
90630+ */
90631+ if (grsec_enable_setxid && !current_is_single_threaded() &&
90632+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
90633+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
90634+ schedule_it = 1;
90635+ }
90636+ ret = __commit_creds(new);
90637+ if (schedule_it) {
90638+ rcu_read_lock();
90639+ read_lock(&tasklist_lock);
90640+ for (t = next_thread(current); t != current;
90641+ t = next_thread(t)) {
90642+ /* we'll check if the thread has uid 0 in
90643+ * the delayed worker routine
90644+ */
90645+ if (task_securebits(t) == oldsecurebits &&
90646+ t->delayed_cred == NULL) {
90647+ t->delayed_cred = get_cred(new);
90648+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
90649+ set_tsk_need_resched(t);
90650+ }
90651+ }
90652+ read_unlock(&tasklist_lock);
90653+ rcu_read_unlock();
90654+ }
90655+
90656+ return ret;
90657+#else
90658+ return __commit_creds(new);
90659+#endif
90660+}
90661+
90662 EXPORT_SYMBOL(commit_creds);
90663
90664 /**
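
The delayed-cred machinery exists because the raw setuid syscall is per-thread on Linux; POSIX process-wide semantics are faked in glibc by signalling every thread. The wrapper above therefore tags each sibling that still carries the old securebits with TIF_GRSEC_SETXID and queues a reference to the new creds; gr_delayed_cred_worker() later applies them from a safe context and kills the whole group if that fails, so a root service dropping privileges cannot leave a stray uid-0 thread behind. The underlying per-thread behaviour is visible from stock userspace (runnable as root, build with -lpthread; nothing grsecurity-specific here):

#include <stdio.h>
#include <unistd.h>
#include <pthread.h>
#include <sys/syscall.h>

static void *show_uid(void *arg)
{
	(void)arg;
	sleep(1);	/* run after main has "dropped" privileges */
	printf("sibling thread euid: %d\n", (int)geteuid());
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, show_uid, NULL);
	syscall(SYS_setuid, 65534);	/* raw syscall: calling thread only */
	printf("main thread euid:    %d\n", (int)geteuid());
	pthread_join(t, NULL);
	return 0;
}
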
90665diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
90666index 1adf62b..7736e06 100644
90667--- a/kernel/debug/debug_core.c
90668+++ b/kernel/debug/debug_core.c
90669@@ -124,7 +124,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
90670 */
90671 static atomic_t masters_in_kgdb;
90672 static atomic_t slaves_in_kgdb;
90673-static atomic_t kgdb_break_tasklet_var;
90674+static atomic_unchecked_t kgdb_break_tasklet_var;
90675 atomic_t kgdb_setting_breakpoint;
90676
90677 struct task_struct *kgdb_usethread;
90678@@ -134,7 +134,7 @@ int kgdb_single_step;
90679 static pid_t kgdb_sstep_pid;
90680
90681 /* to keep track of the CPU which is doing the single stepping*/
90682-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
90683+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
90684
90685 /*
90686 * If you are debugging a problem where roundup (the collection of
90687@@ -549,7 +549,7 @@ return_normal:
90688 * kernel will only try for the value of sstep_tries before
90689 * giving up and continuing on.
90690 */
90691- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
90692+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
90693 (kgdb_info[cpu].task &&
90694 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
90695 atomic_set(&kgdb_active, -1);
90696@@ -647,8 +647,8 @@ cpu_master_loop:
90697 }
90698
90699 kgdb_restore:
90700- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
90701- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
90702+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
90703+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
90704 if (kgdb_info[sstep_cpu].task)
90705 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
90706 else
90707@@ -925,18 +925,18 @@ static void kgdb_unregister_callbacks(void)
90708 static void kgdb_tasklet_bpt(unsigned long ing)
90709 {
90710 kgdb_breakpoint();
90711- atomic_set(&kgdb_break_tasklet_var, 0);
90712+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
90713 }
90714
90715 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
90716
90717 void kgdb_schedule_breakpoint(void)
90718 {
90719- if (atomic_read(&kgdb_break_tasklet_var) ||
90720+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
90721 atomic_read(&kgdb_active) != -1 ||
90722 atomic_read(&kgdb_setting_breakpoint))
90723 return;
90724- atomic_inc(&kgdb_break_tasklet_var);
90725+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
90726 tasklet_schedule(&kgdb_tasklet_breakpoint);
90727 }
90728 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
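
atomic_unchecked_t is the opt-out type for PaX's REFCOUNT hardening: ordinary atomic_t arithmetic is instrumented to trap on overflow, which is the right call for reference counts but a false positive for free-running state like kgdb_break_tasklet_var or the single-stepping CPU id. The two policies in plain, runnable C (the __builtin_add_overflow intrinsic needs GCC 5+ or clang; the kernel implementation differs, only the trap-versus-wrap split is the point):

#include <stdio.h>
#include <limits.h>

static int checked_inc(int v)	/* the atomic_t policy */
{
	int out;

	if (__builtin_add_overflow(v, 1, &out)) {
		fprintf(stderr, "overflow: refusing to wrap\n");
		return v;	/* PaX instead traps and kills the task */
	}
	return out;
}

static int unchecked_inc(int v)	/* the atomic_unchecked_t policy */
{
	return (int)((unsigned int)v + 1u);	/* wrapping is harmless here */
}

int main(void)
{
	printf("checked:   %d\n", checked_inc(INT_MAX));
	printf("unchecked: %d\n", unchecked_inc(INT_MAX));
	return 0;
}
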
90729diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
90730index 2f7c760..95b6a66 100644
90731--- a/kernel/debug/kdb/kdb_main.c
90732+++ b/kernel/debug/kdb/kdb_main.c
90733@@ -1977,7 +1977,7 @@ static int kdb_lsmod(int argc, const char **argv)
90734 continue;
90735
90736 kdb_printf("%-20s%8u 0x%p ", mod->name,
90737- mod->core_size, (void *)mod);
90738+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
90739 #ifdef CONFIG_MODULE_UNLOAD
90740 kdb_printf("%4ld ", module_refcount(mod));
90741 #endif
90742@@ -1987,7 +1987,7 @@ static int kdb_lsmod(int argc, const char **argv)
90743 kdb_printf(" (Loading)");
90744 else
90745 kdb_printf(" (Live)");
90746- kdb_printf(" 0x%p", mod->module_core);
90747+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
90748
90749 #ifdef CONFIG_MODULE_UNLOAD
90750 {
90751diff --git a/kernel/events/core.c b/kernel/events/core.c
90752index 6b17ac1..00fd505 100644
90753--- a/kernel/events/core.c
90754+++ b/kernel/events/core.c
90755@@ -160,8 +160,15 @@ static struct srcu_struct pmus_srcu;
90756 * 0 - disallow raw tracepoint access for unpriv
90757 * 1 - disallow cpu events for unpriv
90758 * 2 - disallow kernel profiling for unpriv
90759+ * 3 - disallow all unpriv perf event use
90760 */
90761-int sysctl_perf_event_paranoid __read_mostly = 1;
90762+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
90763+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
90764+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
90765+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
90766+#else
90767+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
90768+#endif
90769
90770 /* Minimum for 512 kiB + 1 user control page */
90771 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
90772@@ -187,7 +194,7 @@ void update_perf_cpu_limits(void)
90773
90774 tmp *= sysctl_perf_cpu_time_max_percent;
90775 do_div(tmp, 100);
90776- ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
90777+ ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
90778 }
90779
90780 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
90781@@ -293,7 +300,7 @@ void perf_sample_event_took(u64 sample_len_ns)
90782 }
90783 }
90784
90785-static atomic64_t perf_event_id;
90786+static atomic64_unchecked_t perf_event_id;
90787
90788 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
90789 enum event_type_t event_type);
90790@@ -3023,7 +3030,7 @@ static void __perf_event_read(void *info)
90791
90792 static inline u64 perf_event_count(struct perf_event *event)
90793 {
90794- return local64_read(&event->count) + atomic64_read(&event->child_count);
90795+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
90796 }
90797
90798 static u64 perf_event_read(struct perf_event *event)
90799@@ -3399,9 +3406,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
90800 mutex_lock(&event->child_mutex);
90801 total += perf_event_read(event);
90802 *enabled += event->total_time_enabled +
90803- atomic64_read(&event->child_total_time_enabled);
90804+ atomic64_read_unchecked(&event->child_total_time_enabled);
90805 *running += event->total_time_running +
90806- atomic64_read(&event->child_total_time_running);
90807+ atomic64_read_unchecked(&event->child_total_time_running);
90808
90809 list_for_each_entry(child, &event->child_list, child_list) {
90810 total += perf_event_read(child);
90811@@ -3830,10 +3837,10 @@ void perf_event_update_userpage(struct perf_event *event)
90812 userpg->offset -= local64_read(&event->hw.prev_count);
90813
90814 userpg->time_enabled = enabled +
90815- atomic64_read(&event->child_total_time_enabled);
90816+ atomic64_read_unchecked(&event->child_total_time_enabled);
90817
90818 userpg->time_running = running +
90819- atomic64_read(&event->child_total_time_running);
90820+ atomic64_read_unchecked(&event->child_total_time_running);
90821
90822 arch_perf_update_userpage(userpg, now);
90823
90824@@ -4397,7 +4404,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
90825
90826 /* Data. */
90827 sp = perf_user_stack_pointer(regs);
90828- rem = __output_copy_user(handle, (void *) sp, dump_size);
90829+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
90830 dyn_size = dump_size - rem;
90831
90832 perf_output_skip(handle, rem);
90833@@ -4488,11 +4495,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
90834 values[n++] = perf_event_count(event);
90835 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
90836 values[n++] = enabled +
90837- atomic64_read(&event->child_total_time_enabled);
90838+ atomic64_read_unchecked(&event->child_total_time_enabled);
90839 }
90840 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
90841 values[n++] = running +
90842- atomic64_read(&event->child_total_time_running);
90843+ atomic64_read_unchecked(&event->child_total_time_running);
90844 }
90845 if (read_format & PERF_FORMAT_ID)
90846 values[n++] = primary_event_id(event);
90847@@ -6801,7 +6808,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
90848 event->parent = parent_event;
90849
90850 event->ns = get_pid_ns(task_active_pid_ns(current));
90851- event->id = atomic64_inc_return(&perf_event_id);
90852+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
90853
90854 event->state = PERF_EVENT_STATE_INACTIVE;
90855
90856@@ -7080,6 +7087,11 @@ SYSCALL_DEFINE5(perf_event_open,
90857 if (flags & ~PERF_FLAG_ALL)
90858 return -EINVAL;
90859
90860+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
90861+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
90862+ return -EACCES;
90863+#endif
90864+
90865 err = perf_copy_attr(attr_uptr, &attr);
90866 if (err)
90867 return err;
90868@@ -7432,10 +7444,10 @@ static void sync_child_event(struct perf_event *child_event,
90869 /*
90870 * Add back the child's count to the parent's count:
90871 */
90872- atomic64_add(child_val, &parent_event->child_count);
90873- atomic64_add(child_event->total_time_enabled,
90874+ atomic64_add_unchecked(child_val, &parent_event->child_count);
90875+ atomic64_add_unchecked(child_event->total_time_enabled,
90876 &parent_event->child_total_time_enabled);
90877- atomic64_add(child_event->total_time_running,
90878+ atomic64_add_unchecked(child_event->total_time_running,
90879 &parent_event->child_total_time_running);
90880
90881 /*
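
Level 3 is a grsec extension to the paranoia scale documented in the comment above: with GRKERNSEC_PERF_HARDEN the default becomes 3 and perf_event_open() refuses anything without CAP_SYS_ADMIN before even parsing attributes (a similar level 3 later circulated as a distribution patch). The active policy can be read back from userspace:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
	int level;

	if (!f) {
		perror("perf_event_paranoid");
		return 1;
	}
	if (fscanf(f, "%d", &level) == 1)
		printf("perf_event_paranoid = %d%s\n", level,
		       level >= 3 ? " (unprivileged perf disabled)" : "");
	fclose(f);
	return 0;
}
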
90882diff --git a/kernel/events/internal.h b/kernel/events/internal.h
90883index 569b2187..19940d9 100644
90884--- a/kernel/events/internal.h
90885+++ b/kernel/events/internal.h
90886@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
90887 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
90888 }
90889
90890-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
90891+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
90892 static inline unsigned long \
90893 func_name(struct perf_output_handle *handle, \
90894- const void *buf, unsigned long len) \
90895+ const void user *buf, unsigned long len) \
90896 { \
90897 unsigned long size, written; \
90898 \
90899@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
90900 return 0;
90901 }
90902
90903-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
90904+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
90905
90906 static inline unsigned long
90907 memcpy_skip(void *dst, const void *src, unsigned long n)
90908@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
90909 return 0;
90910 }
90911
90912-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
90913+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
90914
90915 #ifndef arch_perf_out_copy_user
90916 #define arch_perf_out_copy_user arch_perf_out_copy_user
90917@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
90918 }
90919 #endif
90920
90921-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
90922+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
90923
90924 /* Callchain handling */
90925 extern struct perf_callchain_entry *
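
The new third macro parameter is spliced into the buffer pointer's type: empty for the two kernel-side copies, __user for the one that pulls sample data from the user stack, so sparse can again tell the address spaces apart. What the three instantiations above expand to, roughly (kernel context assumed):

static inline unsigned long
__output_copy(struct perf_output_handle *handle,
	      const void *buf, unsigned long len);		/* kernel -> ring buffer */

static inline unsigned long
__output_skip(struct perf_output_handle *handle,
	      const void *buf, unsigned long len);		/* advance only */

static inline unsigned long
__output_copy_user(struct perf_output_handle *handle,
		   const void __user *buf, unsigned long len);	/* user stack -> ring buffer */
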
90926diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
90927index 6f3254e..e4c1fe4 100644
90928--- a/kernel/events/uprobes.c
90929+++ b/kernel/events/uprobes.c
90930@@ -1670,7 +1670,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
90931 {
90932 struct page *page;
90933 uprobe_opcode_t opcode;
90934- int result;
90935+ long result;
90936
90937 pagefault_disable();
90938 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
90939diff --git a/kernel/exit.c b/kernel/exit.c
90940index e5c4668..592d2e5 100644
90941--- a/kernel/exit.c
90942+++ b/kernel/exit.c
90943@@ -173,6 +173,10 @@ void release_task(struct task_struct * p)
90944 struct task_struct *leader;
90945 int zap_leader;
90946 repeat:
90947+#ifdef CONFIG_NET
90948+ gr_del_task_from_ip_table(p);
90949+#endif
90950+
90951 /* don't need to get the RCU readlock here - the process is dead and
90952 * can't be modifying its own credentials. But shut RCU-lockdep up */
90953 rcu_read_lock();
90954@@ -664,6 +668,8 @@ void do_exit(long code)
90955 struct task_struct *tsk = current;
90956 int group_dead;
90957
90958+ set_fs(USER_DS);
90959+
90960 profile_task_exit(tsk);
90961
90962 WARN_ON(blk_needs_flush_plug(tsk));
90963@@ -680,7 +686,6 @@ void do_exit(long code)
90964 * mm_release()->clear_child_tid() from writing to a user-controlled
90965 * kernel address.
90966 */
90967- set_fs(USER_DS);
90968
90969 ptrace_event(PTRACE_EVENT_EXIT, code);
90970
90971@@ -739,6 +744,9 @@ void do_exit(long code)
90972 tsk->exit_code = code;
90973 taskstats_exit(tsk, group_dead);
90974
90975+ gr_acl_handle_psacct(tsk, code);
90976+ gr_acl_handle_exit();
90977+
90978 exit_mm(tsk);
90979
90980 if (group_dead)
90981@@ -858,7 +866,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
90982 * Take down every thread in the group. This is called by fatal signals
90983 * as well as by sys_exit_group (below).
90984 */
90985-void
90986+__noreturn void
90987 do_group_exit(int exit_code)
90988 {
90989 struct signal_struct *sig = current->signal;
90990diff --git a/kernel/fork.c b/kernel/fork.c
90991index 6a13c46..a623c8e 100644
90992--- a/kernel/fork.c
90993+++ b/kernel/fork.c
90994@@ -183,6 +183,48 @@ void thread_info_cache_init(void)
90995 # endif
90996 #endif
90997
90998+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
90999+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
91000+ int node, void **lowmem_stack)
91001+{
91002+ struct page *pages[THREAD_SIZE / PAGE_SIZE];
91003+ void *ret = NULL;
91004+ unsigned int i;
91005+
91006+ *lowmem_stack = alloc_thread_info_node(tsk, node);
91007+ if (*lowmem_stack == NULL)
91008+ goto out;
91009+
91010+ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
91011+ pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE));
91012+
91013+ /* use VM_IOREMAP to gain THREAD_SIZE alignment */
91014+ ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
91015+ if (ret == NULL) {
91016+ free_thread_info(*lowmem_stack);
91017+ *lowmem_stack = NULL;
91018+ }
91019+
91020+out:
91021+ return ret;
91022+}
91023+
91024+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
91025+{
91026+ unmap_process_stacks(tsk);
91027+}
91028+#else
91029+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
91030+ int node, void **lowmem_stack)
91031+{
91032+ return alloc_thread_info_node(tsk, node);
91033+}
91034+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
91035+{
91036+ free_thread_info(ti);
91037+}
91038+#endif
91039+
91040 /* SLAB cache for signal_struct structures (tsk->signal) */
91041 static struct kmem_cache *signal_cachep;
91042
91043@@ -201,18 +243,22 @@ struct kmem_cache *vm_area_cachep;
91044 /* SLAB cache for mm_struct structures (tsk->mm) */
91045 static struct kmem_cache *mm_cachep;
91046
91047-static void account_kernel_stack(struct thread_info *ti, int account)
91048+static void account_kernel_stack(struct task_struct *tsk, struct thread_info *ti, int account)
91049 {
91050+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
91051+ struct zone *zone = page_zone(virt_to_page(tsk->lowmem_stack));
91052+#else
91053 struct zone *zone = page_zone(virt_to_page(ti));
91054+#endif
91055
91056 mod_zone_page_state(zone, NR_KERNEL_STACK, account);
91057 }
91058
91059 void free_task(struct task_struct *tsk)
91060 {
91061- account_kernel_stack(tsk->stack, -1);
91062+ account_kernel_stack(tsk, tsk->stack, -1);
91063 arch_release_thread_info(tsk->stack);
91064- free_thread_info(tsk->stack);
91065+ gr_free_thread_info(tsk, tsk->stack);
91066 rt_mutex_debug_task_free(tsk);
91067 ftrace_graph_exit_task(tsk);
91068 put_seccomp_filter(tsk);
91069@@ -299,6 +345,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
91070 struct task_struct *tsk;
91071 struct thread_info *ti;
91072 unsigned long *stackend;
91073+ void *lowmem_stack;
91074 int node = tsk_fork_get_node(orig);
91075 int err;
91076
91077@@ -306,7 +353,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
91078 if (!tsk)
91079 return NULL;
91080
91081- ti = alloc_thread_info_node(tsk, node);
91082+ ti = gr_alloc_thread_info_node(tsk, node, &lowmem_stack);
91083 if (!ti)
91084 goto free_tsk;
91085
91086@@ -315,6 +362,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
91087 goto free_ti;
91088
91089 tsk->stack = ti;
91090+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
91091+ tsk->lowmem_stack = lowmem_stack;
91092+#endif
91093
91094 setup_thread_stack(tsk, orig);
91095 clear_user_return_notifier(tsk);
91096@@ -323,7 +373,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
91097 *stackend = STACK_END_MAGIC; /* for overflow detection */
91098
91099 #ifdef CONFIG_CC_STACKPROTECTOR
91100- tsk->stack_canary = get_random_int();
91101+ tsk->stack_canary = pax_get_random_long();
91102 #endif
91103
91104 /*
91105@@ -337,24 +387,92 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
91106 tsk->splice_pipe = NULL;
91107 tsk->task_frag.page = NULL;
91108
91109- account_kernel_stack(ti, 1);
91110+ account_kernel_stack(tsk, ti, 1);
91111
91112 return tsk;
91113
91114 free_ti:
91115- free_thread_info(ti);
91116+ gr_free_thread_info(tsk, ti);
91117 free_tsk:
91118 free_task_struct(tsk);
91119 return NULL;
91120 }
91121
91122 #ifdef CONFIG_MMU
91123-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
91124+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
91125+{
91126+ struct vm_area_struct *tmp;
91127+ unsigned long charge;
91128+ struct file *file;
91129+ int retval;
91130+
91131+ charge = 0;
91132+ if (mpnt->vm_flags & VM_ACCOUNT) {
91133+ unsigned long len = vma_pages(mpnt);
91134+
91135+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
91136+ goto fail_nomem;
91137+ charge = len;
91138+ }
91139+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
91140+ if (!tmp)
91141+ goto fail_nomem;
91142+ *tmp = *mpnt;
91143+ tmp->vm_mm = mm;
91144+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
91145+ retval = vma_dup_policy(mpnt, tmp);
91146+ if (retval)
91147+ goto fail_nomem_policy;
91148+ if (anon_vma_fork(tmp, mpnt))
91149+ goto fail_nomem_anon_vma_fork;
91150+ tmp->vm_flags &= ~VM_LOCKED;
91151+ tmp->vm_next = tmp->vm_prev = NULL;
91152+ tmp->vm_mirror = NULL;
91153+ file = tmp->vm_file;
91154+ if (file) {
91155+ struct inode *inode = file_inode(file);
91156+ struct address_space *mapping = file->f_mapping;
91157+
91158+ get_file(file);
91159+ if (tmp->vm_flags & VM_DENYWRITE)
91160+ atomic_dec(&inode->i_writecount);
91161+ mutex_lock(&mapping->i_mmap_mutex);
91162+ if (tmp->vm_flags & VM_SHARED)
91163+ mapping->i_mmap_writable++;
91164+ flush_dcache_mmap_lock(mapping);
91165+ /* insert tmp into the share list, just after mpnt */
91166+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
91167+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
91168+ else
91169+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
91170+ flush_dcache_mmap_unlock(mapping);
91171+ mutex_unlock(&mapping->i_mmap_mutex);
91172+ }
91173+
91174+ /*
91175+ * Clear hugetlb-related page reserves for children. This only
91176+ * affects MAP_PRIVATE mappings. Faults generated by the child
91177+ * are not guaranteed to succeed, even if read-only
91178+ */
91179+ if (is_vm_hugetlb_page(tmp))
91180+ reset_vma_resv_huge_pages(tmp);
91181+
91182+ return tmp;
91183+
91184+fail_nomem_anon_vma_fork:
91185+ mpol_put(vma_policy(tmp));
91186+fail_nomem_policy:
91187+ kmem_cache_free(vm_area_cachep, tmp);
91188+fail_nomem:
91189+ vm_unacct_memory(charge);
91190+ return NULL;
91191+}
91192+
91193+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
91194 {
91195 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
91196 struct rb_node **rb_link, *rb_parent;
91197 int retval;
91198- unsigned long charge;
91199
91200 uprobe_start_dup_mmap();
91201 down_write(&oldmm->mmap_sem);
91202@@ -383,55 +501,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
91203
91204 prev = NULL;
91205 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
91206- struct file *file;
91207-
91208 if (mpnt->vm_flags & VM_DONTCOPY) {
91209 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
91210 -vma_pages(mpnt));
91211 continue;
91212 }
91213- charge = 0;
91214- if (mpnt->vm_flags & VM_ACCOUNT) {
91215- unsigned long len = vma_pages(mpnt);
91216-
91217- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
91218- goto fail_nomem;
91219- charge = len;
91220- }
91221- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
91222- if (!tmp)
91223- goto fail_nomem;
91224- *tmp = *mpnt;
91225- INIT_LIST_HEAD(&tmp->anon_vma_chain);
91226- retval = vma_dup_policy(mpnt, tmp);
91227- if (retval)
91228- goto fail_nomem_policy;
91229- tmp->vm_mm = mm;
91230- if (anon_vma_fork(tmp, mpnt))
91231- goto fail_nomem_anon_vma_fork;
91232- tmp->vm_flags &= ~VM_LOCKED;
91233- tmp->vm_next = tmp->vm_prev = NULL;
91234- file = tmp->vm_file;
91235- if (file) {
91236- struct inode *inode = file_inode(file);
91237- struct address_space *mapping = file->f_mapping;
91238-
91239- get_file(file);
91240- if (tmp->vm_flags & VM_DENYWRITE)
91241- atomic_dec(&inode->i_writecount);
91242- mutex_lock(&mapping->i_mmap_mutex);
91243- if (tmp->vm_flags & VM_SHARED)
91244- mapping->i_mmap_writable++;
91245- flush_dcache_mmap_lock(mapping);
91246- /* insert tmp into the share list, just after mpnt */
91247- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
91248- vma_nonlinear_insert(tmp,
91249- &mapping->i_mmap_nonlinear);
91250- else
91251- vma_interval_tree_insert_after(tmp, mpnt,
91252- &mapping->i_mmap);
91253- flush_dcache_mmap_unlock(mapping);
91254- mutex_unlock(&mapping->i_mmap_mutex);
91255+ tmp = dup_vma(mm, oldmm, mpnt);
91256+ if (!tmp) {
91257+ retval = -ENOMEM;
91258+ goto out;
91259 }
91260
91261 /*
91262@@ -463,6 +541,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
91263 if (retval)
91264 goto out;
91265 }
91266+
91267+#ifdef CONFIG_PAX_SEGMEXEC
91268+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
91269+ struct vm_area_struct *mpnt_m;
91270+
91271+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
91272+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
91273+
91274+ if (!mpnt->vm_mirror)
91275+ continue;
91276+
91277+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
91278+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
91279+ mpnt->vm_mirror = mpnt_m;
91280+ } else {
91281+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
91282+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
91283+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
91284+ mpnt->vm_mirror->vm_mirror = mpnt;
91285+ }
91286+ }
91287+ BUG_ON(mpnt_m);
91288+ }
91289+#endif
91290+
91291 /* a new mm has just been created */
91292 arch_dup_mmap(oldmm, mm);
91293 retval = 0;
91294@@ -472,14 +575,6 @@ out:
91295 up_write(&oldmm->mmap_sem);
91296 uprobe_end_dup_mmap();
91297 return retval;
91298-fail_nomem_anon_vma_fork:
91299- mpol_put(vma_policy(tmp));
91300-fail_nomem_policy:
91301- kmem_cache_free(vm_area_cachep, tmp);
91302-fail_nomem:
91303- retval = -ENOMEM;
91304- vm_unacct_memory(charge);
91305- goto out;
91306 }
91307
91308 static inline int mm_alloc_pgd(struct mm_struct *mm)
91309@@ -698,8 +793,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
91310 return ERR_PTR(err);
91311
91312 mm = get_task_mm(task);
91313- if (mm && mm != current->mm &&
91314- !ptrace_may_access(task, mode)) {
91315+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
91316+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
91317 mmput(mm);
91318 mm = ERR_PTR(-EACCES);
91319 }
91320@@ -918,13 +1013,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
91321 spin_unlock(&fs->lock);
91322 return -EAGAIN;
91323 }
91324- fs->users++;
91325+ atomic_inc(&fs->users);
91326 spin_unlock(&fs->lock);
91327 return 0;
91328 }
91329 tsk->fs = copy_fs_struct(fs);
91330 if (!tsk->fs)
91331 return -ENOMEM;
91332+ /* Carry through gr_chroot_dentry and is_chrooted instead
91333+ of recomputing it here. Already copied when the task struct
91334+ is duplicated. This allows pivot_root to not be treated as
91335+ a chroot
91336+ */
91337+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
91338+
91339 return 0;
91340 }
91341
91342@@ -1133,7 +1235,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
91343 * parts of the process environment (as per the clone
91344 * flags). The actual kick-off is left to the caller.
91345 */
91346-static struct task_struct *copy_process(unsigned long clone_flags,
91347+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
91348 unsigned long stack_start,
91349 unsigned long stack_size,
91350 int __user *child_tidptr,
91351@@ -1205,6 +1307,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
91352 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
91353 #endif
91354 retval = -EAGAIN;
91355+
91356+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
91357+
91358 if (atomic_read(&p->real_cred->user->processes) >=
91359 task_rlimit(p, RLIMIT_NPROC)) {
91360 if (p->real_cred->user != INIT_USER &&
91361@@ -1452,6 +1557,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
91362 goto bad_fork_free_pid;
91363 }
91364
91365+ /* synchronizes with gr_set_acls()
91366+ we need to call this past the point of no return for fork()
91367+ */
91368+ gr_copy_label(p);
91369+
91370 if (likely(p->pid)) {
91371 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
91372
91373@@ -1541,6 +1651,8 @@ bad_fork_cleanup_count:
91374 bad_fork_free:
91375 free_task(p);
91376 fork_out:
91377+ gr_log_forkfail(retval);
91378+
91379 return ERR_PTR(retval);
91380 }
91381
91382@@ -1602,6 +1714,7 @@ long do_fork(unsigned long clone_flags,
91383
91384 p = copy_process(clone_flags, stack_start, stack_size,
91385 child_tidptr, NULL, trace);
91386+ add_latent_entropy();
91387 /*
91388 * Do this prior waking up the new thread - the thread pointer
91389 * might get invalid after that point, if the thread exits quickly.
91390@@ -1618,6 +1731,8 @@ long do_fork(unsigned long clone_flags,
91391 if (clone_flags & CLONE_PARENT_SETTID)
91392 put_user(nr, parent_tidptr);
91393
91394+ gr_handle_brute_check();
91395+
91396 if (clone_flags & CLONE_VFORK) {
91397 p->vfork_done = &vfork;
91398 init_completion(&vfork);
91399@@ -1736,7 +1851,7 @@ void __init proc_caches_init(void)
91400 mm_cachep = kmem_cache_create("mm_struct",
91401 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
91402 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
91403- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
91404+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
91405 mmap_init();
91406 nsproxy_cache_init();
91407 }
91408@@ -1776,7 +1891,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
91409 return 0;
91410
91411 /* don't need lock here; in the worst case we'll do useless copy */
91412- if (fs->users == 1)
91413+ if (atomic_read(&fs->users) == 1)
91414 return 0;
91415
91416 *new_fsp = copy_fs_struct(fs);
91417@@ -1883,7 +1998,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
91418 fs = current->fs;
91419 spin_lock(&fs->lock);
91420 current->fs = new_fs;
91421- if (--fs->users)
91422+ gr_set_chroot_entries(current, &current->fs->root);
91423+ if (atomic_dec_return(&fs->users))
91424 new_fs = NULL;
91425 else
91426 new_fs = fs;
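
Two independent things happen in kernel/fork.c: the VMA-duplication body of dup_mmap() is factored out into dup_vma() (with the SEGMEXEC mirror fixup bolted on afterwards), and GRKERNSEC_KSTACKOVERFLOW keeps the physical stack pages in lowmem for accounting but runs the task on a vmap alias, so a stack overflow walks off the mapping into vmalloc guard space and faults immediately rather than silently corrupting whatever sat next to the stack. The guard-page effect in a runnable userspace analogue (it is supposed to die with SIGSEGV on the last store):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	char *area = mmap(NULL, 3 * pg, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (area == MAP_FAILED)
		return 1;
	/* a PROT_NONE page below the "stack", like a vmalloc guard page */
	mprotect(area, pg, PROT_NONE);
	memset(area + pg, 0, 2 * pg);	/* in-bounds use: fine */
	puts("overflowing into the guard page now (expect SIGSEGV)...");
	area[pg - 1] = 'X';		/* faults loudly instead of corrupting */
	return 0;
}
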
91427diff --git a/kernel/futex.c b/kernel/futex.c
91428index b632b5f..ca00da9 100644
91429--- a/kernel/futex.c
91430+++ b/kernel/futex.c
91431@@ -202,7 +202,7 @@ struct futex_pi_state {
91432 atomic_t refcount;
91433
91434 union futex_key key;
91435-};
91436+} __randomize_layout;
91437
91438 /**
91439 * struct futex_q - The hashed futex queue entry, one per waiting task
91440@@ -236,7 +236,7 @@ struct futex_q {
91441 struct rt_mutex_waiter *rt_waiter;
91442 union futex_key *requeue_pi_key;
91443 u32 bitset;
91444-};
91445+} __randomize_layout;
91446
91447 static const struct futex_q futex_q_init = {
91448 /* list gets initialized in queue_me()*/
91449@@ -394,6 +394,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
91450 struct page *page, *page_head;
91451 int err, ro = 0;
91452
91453+#ifdef CONFIG_PAX_SEGMEXEC
91454+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
91455+ return -EFAULT;
91456+#endif
91457+
91458 /*
91459 * The futex address must be "naturally" aligned.
91460 */
91461@@ -593,7 +598,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
91462
91463 static int get_futex_value_locked(u32 *dest, u32 __user *from)
91464 {
91465- int ret;
91466+ unsigned long ret;
91467
91468 pagefault_disable();
91469 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
91470@@ -3033,6 +3038,7 @@ static void __init futex_detect_cmpxchg(void)
91471 {
91472 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
91473 u32 curval;
91474+ mm_segment_t oldfs;
91475
91476 /*
91477 * This will fail and we want it. Some arch implementations do
91478@@ -3044,8 +3050,11 @@ static void __init futex_detect_cmpxchg(void)
91479 * implementation, the non-functional ones will return
91480 * -ENOSYS.
91481 */
91482+ oldfs = get_fs();
91483+ set_fs(USER_DS);
91484 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
91485 futex_cmpxchg_enabled = 1;
91486+ set_fs(oldfs);
91487 #endif
91488 }
91489
91490diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
91491index 55c8c93..9ba7ad6 100644
91492--- a/kernel/futex_compat.c
91493+++ b/kernel/futex_compat.c
91494@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
91495 return 0;
91496 }
91497
91498-static void __user *futex_uaddr(struct robust_list __user *entry,
91499+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
91500 compat_long_t futex_offset)
91501 {
91502 compat_uptr_t base = ptr_to_compat(entry);
91503diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
91504index b358a80..fc25240 100644
91505--- a/kernel/gcov/base.c
91506+++ b/kernel/gcov/base.c
91507@@ -114,11 +114,6 @@ void gcov_enable_events(void)
91508 }
91509
91510 #ifdef CONFIG_MODULES
91511-static inline int within(void *addr, void *start, unsigned long size)
91512-{
91513- return ((addr >= start) && (addr < start + size));
91514-}
91515-
91516 /* Update list and generate events when modules are unloaded. */
91517 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
91518 void *data)
91519@@ -133,7 +128,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
91520
91521 /* Remove entries located in module from linked list. */
91522 while ((info = gcov_info_next(info))) {
91523- if (within(info, mod->module_core, mod->core_size)) {
91524+ if (within_module_core_rw((unsigned long)info, mod)) {
91525 gcov_info_unlink(prev, info);
91526 if (gcov_events_enabled)
91527 gcov_event(GCOV_REMOVE, info);
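
within() went away because under KERNEXEC a module no longer has one contiguous core: text and rodata live in an rx region, data in a separate rw region, and gcov_info objects are writable data. A sketch of the replacement helper, modelled on this patch's include/linux/module.h changes (an assumption as far as this excerpt goes):

static inline int within_module_core_rw(unsigned long addr,
					const struct module *mod)
{
	return addr >= (unsigned long)mod->module_core_rw &&
	       addr < (unsigned long)mod->module_core_rw + mod->core_size_rw;
}
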
91528diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
91529index 3ab2899..c6ad010 100644
91530--- a/kernel/hrtimer.c
91531+++ b/kernel/hrtimer.c
91532@@ -1449,7 +1449,7 @@ void hrtimer_peek_ahead_timers(void)
91533 local_irq_restore(flags);
91534 }
91535
91536-static void run_hrtimer_softirq(struct softirq_action *h)
91537+static __latent_entropy void run_hrtimer_softirq(void)
91538 {
91539 hrtimer_peek_ahead_timers();
91540 }
91541diff --git a/kernel/irq_work.c b/kernel/irq_work.c
91542index a82170e..5b01e7f 100644
91543--- a/kernel/irq_work.c
91544+++ b/kernel/irq_work.c
91545@@ -191,12 +191,13 @@ static int irq_work_cpu_notify(struct notifier_block *self,
91546 return NOTIFY_OK;
91547 }
91548
91549-static struct notifier_block cpu_notify;
91550+static struct notifier_block cpu_notify = {
91551+ .notifier_call = irq_work_cpu_notify,
91552+ .priority = 0,
91553+};
91554
91555 static __init int irq_work_init_cpu_notifier(void)
91556 {
91557- cpu_notify.notifier_call = irq_work_cpu_notify;
91558- cpu_notify.priority = 0;
91559 register_cpu_notifier(&cpu_notify);
91560 return 0;
91561 }
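
Filling the notifier_block at runtime forces it to stay writable; a designated initializer makes it write-once so the constification machinery can move it out of harm's way. The same transformation in miniature, as plain runnable C:

#include <stdio.h>

struct handler {
	int (*call)(int);
	int priority;
};

static int demo_call(int v) { return v + 1; }

/* before: static struct handler h;  h.call = demo_call;  h.priority = 0; */
static const struct handler h = {
	.call     = demo_call,
	.priority = 0,
};

int main(void)
{
	printf("%d\n", h.call(41));
	return 0;
}
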
91562diff --git a/kernel/jump_label.c b/kernel/jump_label.c
91563index 9019f15..9a3c42e 100644
91564--- a/kernel/jump_label.c
91565+++ b/kernel/jump_label.c
91566@@ -14,6 +14,7 @@
91567 #include <linux/err.h>
91568 #include <linux/static_key.h>
91569 #include <linux/jump_label_ratelimit.h>
91570+#include <linux/mm.h>
91571
91572 #ifdef HAVE_JUMP_LABEL
91573
91574@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
91575
91576 size = (((unsigned long)stop - (unsigned long)start)
91577 / sizeof(struct jump_entry));
91578+ pax_open_kernel();
91579 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
91580+ pax_close_kernel();
91581 }
91582
91583 static void jump_label_update(struct static_key *key, int enable);
91584@@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
91585 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
91586 struct jump_entry *iter;
91587
91588+ pax_open_kernel();
91589 for (iter = iter_start; iter < iter_stop; iter++) {
91590 if (within_module_init(iter->code, mod))
91591 iter->code = 0;
91592 }
91593+ pax_close_kernel();
91594 }
91595
91596 static int
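
pax_open_kernel() and pax_close_kernel() are PaX primitives, not mainline: they briefly lift write protection so the otherwise read-only jump table can be sorted or have its init-section entries invalidated, then re-arm it. A userspace analogue of that discipline using page protections (runnable):

#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	char *table;

	if (posix_memalign((void **)&table, pg, pg))
		return 1;
	strcpy(table, "jump entries");
	mprotect(table, pg, PROT_READ);			/* normally immutable */

	mprotect(table, pg, PROT_READ | PROT_WRITE);	/* pax_open_kernel() */
	table[0] = 'J';					/* the sanctioned write */
	mprotect(table, pg, PROT_READ);			/* pax_close_kernel() */

	puts(table);
	return 0;
}
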
91597diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
91598index cb0cf37..b69e161 100644
91599--- a/kernel/kallsyms.c
91600+++ b/kernel/kallsyms.c
91601@@ -11,6 +11,9 @@
91602 * Changed the compression method from stem compression to "table lookup"
91603 * compression (see scripts/kallsyms.c for a more complete description)
91604 */
91605+#ifdef CONFIG_GRKERNSEC_HIDESYM
91606+#define __INCLUDED_BY_HIDESYM 1
91607+#endif
91608 #include <linux/kallsyms.h>
91609 #include <linux/module.h>
91610 #include <linux/init.h>
91611@@ -54,12 +57,33 @@ extern const unsigned long kallsyms_markers[] __weak;
91612
91613 static inline int is_kernel_inittext(unsigned long addr)
91614 {
91615+ if (system_state != SYSTEM_BOOTING)
91616+ return 0;
91617+
91618 if (addr >= (unsigned long)_sinittext
91619 && addr <= (unsigned long)_einittext)
91620 return 1;
91621 return 0;
91622 }
91623
91624+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91625+#ifdef CONFIG_MODULES
91626+static inline int is_module_text(unsigned long addr)
91627+{
91628+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
91629+ return 1;
91630+
91631+ addr = ktla_ktva(addr);
91632+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
91633+}
91634+#else
91635+static inline int is_module_text(unsigned long addr)
91636+{
91637+ return 0;
91638+}
91639+#endif
91640+#endif
91641+
91642 static inline int is_kernel_text(unsigned long addr)
91643 {
91644 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
91645@@ -70,13 +94,28 @@ static inline int is_kernel_text(unsigned long addr)
91646
91647 static inline int is_kernel(unsigned long addr)
91648 {
91649+
91650+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91651+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
91652+ return 1;
91653+
91654+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
91655+#else
91656 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
91657+#endif
91658+
91659 return 1;
91660 return in_gate_area_no_mm(addr);
91661 }
91662
91663 static int is_ksym_addr(unsigned long addr)
91664 {
91665+
91666+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91667+ if (is_module_text(addr))
91668+ return 0;
91669+#endif
91670+
91671 if (all_var)
91672 return is_kernel(addr);
91673
91674@@ -481,7 +520,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
91675
91676 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
91677 {
91678- iter->name[0] = '\0';
91679 iter->nameoff = get_symbol_offset(new_pos);
91680 iter->pos = new_pos;
91681 }
91682@@ -529,6 +567,11 @@ static int s_show(struct seq_file *m, void *p)
91683 {
91684 struct kallsym_iter *iter = m->private;
91685
91686+#ifdef CONFIG_GRKERNSEC_HIDESYM
91687+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
91688+ return 0;
91689+#endif
91690+
91691 /* Some debugging symbols have no name. Ignore them. */
91692 if (!iter->name[0])
91693 return 0;
91694@@ -542,6 +585,7 @@ static int s_show(struct seq_file *m, void *p)
91695 */
91696 type = iter->exported ? toupper(iter->type) :
91697 tolower(iter->type);
91698+
91699 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
91700 type, iter->name, iter->module_name);
91701 } else
91702@@ -567,7 +611,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
91703 struct kallsym_iter *iter;
91704 int ret;
91705
91706- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
91707+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
91708 if (!iter)
91709 return -ENOMEM;
91710 reset_iter(iter, 0);
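
Under GRKERNSEC_HIDESYM, s_show() now returns 0 for non-root readers, so /proc/kallsyms exists but appears empty to them; the switch to kzalloc() compensates for reset_iter() no longer clearing name[0], since the first s_show() otherwise tests an uninitialized heap byte. The visible effect, checked from userspace:

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/kallsyms", "r");
	int shown = 0;

	if (!f) {
		perror("kallsyms");
		return 1;
	}
	while (shown < 3 && fgets(line, sizeof(line), f)) {
		fputs(line, stdout);
		shown++;
	}
	if (!shown)
		puts("(no symbols visible to this uid)");
	fclose(f);
	return 0;
}
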
91711diff --git a/kernel/kcmp.c b/kernel/kcmp.c
91712index e30ac0f..3528cac 100644
91713--- a/kernel/kcmp.c
91714+++ b/kernel/kcmp.c
91715@@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
91716 struct task_struct *task1, *task2;
91717 int ret;
91718
91719+#ifdef CONFIG_GRKERNSEC
91720+ return -ENOSYS;
91721+#endif
91722+
91723 rcu_read_lock();
91724
91725 /*
91726diff --git a/kernel/kexec.c b/kernel/kexec.c
91727index 4b8f0c9..fffd0df 100644
91728--- a/kernel/kexec.c
91729+++ b/kernel/kexec.c
91730@@ -1045,7 +1045,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
91731 compat_ulong_t, flags)
91732 {
91733 struct compat_kexec_segment in;
91734- struct kexec_segment out, __user *ksegments;
91735+ struct kexec_segment out;
91736+ struct kexec_segment __user *ksegments;
91737 unsigned long i, result;
91738
91739 /* Don't allow clients that don't understand the native
91740diff --git a/kernel/kmod.c b/kernel/kmod.c
91741index 8637e04..8b1d0d8 100644
91742--- a/kernel/kmod.c
91743+++ b/kernel/kmod.c
91744@@ -75,7 +75,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
91745 kfree(info->argv);
91746 }
91747
91748-static int call_modprobe(char *module_name, int wait)
91749+static int call_modprobe(char *module_name, char *module_param, int wait)
91750 {
91751 struct subprocess_info *info;
91752 static char *envp[] = {
91753@@ -85,7 +85,7 @@ static int call_modprobe(char *module_name, int wait)
91754 NULL
91755 };
91756
91757- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
91758+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
91759 if (!argv)
91760 goto out;
91761
91762@@ -97,7 +97,8 @@ static int call_modprobe(char *module_name, int wait)
91763 argv[1] = "-q";
91764 argv[2] = "--";
91765 argv[3] = module_name; /* check free_modprobe_argv() */
91766- argv[4] = NULL;
91767+ argv[4] = module_param;
91768+ argv[5] = NULL;
91769
91770 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
91771 NULL, free_modprobe_argv, NULL);
91772@@ -129,9 +130,8 @@ out:
91773 * If module auto-loading support is disabled then this function
91774 * becomes a no-operation.
91775 */
91776-int __request_module(bool wait, const char *fmt, ...)
91777+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
91778 {
91779- va_list args;
91780 char module_name[MODULE_NAME_LEN];
91781 unsigned int max_modprobes;
91782 int ret;
91783@@ -150,9 +150,7 @@ int __request_module(bool wait, const char *fmt, ...)
91784 if (!modprobe_path[0])
91785 return 0;
91786
91787- va_start(args, fmt);
91788- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
91789- va_end(args);
91790+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
91791 if (ret >= MODULE_NAME_LEN)
91792 return -ENAMETOOLONG;
91793
91794@@ -160,6 +158,20 @@ int __request_module(bool wait, const char *fmt, ...)
91795 if (ret)
91796 return ret;
91797
91798+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91799+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
91800+ /* hack to workaround consolekit/udisks stupidity */
91801+ read_lock(&tasklist_lock);
91802+ if (!strcmp(current->comm, "mount") &&
91803+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
91804+ read_unlock(&tasklist_lock);
91805+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
91806+ return -EPERM;
91807+ }
91808+ read_unlock(&tasklist_lock);
91809+ }
91810+#endif
91811+
91812 /* If modprobe needs a service that is in a module, we get a recursive
91813 * loop. Limit the number of running kmod threads to max_threads/2 or
91814 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
91815@@ -188,11 +200,52 @@ int __request_module(bool wait, const char *fmt, ...)
91816
91817 trace_module_request(module_name, wait, _RET_IP_);
91818
91819- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
91820+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
91821
91822 atomic_dec(&kmod_concurrent);
91823 return ret;
91824 }
91825+
91826+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
91827+{
91828+ va_list args;
91829+ int ret;
91830+
91831+ va_start(args, fmt);
91832+ ret = ____request_module(wait, module_param, fmt, args);
91833+ va_end(args);
91834+
91835+ return ret;
91836+}
91837+
91838+int __request_module(bool wait, const char *fmt, ...)
91839+{
91840+ va_list args;
91841+ int ret;
91842+
91843+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91844+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
91845+ char module_param[MODULE_NAME_LEN];
91846+
91847+ memset(module_param, 0, sizeof(module_param));
91848+
91849+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
91850+
91851+ va_start(args, fmt);
91852+ ret = ____request_module(wait, module_param, fmt, args);
91853+ va_end(args);
91854+
91855+ return ret;
91856+ }
91857+#endif
91858+
91859+ va_start(args, fmt);
91860+ ret = ____request_module(wait, NULL, fmt, args);
91861+ va_end(args);
91862+
91863+ return ret;
91864+}
91865+
91866 EXPORT_SYMBOL(__request_module);
91867 #endif /* CONFIG_MODULES */
91868
91869@@ -218,6 +271,20 @@ static int ____call_usermodehelper(void *data)
91870 */
91871 set_user_nice(current, 0);
91872
91873+#ifdef CONFIG_GRKERNSEC
91874+ /* this is race-free as far as userland is concerned as we copied
91875+ out the path to be used prior to this point and are now operating
91876+ on that copy
91877+ */
91878+ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
91879+ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
91880+ strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
91881+ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of /sbin and system library paths\n", sub_info->path);
91882+ retval = -EPERM;
91883+ goto fail;
91884+ }
91885+#endif
91886+
91887 retval = -ENOMEM;
91888 new = prepare_kernel_cred(current);
91889 if (!new)
91890@@ -240,8 +307,8 @@ static int ____call_usermodehelper(void *data)
91891 commit_creds(new);
91892
91893 retval = do_execve(getname_kernel(sub_info->path),
91894- (const char __user *const __user *)sub_info->argv,
91895- (const char __user *const __user *)sub_info->envp);
91896+ (const char __user *const __force_user *)sub_info->argv,
91897+ (const char __user *const __force_user *)sub_info->envp);
91898 if (!retval)
91899 return 0;
91900
91901@@ -260,6 +327,10 @@ static int call_helper(void *data)
91902
91903 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
91904 {
91905+#ifdef CONFIG_GRKERNSEC
91906+ kfree(info->path);
91907+ info->path = info->origpath;
91908+#endif
91909 if (info->cleanup)
91910 (*info->cleanup)(info);
91911 kfree(info);
91912@@ -300,7 +371,7 @@ static int wait_for_helper(void *data)
91913 *
91914 * Thus the __user pointer cast is valid here.
91915 */
91916- sys_wait4(pid, (int __user *)&ret, 0, NULL);
91917+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
91918
91919 /*
91920 * If ret is 0, either ____call_usermodehelper failed and the
91921@@ -539,7 +610,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
91922 goto out;
91923
91924 INIT_WORK(&sub_info->work, __call_usermodehelper);
91925+#ifdef CONFIG_GRKERNSEC
91926+ sub_info->origpath = path;
91927+ sub_info->path = kstrdup(path, gfp_mask);
91928+#else
91929 sub_info->path = path;
91930+#endif
91931 sub_info->argv = argv;
91932 sub_info->envp = envp;
91933
91934@@ -647,7 +723,7 @@ EXPORT_SYMBOL(call_usermodehelper);
91935 static int proc_cap_handler(struct ctl_table *table, int write,
91936 void __user *buffer, size_t *lenp, loff_t *ppos)
91937 {
91938- struct ctl_table t;
91939+ ctl_table_no_const t;
91940 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
91941 kernel_cap_t new_cap;
91942 int err, i;
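
The request_module() rework threads one extra argument through to modprobe: NULL when root triggered the load, "grsec_modharden_normal<uid>_" when an unprivileged user did, letting a policy-aware modprobe log or refuse user-triggered auto-loads (the mount/udisks special case above papers over a known userland offender). The vector call_modprobe() now builds, sketched as a fragment of the surrounding kmod.c context:

char *argv[6] = {
	modprobe_path,	/* typically "/sbin/modprobe" */
	"-q",
	"--",
	module_name,
	module_param,	/* NULL for root loads: execve() then stops here */
	NULL,
};
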
91943diff --git a/kernel/kprobes.c b/kernel/kprobes.c
91944index 734e9a7..0a313b8 100644
91945--- a/kernel/kprobes.c
91946+++ b/kernel/kprobes.c
91947@@ -31,6 +31,9 @@
91948 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
91949 * <prasanna@in.ibm.com> added function-return probes.
91950 */
91951+#ifdef CONFIG_GRKERNSEC_HIDESYM
91952+#define __INCLUDED_BY_HIDESYM 1
91953+#endif
91954 #include <linux/kprobes.h>
91955 #include <linux/hash.h>
91956 #include <linux/init.h>
91957@@ -122,12 +125,12 @@ enum kprobe_slot_state {
91958
91959 static void *alloc_insn_page(void)
91960 {
91961- return module_alloc(PAGE_SIZE);
91962+ return module_alloc_exec(PAGE_SIZE);
91963 }
91964
91965 static void free_insn_page(void *page)
91966 {
91967- module_free(NULL, page);
91968+ module_free_exec(NULL, page);
91969 }
91970
91971 struct kprobe_insn_cache kprobe_insn_slots = {
91972@@ -2176,11 +2179,11 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
91973 kprobe_type = "k";
91974
91975 if (sym)
91976- seq_printf(pi, "%p %s %s+0x%x %s ",
91977+ seq_printf(pi, "%pK %s %s+0x%x %s ",
91978 p->addr, kprobe_type, sym, offset,
91979 (modname ? modname : " "));
91980 else
91981- seq_printf(pi, "%p %s %p ",
91982+ seq_printf(pi, "%pK %s %pK ",
91983 p->addr, kprobe_type, p->addr);
91984
91985 if (!pp)
91986diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
91987index 6683cce..daf8999 100644
91988--- a/kernel/ksysfs.c
91989+++ b/kernel/ksysfs.c
91990@@ -50,6 +50,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
91991 {
91992 if (count+1 > UEVENT_HELPER_PATH_LEN)
91993 return -ENOENT;
91994+ if (!capable(CAP_SYS_ADMIN))
91995+ return -EPERM;
91996 memcpy(uevent_helper, buf, count);
91997 uevent_helper[count] = '\0';
91998 if (count && uevent_helper[count-1] == '\n')
91999@@ -176,7 +178,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
92000 return count;
92001 }
92002
92003-static struct bin_attribute notes_attr = {
92004+static bin_attribute_no_const notes_attr __read_only = {
92005 .attr = {
92006 .name = "notes",
92007 .mode = S_IRUGO,
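
The uevent helper path is what the kernel executes, as root, for every uevent when configured; the added check means writing it requires CAP_SYS_ADMIN rather than mere uid-0 file permission, so a capability-stripped root process can no longer plant a binary the kernel will later run fully privileged. A probe of the behaviour (caution: a successful write really changes the helper, run it only in a throwaway VM):

#include <stdio.h>
#include <errno.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/uevent_helper", "w");
	int ok;

	if (!f) {
		printf("open refused: %s\n", strerror(errno));
		return 1;
	}
	ok = fprintf(f, "/sbin/hotplug\n") > 0 && fflush(f) == 0;
	if (!ok)
		printf("write refused: %s\n", strerror(errno));
	else
		puts("helper updated (CAP_SYS_ADMIN present)");
	fclose(f);
	return ok ? 0 : 1;
}
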
92008diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
92009index d24e433..fa04fb8 100644
92010--- a/kernel/locking/lockdep.c
92011+++ b/kernel/locking/lockdep.c
92012@@ -597,6 +597,10 @@ static int static_obj(void *obj)
92013 end = (unsigned long) &_end,
92014 addr = (unsigned long) obj;
92015
92016+#ifdef CONFIG_PAX_KERNEXEC
92017+ start = ktla_ktva(start);
92018+#endif
92019+
92020 /*
92021 * static variable?
92022 */
92023@@ -738,6 +742,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
92024 if (!static_obj(lock->key)) {
92025 debug_locks_off();
92026 printk("INFO: trying to register non-static key.\n");
92027+ printk("lock:%pS key:%pS.\n", lock, lock->key);
92028 printk("the code is fine but needs lockdep annotation.\n");
92029 printk("turning off the locking correctness validator.\n");
92030 dump_stack();
92031@@ -3079,7 +3084,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
92032 if (!class)
92033 return 0;
92034 }
92035- atomic_inc((atomic_t *)&class->ops);
92036+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
92037 if (very_verbose(class)) {
92038 printk("\nacquire class [%p] %s", class->key, class->name);
92039 if (class->name_version > 1)
92040diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
92041index ef43ac4..2720dfa 100644
92042--- a/kernel/locking/lockdep_proc.c
92043+++ b/kernel/locking/lockdep_proc.c
92044@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
92045 return 0;
92046 }
92047
92048- seq_printf(m, "%p", class->key);
92049+ seq_printf(m, "%pK", class->key);
92050 #ifdef CONFIG_DEBUG_LOCKDEP
92051 seq_printf(m, " OPS:%8ld", class->ops);
92052 #endif
92053@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
92054
92055 list_for_each_entry(entry, &class->locks_after, entry) {
92056 if (entry->distance == 1) {
92057- seq_printf(m, " -> [%p] ", entry->class->key);
92058+ seq_printf(m, " -> [%pK] ", entry->class->key);
92059 print_name(m, entry->class);
92060 seq_puts(m, "\n");
92061 }
92062@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
92063 if (!class->key)
92064 continue;
92065
92066- seq_printf(m, "[%p] ", class->key);
92067+ seq_printf(m, "[%pK] ", class->key);
92068 print_name(m, class);
92069 seq_puts(m, "\n");
92070 }
92071@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
92072 if (!i)
92073 seq_line(m, '-', 40-namelen, namelen);
92074
92075- snprintf(ip, sizeof(ip), "[<%p>]",
92076+ snprintf(ip, sizeof(ip), "[<%pK>]",
92077 (void *)class->contention_point[i]);
92078 seq_printf(m, "%40s %14lu %29s %pS\n",
92079 name, stats->contention_point[i],
92080@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
92081 if (!i)
92082 seq_line(m, '-', 40-namelen, namelen);
92083
92084- snprintf(ip, sizeof(ip), "[<%p>]",
92085+ snprintf(ip, sizeof(ip), "[<%pK>]",
92086 (void *)class->contending_point[i]);
92087 seq_printf(m, "%40s %14lu %29s %pS\n",
92088 name, stats->contending_point[i],
92089diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
92090index be9ee15..39d6233 100644
92091--- a/kernel/locking/mcs_spinlock.c
92092+++ b/kernel/locking/mcs_spinlock.c
92093@@ -102,7 +102,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
92094
92095 prev = decode_cpu(old);
92096 node->prev = prev;
92097- ACCESS_ONCE(prev->next) = node;
92098+ ACCESS_ONCE_RW(prev->next) = node;
92099
92100 /*
92101 * Normally @prev is untouchable after the above store; because at that
92102@@ -174,8 +174,8 @@ unqueue:
92103 * it will wait in Step-A.
92104 */
92105
92106- ACCESS_ONCE(next->prev) = prev;
92107- ACCESS_ONCE(prev->next) = next;
92108+ ACCESS_ONCE_RW(next->prev) = prev;
92109+ ACCESS_ONCE_RW(prev->next) = next;
92110
92111 return false;
92112 }
92113@@ -197,13 +197,13 @@ void osq_unlock(struct optimistic_spin_queue *lock)
92114 node = this_cpu_ptr(&osq_node);
92115 next = xchg(&node->next, NULL);
92116 if (next) {
92117- ACCESS_ONCE(next->locked) = 1;
92118+ ACCESS_ONCE_RW(next->locked) = 1;
92119 return;
92120 }
92121
92122 next = osq_wait_next(lock, node, NULL);
92123 if (next)
92124- ACCESS_ONCE(next->locked) = 1;
92125+ ACCESS_ONCE_RW(next->locked) = 1;
92126 }
92127
92128 #endif
92129diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
92130index 74356dc..48dd5e1 100644
92131--- a/kernel/locking/mcs_spinlock.h
92132+++ b/kernel/locking/mcs_spinlock.h
92133@@ -81,7 +81,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
92134 */
92135 return;
92136 }
92137- ACCESS_ONCE(prev->next) = node;
92138+ ACCESS_ONCE_RW(prev->next) = node;
92139
92140 /* Wait until the lock holder passes the lock down. */
92141 arch_mcs_spin_lock_contended(&node->locked);
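
Across these locking hunks, stores go through ACCESS_ONCE_RW() while loads keep ACCESS_ONCE(). Under PaX constification the read flavor yields a const-qualified lvalue, so an accidental store through it fails at compile time, and only audited write sites use the writable flavor. A sketch of the pair, assuming GCC/Clang __typeof__; these mirror the idea, not necessarily the patch's exact definitions.

/* Read-only flavor: the const qualifier turns accidental writes
 * through ACCESS_ONCE() into compile errors. */
#define ACCESS_ONCE(x)    (*(volatile const __typeof__(x) *)&(x))
/* Writable flavor for the few sites that really must store. */
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

static int shared;

static int reader(void)   { return ACCESS_ONCE(shared); }
static void writer(int v) { ACCESS_ONCE_RW(shared) = v; }
/* ACCESS_ONCE(shared) = v;  would not compile: assignment to const */
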
92142diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
92143index 5cf6731..ce3bc5a 100644
92144--- a/kernel/locking/mutex-debug.c
92145+++ b/kernel/locking/mutex-debug.c
92146@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
92147 }
92148
92149 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
92150- struct thread_info *ti)
92151+ struct task_struct *task)
92152 {
92153 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
92154
92155 /* Mark the current thread as blocked on the lock: */
92156- ti->task->blocked_on = waiter;
92157+ task->blocked_on = waiter;
92158 }
92159
92160 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
92161- struct thread_info *ti)
92162+ struct task_struct *task)
92163 {
92164 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
92165- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
92166- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
92167- ti->task->blocked_on = NULL;
92168+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
92169+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
92170+ task->blocked_on = NULL;
92171
92172 list_del_init(&waiter->list);
92173 waiter->task = NULL;
92174diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
92175index 0799fd3..d06ae3b 100644
92176--- a/kernel/locking/mutex-debug.h
92177+++ b/kernel/locking/mutex-debug.h
92178@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
92179 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
92180 extern void debug_mutex_add_waiter(struct mutex *lock,
92181 struct mutex_waiter *waiter,
92182- struct thread_info *ti);
92183+ struct task_struct *task);
92184 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
92185- struct thread_info *ti);
92186+ struct task_struct *task);
92187 extern void debug_mutex_unlock(struct mutex *lock);
92188 extern void debug_mutex_init(struct mutex *lock, const char *name,
92189 struct lock_class_key *key);
92190diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
92191index acca2c1..ddeaea8 100644
92192--- a/kernel/locking/mutex.c
92193+++ b/kernel/locking/mutex.c
92194@@ -490,7 +490,7 @@ slowpath:
92195 goto skip_wait;
92196
92197 debug_mutex_lock_common(lock, &waiter);
92198- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
92199+ debug_mutex_add_waiter(lock, &waiter, task);
92200
92201 /* add waiting tasks to the end of the waitqueue (FIFO): */
92202 list_add_tail(&waiter.list, &lock->wait_list);
92203@@ -534,7 +534,7 @@ slowpath:
92204 schedule_preempt_disabled();
92205 spin_lock_mutex(&lock->wait_lock, flags);
92206 }
92207- mutex_remove_waiter(lock, &waiter, current_thread_info());
92208+ mutex_remove_waiter(lock, &waiter, task);
92209 /* set it to 0 if there are no waiters left: */
92210 if (likely(list_empty(&lock->wait_list)))
92211 atomic_set(&lock->count, 0);
92212@@ -571,7 +571,7 @@ skip_wait:
92213 return 0;
92214
92215 err:
92216- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
92217+ mutex_remove_waiter(lock, &waiter, task);
92218 spin_unlock_mutex(&lock->wait_lock, flags);
92219 debug_mutex_free_waiter(&waiter);
92220 mutex_release(&lock->dep_map, 1, ip);
92221diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
92222index 1d96dd0..994ff19 100644
92223--- a/kernel/locking/rtmutex-tester.c
92224+++ b/kernel/locking/rtmutex-tester.c
92225@@ -22,7 +22,7 @@
92226 #define MAX_RT_TEST_MUTEXES 8
92227
92228 static spinlock_t rttest_lock;
92229-static atomic_t rttest_event;
92230+static atomic_unchecked_t rttest_event;
92231
92232 struct test_thread_data {
92233 int opcode;
92234@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
92235
92236 case RTTEST_LOCKCONT:
92237 td->mutexes[td->opdata] = 1;
92238- td->event = atomic_add_return(1, &rttest_event);
92239+ td->event = atomic_add_return_unchecked(1, &rttest_event);
92240 return 0;
92241
92242 case RTTEST_RESET:
92243@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
92244 return 0;
92245
92246 case RTTEST_RESETEVENT:
92247- atomic_set(&rttest_event, 0);
92248+ atomic_set_unchecked(&rttest_event, 0);
92249 return 0;
92250
92251 default:
92252@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
92253 return ret;
92254
92255 td->mutexes[id] = 1;
92256- td->event = atomic_add_return(1, &rttest_event);
92257+ td->event = atomic_add_return_unchecked(1, &rttest_event);
92258 rt_mutex_lock(&mutexes[id]);
92259- td->event = atomic_add_return(1, &rttest_event);
92260+ td->event = atomic_add_return_unchecked(1, &rttest_event);
92261 td->mutexes[id] = 4;
92262 return 0;
92263
92264@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
92265 return ret;
92266
92267 td->mutexes[id] = 1;
92268- td->event = atomic_add_return(1, &rttest_event);
92269+ td->event = atomic_add_return_unchecked(1, &rttest_event);
92270 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
92271- td->event = atomic_add_return(1, &rttest_event);
92272+ td->event = atomic_add_return_unchecked(1, &rttest_event);
92273 td->mutexes[id] = ret ? 0 : 4;
92274 return ret ? -EINTR : 0;
92275
92276@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
92277 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
92278 return ret;
92279
92280- td->event = atomic_add_return(1, &rttest_event);
92281+ td->event = atomic_add_return_unchecked(1, &rttest_event);
92282 rt_mutex_unlock(&mutexes[id]);
92283- td->event = atomic_add_return(1, &rttest_event);
92284+ td->event = atomic_add_return_unchecked(1, &rttest_event);
92285 td->mutexes[id] = 0;
92286 return 0;
92287
92288@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
92289 break;
92290
92291 td->mutexes[dat] = 2;
92292- td->event = atomic_add_return(1, &rttest_event);
92293+ td->event = atomic_add_return_unchecked(1, &rttest_event);
92294 break;
92295
92296 default:
92297@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
92298 return;
92299
92300 td->mutexes[dat] = 3;
92301- td->event = atomic_add_return(1, &rttest_event);
92302+ td->event = atomic_add_return_unchecked(1, &rttest_event);
92303 break;
92304
92305 case RTTEST_LOCKNOWAIT:
92306@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
92307 return;
92308
92309 td->mutexes[dat] = 1;
92310- td->event = atomic_add_return(1, &rttest_event);
92311+ td->event = atomic_add_return_unchecked(1, &rttest_event);
92312 return;
92313
92314 default:
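
The rtmutex-tester counter becomes atomic_unchecked_t because, under PaX REFCOUNT, ordinary atomic_t arithmetic is overflow-checked to catch reference-count overflows; counters that may legitimately wrap, such as this pure event statistic, opt out through the _unchecked API. A userspace sketch of the split using C11 atomics, with abort() standing in for the kernel's overflow handler and the increment assumed positive.

#include <stdatomic.h>
#include <limits.h>
#include <stdlib.h>

typedef struct { atomic_int v; } atomic_checked_t;
typedef struct { atomic_int v; } atomic_unchecked_t;

/* Checked add (i assumed positive): detect signed wrap after the
 * fact and trap, the way a hardened refcount would. C11 atomic
 * arithmetic wraps silently, so the check itself is well-defined. */
static int atomic_add_return(int i, atomic_checked_t *a)
{
    int old = atomic_fetch_add(&a->v, i);
    if (old > INT_MAX - i)
        abort();        /* overflow: a refcount bug, stop the exploit */
    return old + i;
}

/* Unchecked add: free-running, fine for pure statistics that may wrap. */
static int atomic_add_return_unchecked(int i, atomic_unchecked_t *a)
{
    return atomic_fetch_add(&a->v, i) + i;
}
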
92315diff --git a/kernel/module.c b/kernel/module.c
92316index 81e727c..a8ea6f9 100644
92317--- a/kernel/module.c
92318+++ b/kernel/module.c
92319@@ -61,6 +61,7 @@
92320 #include <linux/pfn.h>
92321 #include <linux/bsearch.h>
92322 #include <linux/fips.h>
92323+#include <linux/grsecurity.h>
92324 #include <uapi/linux/module.h>
92325 #include "module-internal.h"
92326
92327@@ -157,7 +158,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
92328
92329 /* Bounds of module allocation, for speeding __module_address.
92330 * Protected by module_mutex. */
92331-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
92332+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
92333+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
92334
92335 int register_module_notifier(struct notifier_block * nb)
92336 {
92337@@ -324,7 +326,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
92338 return true;
92339
92340 list_for_each_entry_rcu(mod, &modules, list) {
92341- struct symsearch arr[] = {
92342+ struct symsearch modarr[] = {
92343 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
92344 NOT_GPL_ONLY, false },
92345 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
92346@@ -349,7 +351,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
92347 if (mod->state == MODULE_STATE_UNFORMED)
92348 continue;
92349
92350- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
92351+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
92352 return true;
92353 }
92354 return false;
92355@@ -489,7 +491,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
92356 if (!pcpusec->sh_size)
92357 return 0;
92358
92359- if (align > PAGE_SIZE) {
92360+ if (align-1 >= PAGE_SIZE) {
92361 pr_warn("%s: per-cpu alignment %li > %li\n",
92362 mod->name, align, PAGE_SIZE);
92363 align = PAGE_SIZE;
92364@@ -1061,7 +1063,7 @@ struct module_attribute module_uevent =
92365 static ssize_t show_coresize(struct module_attribute *mattr,
92366 struct module_kobject *mk, char *buffer)
92367 {
92368- return sprintf(buffer, "%u\n", mk->mod->core_size);
92369+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
92370 }
92371
92372 static struct module_attribute modinfo_coresize =
92373@@ -1070,7 +1072,7 @@ static struct module_attribute modinfo_coresize =
92374 static ssize_t show_initsize(struct module_attribute *mattr,
92375 struct module_kobject *mk, char *buffer)
92376 {
92377- return sprintf(buffer, "%u\n", mk->mod->init_size);
92378+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
92379 }
92380
92381 static struct module_attribute modinfo_initsize =
92382@@ -1162,12 +1164,29 @@ static int check_version(Elf_Shdr *sechdrs,
92383 goto bad_version;
92384 }
92385
92386+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
92387+ /*
92388+ * avoid potentially printing gibberish on attempted load
92389+ * of a module randomized with a different seed
92390+ */
92391+ pr_warn("no symbol version for %s\n", symname);
92392+#else
92393 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
92394+#endif
92395 return 0;
92396
92397 bad_version:
92398+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
92399+ /*
92400+ * avoid potentially printing gibberish on attempted load
92401+ * of a module randomized with a different seed
92402+ */
92403+ printk("attempted module disagrees about version of symbol %s\n",
92404+ symname);
92405+#else
92406 printk("%s: disagrees about version of symbol %s\n",
92407 mod->name, symname);
92408+#endif
92409 return 0;
92410 }
92411
92412@@ -1283,7 +1302,7 @@ resolve_symbol_wait(struct module *mod,
92413 */
92414 #ifdef CONFIG_SYSFS
92415
92416-#ifdef CONFIG_KALLSYMS
92417+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
92418 static inline bool sect_empty(const Elf_Shdr *sect)
92419 {
92420 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
92421@@ -1423,7 +1442,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
92422 {
92423 unsigned int notes, loaded, i;
92424 struct module_notes_attrs *notes_attrs;
92425- struct bin_attribute *nattr;
92426+ bin_attribute_no_const *nattr;
92427
92428 /* failed to create section attributes, so can't create notes */
92429 if (!mod->sect_attrs)
92430@@ -1535,7 +1554,7 @@ static void del_usage_links(struct module *mod)
92431 static int module_add_modinfo_attrs(struct module *mod)
92432 {
92433 struct module_attribute *attr;
92434- struct module_attribute *temp_attr;
92435+ module_attribute_no_const *temp_attr;
92436 int error = 0;
92437 int i;
92438
92439@@ -1756,21 +1775,21 @@ static void set_section_ro_nx(void *base,
92440
92441 static void unset_module_core_ro_nx(struct module *mod)
92442 {
92443- set_page_attributes(mod->module_core + mod->core_text_size,
92444- mod->module_core + mod->core_size,
92445+ set_page_attributes(mod->module_core_rw,
92446+ mod->module_core_rw + mod->core_size_rw,
92447 set_memory_x);
92448- set_page_attributes(mod->module_core,
92449- mod->module_core + mod->core_ro_size,
92450+ set_page_attributes(mod->module_core_rx,
92451+ mod->module_core_rx + mod->core_size_rx,
92452 set_memory_rw);
92453 }
92454
92455 static void unset_module_init_ro_nx(struct module *mod)
92456 {
92457- set_page_attributes(mod->module_init + mod->init_text_size,
92458- mod->module_init + mod->init_size,
92459+ set_page_attributes(mod->module_init_rw,
92460+ mod->module_init_rw + mod->init_size_rw,
92461 set_memory_x);
92462- set_page_attributes(mod->module_init,
92463- mod->module_init + mod->init_ro_size,
92464+ set_page_attributes(mod->module_init_rx,
92465+ mod->module_init_rx + mod->init_size_rx,
92466 set_memory_rw);
92467 }
92468
92469@@ -1783,14 +1802,14 @@ void set_all_modules_text_rw(void)
92470 list_for_each_entry_rcu(mod, &modules, list) {
92471 if (mod->state == MODULE_STATE_UNFORMED)
92472 continue;
92473- if ((mod->module_core) && (mod->core_text_size)) {
92474- set_page_attributes(mod->module_core,
92475- mod->module_core + mod->core_text_size,
92476+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
92477+ set_page_attributes(mod->module_core_rx,
92478+ mod->module_core_rx + mod->core_size_rx,
92479 set_memory_rw);
92480 }
92481- if ((mod->module_init) && (mod->init_text_size)) {
92482- set_page_attributes(mod->module_init,
92483- mod->module_init + mod->init_text_size,
92484+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
92485+ set_page_attributes(mod->module_init_rx,
92486+ mod->module_init_rx + mod->init_size_rx,
92487 set_memory_rw);
92488 }
92489 }
92490@@ -1806,14 +1825,14 @@ void set_all_modules_text_ro(void)
92491 list_for_each_entry_rcu(mod, &modules, list) {
92492 if (mod->state == MODULE_STATE_UNFORMED)
92493 continue;
92494- if ((mod->module_core) && (mod->core_text_size)) {
92495- set_page_attributes(mod->module_core,
92496- mod->module_core + mod->core_text_size,
92497+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
92498+ set_page_attributes(mod->module_core_rx,
92499+ mod->module_core_rx + mod->core_size_rx,
92500 set_memory_ro);
92501 }
92502- if ((mod->module_init) && (mod->init_text_size)) {
92503- set_page_attributes(mod->module_init,
92504- mod->module_init + mod->init_text_size,
92505+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
92506+ set_page_attributes(mod->module_init_rx,
92507+ mod->module_init_rx + mod->init_size_rx,
92508 set_memory_ro);
92509 }
92510 }
92511@@ -1864,16 +1883,19 @@ static void free_module(struct module *mod)
92512
92513 /* This may be NULL, but that's OK */
92514 unset_module_init_ro_nx(mod);
92515- module_free(mod, mod->module_init);
92516+ module_free(mod, mod->module_init_rw);
92517+ module_free_exec(mod, mod->module_init_rx);
92518 kfree(mod->args);
92519 percpu_modfree(mod);
92520
92521 /* Free lock-classes: */
92522- lockdep_free_key_range(mod->module_core, mod->core_size);
92523+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
92524+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
92525
92526 /* Finally, free the core (containing the module structure) */
92527 unset_module_core_ro_nx(mod);
92528- module_free(mod, mod->module_core);
92529+ module_free_exec(mod, mod->module_core_rx);
92530+ module_free(mod, mod->module_core_rw);
92531
92532 #ifdef CONFIG_MPU
92533 update_protections(current->mm);
92534@@ -1942,9 +1964,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
92535 int ret = 0;
92536 const struct kernel_symbol *ksym;
92537
92538+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92539+ int is_fs_load = 0;
92540+ int register_filesystem_found = 0;
92541+ char *p;
92542+
92543+ p = strstr(mod->args, "grsec_modharden_fs");
92544+ if (p) {
92545+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
92546+ /* copy \0 as well */
92547+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
92548+ is_fs_load = 1;
92549+ }
92550+#endif
92551+
92552 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
92553 const char *name = info->strtab + sym[i].st_name;
92554
92555+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92556+ /* it's a real shame this will never get ripped and copied
92557+ upstream! ;(
92558+ */
92559+ if (is_fs_load && !strcmp(name, "register_filesystem"))
92560+ register_filesystem_found = 1;
92561+#endif
92562+
92563 switch (sym[i].st_shndx) {
92564 case SHN_COMMON:
92565 /* Ignore common symbols */
92566@@ -1969,7 +2013,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
92567 ksym = resolve_symbol_wait(mod, info, name);
92568 /* Ok if resolved. */
92569 if (ksym && !IS_ERR(ksym)) {
92570+ pax_open_kernel();
92571 sym[i].st_value = ksym->value;
92572+ pax_close_kernel();
92573 break;
92574 }
92575
92576@@ -1988,11 +2034,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
92577 secbase = (unsigned long)mod_percpu(mod);
92578 else
92579 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
92580+ pax_open_kernel();
92581 sym[i].st_value += secbase;
92582+ pax_close_kernel();
92583 break;
92584 }
92585 }
92586
92587+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92588+ if (is_fs_load && !register_filesystem_found) {
92589+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
92590+ ret = -EPERM;
92591+ }
92592+#endif
92593+
92594 return ret;
92595 }
92596
92597@@ -2076,22 +2131,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
92598 || s->sh_entsize != ~0UL
92599 || strstarts(sname, ".init"))
92600 continue;
92601- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
92602+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
92603+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
92604+ else
92605+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
92606 pr_debug("\t%s\n", sname);
92607 }
92608- switch (m) {
92609- case 0: /* executable */
92610- mod->core_size = debug_align(mod->core_size);
92611- mod->core_text_size = mod->core_size;
92612- break;
92613- case 1: /* RO: text and ro-data */
92614- mod->core_size = debug_align(mod->core_size);
92615- mod->core_ro_size = mod->core_size;
92616- break;
92617- case 3: /* whole core */
92618- mod->core_size = debug_align(mod->core_size);
92619- break;
92620- }
92621 }
92622
92623 pr_debug("Init section allocation order:\n");
92624@@ -2105,23 +2150,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
92625 || s->sh_entsize != ~0UL
92626 || !strstarts(sname, ".init"))
92627 continue;
92628- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
92629- | INIT_OFFSET_MASK);
92630+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
92631+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
92632+ else
92633+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
92634+ s->sh_entsize |= INIT_OFFSET_MASK;
92635 pr_debug("\t%s\n", sname);
92636 }
92637- switch (m) {
92638- case 0: /* executable */
92639- mod->init_size = debug_align(mod->init_size);
92640- mod->init_text_size = mod->init_size;
92641- break;
92642- case 1: /* RO: text and ro-data */
92643- mod->init_size = debug_align(mod->init_size);
92644- mod->init_ro_size = mod->init_size;
92645- break;
92646- case 3: /* whole init */
92647- mod->init_size = debug_align(mod->init_size);
92648- break;
92649- }
92650 }
92651 }
92652
92653@@ -2294,7 +2329,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
92654
92655 /* Put symbol section at end of init part of module. */
92656 symsect->sh_flags |= SHF_ALLOC;
92657- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
92658+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
92659 info->index.sym) | INIT_OFFSET_MASK;
92660 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
92661
92662@@ -2311,13 +2346,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
92663 }
92664
92665 /* Append room for core symbols at end of core part. */
92666- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
92667- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
92668- mod->core_size += strtab_size;
92669+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
92670+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
92671+ mod->core_size_rx += strtab_size;
92672
92673 /* Put string table section at end of init part of module. */
92674 strsect->sh_flags |= SHF_ALLOC;
92675- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
92676+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
92677 info->index.str) | INIT_OFFSET_MASK;
92678 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
92679 }
92680@@ -2335,12 +2370,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
92681 /* Make sure we get permanent strtab: don't use info->strtab. */
92682 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
92683
92684+ pax_open_kernel();
92685+
92686 /* Set types up while we still have access to sections. */
92687 for (i = 0; i < mod->num_symtab; i++)
92688 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
92689
92690- mod->core_symtab = dst = mod->module_core + info->symoffs;
92691- mod->core_strtab = s = mod->module_core + info->stroffs;
92692+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
92693+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
92694 src = mod->symtab;
92695 for (ndst = i = 0; i < mod->num_symtab; i++) {
92696 if (i == 0 ||
92697@@ -2352,6 +2389,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
92698 }
92699 }
92700 mod->core_num_syms = ndst;
92701+
92702+ pax_close_kernel();
92703 }
92704 #else
92705 static inline void layout_symtab(struct module *mod, struct load_info *info)
92706@@ -2385,17 +2424,33 @@ void * __weak module_alloc(unsigned long size)
92707 return vmalloc_exec(size);
92708 }
92709
92710-static void *module_alloc_update_bounds(unsigned long size)
92711+static void *module_alloc_update_bounds_rw(unsigned long size)
92712 {
92713 void *ret = module_alloc(size);
92714
92715 if (ret) {
92716 mutex_lock(&module_mutex);
92717 /* Update module bounds. */
92718- if ((unsigned long)ret < module_addr_min)
92719- module_addr_min = (unsigned long)ret;
92720- if ((unsigned long)ret + size > module_addr_max)
92721- module_addr_max = (unsigned long)ret + size;
92722+ if ((unsigned long)ret < module_addr_min_rw)
92723+ module_addr_min_rw = (unsigned long)ret;
92724+ if ((unsigned long)ret + size > module_addr_max_rw)
92725+ module_addr_max_rw = (unsigned long)ret + size;
92726+ mutex_unlock(&module_mutex);
92727+ }
92728+ return ret;
92729+}
92730+
92731+static void *module_alloc_update_bounds_rx(unsigned long size)
92732+{
92733+ void *ret = module_alloc_exec(size);
92734+
92735+ if (ret) {
92736+ mutex_lock(&module_mutex);
92737+ /* Update module bounds. */
92738+ if ((unsigned long)ret < module_addr_min_rx)
92739+ module_addr_min_rx = (unsigned long)ret;
92740+ if ((unsigned long)ret + size > module_addr_max_rx)
92741+ module_addr_max_rx = (unsigned long)ret + size;
92742 mutex_unlock(&module_mutex);
92743 }
92744 return ret;
92745@@ -2652,7 +2707,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
92746 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
92747
92748 if (info->index.sym == 0) {
92749+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
92750+ /*
92751+ * avoid potentially printing gibberish on attempted load
92752+ * of a module randomized with a different seed
92753+ */
92754+ pr_warn("module has no symbols (stripped?)\n");
92755+#else
92756 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
92757+#endif
92758 return ERR_PTR(-ENOEXEC);
92759 }
92760
92761@@ -2668,8 +2731,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
92762 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
92763 {
92764 const char *modmagic = get_modinfo(info, "vermagic");
92765+ const char *license = get_modinfo(info, "license");
92766 int err;
92767
92768+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
92769+ if (!license || !license_is_gpl_compatible(license))
92770+ return -ENOEXEC;
92771+#endif
92772+
92773 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
92774 modmagic = NULL;
92775
92776@@ -2694,7 +2763,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
92777 }
92778
92779 /* Set up license info based on the info section */
92780- set_license(mod, get_modinfo(info, "license"));
92781+ set_license(mod, license);
92782
92783 return 0;
92784 }
92785@@ -2788,7 +2857,7 @@ static int move_module(struct module *mod, struct load_info *info)
92786 void *ptr;
92787
92788 /* Do the allocs. */
92789- ptr = module_alloc_update_bounds(mod->core_size);
92790+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
92791 /*
92792 * The pointer to this block is stored in the module structure
92793 * which is inside the block. Just mark it as not being a
92794@@ -2798,11 +2867,11 @@ static int move_module(struct module *mod, struct load_info *info)
92795 if (!ptr)
92796 return -ENOMEM;
92797
92798- memset(ptr, 0, mod->core_size);
92799- mod->module_core = ptr;
92800+ memset(ptr, 0, mod->core_size_rw);
92801+ mod->module_core_rw = ptr;
92802
92803- if (mod->init_size) {
92804- ptr = module_alloc_update_bounds(mod->init_size);
92805+ if (mod->init_size_rw) {
92806+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
92807 /*
92808 * The pointer to this block is stored in the module structure
92809 * which is inside the block. This block doesn't need to be
92810@@ -2811,13 +2880,45 @@ static int move_module(struct module *mod, struct load_info *info)
92811 */
92812 kmemleak_ignore(ptr);
92813 if (!ptr) {
92814- module_free(mod, mod->module_core);
92815+ module_free(mod, mod->module_core_rw);
92816 return -ENOMEM;
92817 }
92818- memset(ptr, 0, mod->init_size);
92819- mod->module_init = ptr;
92820+ memset(ptr, 0, mod->init_size_rw);
92821+ mod->module_init_rw = ptr;
92822 } else
92823- mod->module_init = NULL;
92824+ mod->module_init_rw = NULL;
92825+
92826+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
92827+ kmemleak_not_leak(ptr);
92828+ if (!ptr) {
92829+ if (mod->module_init_rw)
92830+ module_free(mod, mod->module_init_rw);
92831+ module_free(mod, mod->module_core_rw);
92832+ return -ENOMEM;
92833+ }
92834+
92835+ pax_open_kernel();
92836+ memset(ptr, 0, mod->core_size_rx);
92837+ pax_close_kernel();
92838+ mod->module_core_rx = ptr;
92839+
92840+ if (mod->init_size_rx) {
92841+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
92842+ kmemleak_ignore(ptr);
92843+ if (!ptr && mod->init_size_rx) {
92844+ module_free_exec(mod, mod->module_core_rx);
92845+ if (mod->module_init_rw)
92846+ module_free(mod, mod->module_init_rw);
92847+ module_free(mod, mod->module_core_rw);
92848+ return -ENOMEM;
92849+ }
92850+
92851+ pax_open_kernel();
92852+ memset(ptr, 0, mod->init_size_rx);
92853+ pax_close_kernel();
92854+ mod->module_init_rx = ptr;
92855+ } else
92856+ mod->module_init_rx = NULL;
92857
92858 /* Transfer each section which specifies SHF_ALLOC */
92859 pr_debug("final section addresses:\n");
92860@@ -2828,16 +2929,45 @@ static int move_module(struct module *mod, struct load_info *info)
92861 if (!(shdr->sh_flags & SHF_ALLOC))
92862 continue;
92863
92864- if (shdr->sh_entsize & INIT_OFFSET_MASK)
92865- dest = mod->module_init
92866- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
92867- else
92868- dest = mod->module_core + shdr->sh_entsize;
92869+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
92870+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
92871+ dest = mod->module_init_rw
92872+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
92873+ else
92874+ dest = mod->module_init_rx
92875+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
92876+ } else {
92877+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
92878+ dest = mod->module_core_rw + shdr->sh_entsize;
92879+ else
92880+ dest = mod->module_core_rx + shdr->sh_entsize;
92881+ }
92882+
92883+ if (shdr->sh_type != SHT_NOBITS) {
92884+
92885+#ifdef CONFIG_PAX_KERNEXEC
92886+#ifdef CONFIG_X86_64
92887+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
92888+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
92889+#endif
92890+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
92891+ pax_open_kernel();
92892+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
92893+ pax_close_kernel();
92894+ } else
92895+#endif
92896
92897- if (shdr->sh_type != SHT_NOBITS)
92898 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
92899+ }
92900 /* Update sh_addr to point to copy in image. */
92901- shdr->sh_addr = (unsigned long)dest;
92902+
92903+#ifdef CONFIG_PAX_KERNEXEC
92904+ if (shdr->sh_flags & SHF_EXECINSTR)
92905+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
92906+ else
92907+#endif
92908+
92909+ shdr->sh_addr = (unsigned long)dest;
92910 pr_debug("\t0x%lx %s\n",
92911 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
92912 }
92913@@ -2894,12 +3024,12 @@ static void flush_module_icache(const struct module *mod)
92914 * Do it before processing of module parameters, so the module
92915 * can provide parameter accessor functions of its own.
92916 */
92917- if (mod->module_init)
92918- flush_icache_range((unsigned long)mod->module_init,
92919- (unsigned long)mod->module_init
92920- + mod->init_size);
92921- flush_icache_range((unsigned long)mod->module_core,
92922- (unsigned long)mod->module_core + mod->core_size);
92923+ if (mod->module_init_rx)
92924+ flush_icache_range((unsigned long)mod->module_init_rx,
92925+ (unsigned long)mod->module_init_rx
92926+ + mod->init_size_rx);
92927+ flush_icache_range((unsigned long)mod->module_core_rx,
92928+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
92929
92930 set_fs(old_fs);
92931 }
92932@@ -2956,8 +3086,10 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
92933 static void module_deallocate(struct module *mod, struct load_info *info)
92934 {
92935 percpu_modfree(mod);
92936- module_free(mod, mod->module_init);
92937- module_free(mod, mod->module_core);
92938+ module_free_exec(mod, mod->module_init_rx);
92939+ module_free_exec(mod, mod->module_core_rx);
92940+ module_free(mod, mod->module_init_rw);
92941+ module_free(mod, mod->module_core_rw);
92942 }
92943
92944 int __weak module_finalize(const Elf_Ehdr *hdr,
92945@@ -2970,7 +3102,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
92946 static int post_relocation(struct module *mod, const struct load_info *info)
92947 {
92948 /* Sort exception table now relocations are done. */
92949+ pax_open_kernel();
92950 sort_extable(mod->extable, mod->extable + mod->num_exentries);
92951+ pax_close_kernel();
92952
92953 /* Copy relocated percpu area over. */
92954 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
92955@@ -3079,11 +3213,12 @@ static int do_init_module(struct module *mod)
92956 mod->strtab = mod->core_strtab;
92957 #endif
92958 unset_module_init_ro_nx(mod);
92959- module_free(mod, mod->module_init);
92960- mod->module_init = NULL;
92961- mod->init_size = 0;
92962- mod->init_ro_size = 0;
92963- mod->init_text_size = 0;
92964+ module_free(mod, mod->module_init_rw);
92965+ module_free_exec(mod, mod->module_init_rx);
92966+ mod->module_init_rw = NULL;
92967+ mod->module_init_rx = NULL;
92968+ mod->init_size_rw = 0;
92969+ mod->init_size_rx = 0;
92970 mutex_unlock(&module_mutex);
92971 wake_up_all(&module_wq);
92972
92973@@ -3151,16 +3286,16 @@ static int complete_formation(struct module *mod, struct load_info *info)
92974 module_bug_finalize(info->hdr, info->sechdrs, mod);
92975
92976 /* Set RO and NX regions for core */
92977- set_section_ro_nx(mod->module_core,
92978- mod->core_text_size,
92979- mod->core_ro_size,
92980- mod->core_size);
92981+ set_section_ro_nx(mod->module_core_rx,
92982+ mod->core_size_rx,
92983+ mod->core_size_rx,
92984+ mod->core_size_rx);
92985
92986 /* Set RO and NX regions for init */
92987- set_section_ro_nx(mod->module_init,
92988- mod->init_text_size,
92989- mod->init_ro_size,
92990- mod->init_size);
92991+ set_section_ro_nx(mod->module_init_rx,
92992+ mod->init_size_rx,
92993+ mod->init_size_rx,
92994+ mod->init_size_rx);
92995
92996 /* Mark state as coming so strong_try_module_get() ignores us,
92997 * but kallsyms etc. can see us. */
92998@@ -3244,9 +3379,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
92999 if (err)
93000 goto free_unload;
93001
93002+ /* Now copy in args */
93003+ mod->args = strndup_user(uargs, ~0UL >> 1);
93004+ if (IS_ERR(mod->args)) {
93005+ err = PTR_ERR(mod->args);
93006+ goto free_unload;
93007+ }
93008+
93009 /* Set up MODINFO_ATTR fields */
93010 setup_modinfo(mod, info);
93011
93012+#ifdef CONFIG_GRKERNSEC_MODHARDEN
93013+ {
93014+ char *p, *p2;
93015+
93016+ if (strstr(mod->args, "grsec_modharden_netdev")) {
93017+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.\n", mod->name);
93018+ err = -EPERM;
93019+ goto free_modinfo;
93020+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
93021+ p += sizeof("grsec_modharden_normal") - 1;
93022+ p2 = strstr(p, "_");
93023+ if (p2) {
93024+ *p2 = '\0';
93025+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
93026+ *p2 = '_';
93027+ }
93028+ err = -EPERM;
93029+ goto free_modinfo;
93030+ }
93031+ }
93032+#endif
93033+
93034 /* Fix up syms, so that st_value is a pointer to location. */
93035 err = simplify_symbols(mod, info);
93036 if (err < 0)
93037@@ -3262,13 +3426,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
93038
93039 flush_module_icache(mod);
93040
93041- /* Now copy in args */
93042- mod->args = strndup_user(uargs, ~0UL >> 1);
93043- if (IS_ERR(mod->args)) {
93044- err = PTR_ERR(mod->args);
93045- goto free_arch_cleanup;
93046- }
93047-
93048 dynamic_debug_setup(info->debug, info->num_debug);
93049
93050 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
93051@@ -3311,11 +3468,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
93052 ddebug_cleanup:
93053 dynamic_debug_remove(info->debug);
93054 synchronize_sched();
93055- kfree(mod->args);
93056- free_arch_cleanup:
93057 module_arch_cleanup(mod);
93058 free_modinfo:
93059 free_modinfo(mod);
93060+ kfree(mod->args);
93061 free_unload:
93062 module_unload_free(mod);
93063 unlink_mod:
93064@@ -3398,10 +3554,16 @@ static const char *get_ksymbol(struct module *mod,
93065 unsigned long nextval;
93066
 93067 /* At worst, next value is at end of module */
93068- if (within_module_init(addr, mod))
93069- nextval = (unsigned long)mod->module_init+mod->init_text_size;
93070+ if (within_module_init_rx(addr, mod))
93071+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
93072+ else if (within_module_init_rw(addr, mod))
93073+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
93074+ else if (within_module_core_rx(addr, mod))
93075+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
93076+ else if (within_module_core_rw(addr, mod))
93077+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
93078 else
93079- nextval = (unsigned long)mod->module_core+mod->core_text_size;
93080+ return NULL;
93081
93082 /* Scan for closest preceding symbol, and next symbol. (ELF
93083 starts real symbols at 1). */
93084@@ -3652,7 +3814,7 @@ static int m_show(struct seq_file *m, void *p)
93085 return 0;
93086
93087 seq_printf(m, "%s %u",
93088- mod->name, mod->init_size + mod->core_size);
93089+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
93090 print_unload_info(m, mod);
93091
93092 /* Informative for users. */
93093@@ -3661,7 +3823,7 @@ static int m_show(struct seq_file *m, void *p)
93094 mod->state == MODULE_STATE_COMING ? "Loading":
93095 "Live");
93096 /* Used by oprofile and other similar tools. */
93097- seq_printf(m, " 0x%pK", mod->module_core);
93098+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
93099
93100 /* Taints info */
93101 if (mod->taints)
93102@@ -3697,7 +3859,17 @@ static const struct file_operations proc_modules_operations = {
93103
93104 static int __init proc_modules_init(void)
93105 {
93106+#ifndef CONFIG_GRKERNSEC_HIDESYM
93107+#ifdef CONFIG_GRKERNSEC_PROC_USER
93108+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
93109+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
93110+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
93111+#else
93112 proc_create("modules", 0, NULL, &proc_modules_operations);
93113+#endif
93114+#else
93115+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
93116+#endif
93117 return 0;
93118 }
93119 module_init(proc_modules_init);
93120@@ -3758,14 +3930,14 @@ struct module *__module_address(unsigned long addr)
93121 {
93122 struct module *mod;
93123
93124- if (addr < module_addr_min || addr > module_addr_max)
93125+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
93126+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
93127 return NULL;
93128
93129 list_for_each_entry_rcu(mod, &modules, list) {
93130 if (mod->state == MODULE_STATE_UNFORMED)
93131 continue;
93132- if (within_module_core(addr, mod)
93133- || within_module_init(addr, mod))
93134+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
93135 return mod;
93136 }
93137 return NULL;
93138@@ -3800,11 +3972,20 @@ bool is_module_text_address(unsigned long addr)
93139 */
93140 struct module *__module_text_address(unsigned long addr)
93141 {
93142- struct module *mod = __module_address(addr);
93143+ struct module *mod;
93144+
93145+#ifdef CONFIG_X86_32
93146+ addr = ktla_ktva(addr);
93147+#endif
93148+
93149+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
93150+ return NULL;
93151+
93152+ mod = __module_address(addr);
93153+
93154 if (mod) {
93155 /* Make sure it's within the text section. */
93156- if (!within(addr, mod->module_init, mod->init_text_size)
93157- && !within(addr, mod->module_core, mod->core_text_size))
93158+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
93159 mod = NULL;
93160 }
93161 return mod;
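
The module.c rework above is the heart of the KERNEXEC module layout: each module gets a writable mapping (module_core_rw) and a separate read-execute mapping (module_core_rx), allocated and bounds-tracked independently, so module memory is never writable and executable at the same time; the few writes into the RX image are bracketed by pax_open_kernel()/pax_close_kernel(). A userspace W^X sketch with mmap/mprotect, assuming x86-64 Linux; the single ret opcode stands in for relocated module text.

#include <sys/mman.h>
#include <string.h>
#include <stddef.h>

/* Stage module "text" W^X-style: write while the region is RW,
 * then flip it to RX before anything may execute it. */
static void *load_text(const unsigned char *image, size_t len)
{
    void *rx = mmap(NULL, len, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (rx == MAP_FAILED)
        return NULL;
    memcpy(rx, image, len);                  /* the "open_kernel" window */
    if (mprotect(rx, len, PROT_READ | PROT_EXEC)) { /* close the window */
        munmap(rx, len);
        return NULL;
    }
    return rx;
}

int main(void)
{
    static const unsigned char ret = 0xc3;   /* x86-64: ret */
    void *text = load_text(&ret, sizeof(ret));
    if (!text)
        return 1;
    ((void (*)(void))text)();                /* runs from the RX mapping */
    return 0;
}

The same split explains the bookkeeping churn in the hunks: core_size becomes core_size_rw plus core_size_rx, layout_sections() sorts each ELF section into one region by SHF_WRITE, and sizes reported through sysfs and /proc/modules are the sum of both halves.
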
93162diff --git a/kernel/notifier.c b/kernel/notifier.c
93163index 4803da6..1c5eea6 100644
93164--- a/kernel/notifier.c
93165+++ b/kernel/notifier.c
93166@@ -5,6 +5,7 @@
93167 #include <linux/rcupdate.h>
93168 #include <linux/vmalloc.h>
93169 #include <linux/reboot.h>
93170+#include <linux/mm.h>
93171
93172 /*
93173 * Notifier list for kernel code which wants to be called
93174@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
93175 while ((*nl) != NULL) {
93176 if (n->priority > (*nl)->priority)
93177 break;
93178- nl = &((*nl)->next);
93179+ nl = (struct notifier_block **)&((*nl)->next);
93180 }
93181- n->next = *nl;
93182+ pax_open_kernel();
93183+ *(const void **)&n->next = *nl;
93184 rcu_assign_pointer(*nl, n);
93185+ pax_close_kernel();
93186 return 0;
93187 }
93188
93189@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
93190 return 0;
93191 if (n->priority > (*nl)->priority)
93192 break;
93193- nl = &((*nl)->next);
93194+ nl = (struct notifier_block **)&((*nl)->next);
93195 }
93196- n->next = *nl;
93197+ pax_open_kernel();
93198+ *(const void **)&n->next = *nl;
93199 rcu_assign_pointer(*nl, n);
93200+ pax_close_kernel();
93201 return 0;
93202 }
93203
93204@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
93205 {
93206 while ((*nl) != NULL) {
93207 if ((*nl) == n) {
93208+ pax_open_kernel();
93209 rcu_assign_pointer(*nl, n->next);
93210+ pax_close_kernel();
93211 return 0;
93212 }
93213- nl = &((*nl)->next);
93214+ nl = (struct notifier_block **)&((*nl)->next);
93215 }
93216 return -ENOENT;
93217 }
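
The notifier hunks show the write side of constification: notifier_block.next is now treated as read-only data, so list updates happen inside a pax_open_kernel()/pax_close_kernel() window that briefly lifts kernel write protection for the current CPU. A userspace model of such a window using mprotect; open_kernel/close_kernel are illustrative stand-ins.

#include <sys/mman.h>
#include <unistd.h>

/* Model of pax_open_kernel()/pax_close_kernel(): briefly lift write
 * protection around a sanctioned store to otherwise read-only data. */
static void open_kernel(void *p)  { mprotect(p, getpagesize(), PROT_READ | PROT_WRITE); }
static void close_kernel(void *p) { mprotect(p, getpagesize(), PROT_READ); }

int main(void)
{
    long *val = mmap(NULL, getpagesize(), PROT_READ,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (val == MAP_FAILED)
        return 1;
    /* An unbracketed "*val = 1;" here would fault: the page is read-only. */
    open_kernel(val);
    *val = 1;                /* the notifier-list pointer update in the patch */
    close_kernel(val);
    return *val != 1;
}
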
93218diff --git a/kernel/padata.c b/kernel/padata.c
93219index 161402f..598814c 100644
93220--- a/kernel/padata.c
93221+++ b/kernel/padata.c
93222@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
93223 * seq_nr mod. number of cpus in use.
93224 */
93225
93226- seq_nr = atomic_inc_return(&pd->seq_nr);
93227+ seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
93228 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
93229
93230 return padata_index_to_cpu(pd, cpu_index);
93231@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
93232 padata_init_pqueues(pd);
93233 padata_init_squeues(pd);
93234 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
93235- atomic_set(&pd->seq_nr, -1);
93236+ atomic_set_unchecked(&pd->seq_nr, -1);
93237 atomic_set(&pd->reorder_objects, 0);
93238 atomic_set(&pd->refcnt, 0);
93239 pd->pinst = pinst;
93240diff --git a/kernel/panic.c b/kernel/panic.c
93241index 62e16ce..9db5047b 100644
93242--- a/kernel/panic.c
93243+++ b/kernel/panic.c
93244@@ -53,7 +53,7 @@ EXPORT_SYMBOL(panic_blink);
93245 /*
93246 * Stop ourself in panic -- architecture code may override this
93247 */
93248-void __weak panic_smp_self_stop(void)
93249+void __weak __noreturn panic_smp_self_stop(void)
93250 {
93251 while (1)
93252 cpu_relax();
93253@@ -420,7 +420,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
93254 disable_trace_on_warning();
93255
93256 pr_warn("------------[ cut here ]------------\n");
93257- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
93258+ pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
93259 raw_smp_processor_id(), current->pid, file, line, caller);
93260
93261 if (args)
93262@@ -474,7 +474,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
93263 */
93264 __visible void __stack_chk_fail(void)
93265 {
93266- panic("stack-protector: Kernel stack is corrupted in: %p\n",
93267+ dump_stack();
93268+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
93269 __builtin_return_address(0));
93270 }
93271 EXPORT_SYMBOL(__stack_chk_fail);
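
In panic.c, __stack_chk_fail() now calls dump_stack() before panicking, so a stack-smash report carries a backtrace rather than just a return address (%pA is a grsecurity format extension that also names the containing module). A userspace analogue of "report, then die" using glibc's execinfo; glibc is assumed and canary_fail is an invented name.

#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Analogue of the patched __stack_chk_fail(): show where we were
 * before aborting, instead of dying silently. */
static void canary_fail(uintptr_t expected, uintptr_t found)
{
    void *frames[16];
    int n = backtrace(frames, 16);

    fprintf(stderr, "canary %lx clobbered to %lx\n",
            (unsigned long)expected, (unsigned long)found);
    backtrace_symbols_fd(frames, n, 2);   /* the "dump_stack()" step */
    abort();
}

A caller that detects a corrupted canary passes the expected and observed values; the process still dies, but the log now shows how it got there.
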
93272diff --git a/kernel/pid.c b/kernel/pid.c
93273index 9b9a266..c20ef80 100644
93274--- a/kernel/pid.c
93275+++ b/kernel/pid.c
93276@@ -33,6 +33,7 @@
93277 #include <linux/rculist.h>
93278 #include <linux/bootmem.h>
93279 #include <linux/hash.h>
93280+#include <linux/security.h>
93281 #include <linux/pid_namespace.h>
93282 #include <linux/init_task.h>
93283 #include <linux/syscalls.h>
93284@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
93285
93286 int pid_max = PID_MAX_DEFAULT;
93287
93288-#define RESERVED_PIDS 300
93289+#define RESERVED_PIDS 500
93290
93291 int pid_max_min = RESERVED_PIDS + 1;
93292 int pid_max_max = PID_MAX_LIMIT;
93293@@ -445,10 +446,18 @@ EXPORT_SYMBOL(pid_task);
93294 */
93295 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
93296 {
93297+ struct task_struct *task;
93298+
93299 rcu_lockdep_assert(rcu_read_lock_held(),
93300 "find_task_by_pid_ns() needs rcu_read_lock()"
93301 " protection");
93302- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
93303+
93304+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
93305+
93306+ if (gr_pid_is_chrooted(task))
93307+ return NULL;
93308+
93309+ return task;
93310 }
93311
93312 struct task_struct *find_task_by_vpid(pid_t vnr)
93313@@ -456,6 +465,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
93314 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
93315 }
93316
93317+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
93318+{
93319+ rcu_lockdep_assert(rcu_read_lock_held(),
93320+ "find_task_by_pid_ns() needs rcu_read_lock()"
93321+ " protection");
93322+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
93323+}
93324+
93325 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
93326 {
93327 struct pid *pid;
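
The pid.c change routes find_task_by_pid_ns() through gr_pid_is_chrooted(), so a chrooted process cannot resolve PIDs outside its jail, while the new find_task_by_vpid_unrestricted() preserves the unfiltered lookup for the few callers that must see everything. A sketch of the filtered/unfiltered pair over a toy task table; is_chrooted_away is an invented predicate standing in for the grsecurity check.

#include <stddef.h>
#include <stdbool.h>

struct task { int pid; int root_id; };

static struct task tasks[] = {
    { 1, 0 }, { 100, 0 }, { 200, 7 },
};

static bool is_chrooted_away(const struct task *t, int viewer_root)
{
    return t->root_id != viewer_root;   /* outside the viewer's jail */
}

/* Unrestricted lookup: kept for callers that must see every task. */
static struct task *find_task_unrestricted(int pid)
{
    for (size_t i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++)
        if (tasks[i].pid == pid)
            return &tasks[i];
    return NULL;
}

/* Filtered lookup: tasks outside the viewer's root simply don't exist. */
static struct task *find_task(int pid, int viewer_root)
{
    struct task *t = find_task_unrestricted(pid);
    if (t && is_chrooted_away(t, viewer_root))
        return NULL;
    return t;
}
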
93328diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
93329index db95d8e..a0ca23f 100644
93330--- a/kernel/pid_namespace.c
93331+++ b/kernel/pid_namespace.c
93332@@ -253,7 +253,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
93333 void __user *buffer, size_t *lenp, loff_t *ppos)
93334 {
93335 struct pid_namespace *pid_ns = task_active_pid_ns(current);
93336- struct ctl_table tmp = *table;
93337+ ctl_table_no_const tmp = *table;
93338
93339 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
93340 return -EPERM;
93341diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
93342index 3b89464..5e38379 100644
93343--- a/kernel/posix-cpu-timers.c
93344+++ b/kernel/posix-cpu-timers.c
93345@@ -1464,14 +1464,14 @@ struct k_clock clock_posix_cpu = {
93346
93347 static __init int init_posix_cpu_timers(void)
93348 {
93349- struct k_clock process = {
93350+ static struct k_clock process = {
93351 .clock_getres = process_cpu_clock_getres,
93352 .clock_get = process_cpu_clock_get,
93353 .timer_create = process_cpu_timer_create,
93354 .nsleep = process_cpu_nsleep,
93355 .nsleep_restart = process_cpu_nsleep_restart,
93356 };
93357- struct k_clock thread = {
93358+ static struct k_clock thread = {
93359 .clock_getres = thread_cpu_clock_getres,
93360 .clock_get = thread_cpu_clock_get,
93361 .timer_create = thread_cpu_timer_create,
93362diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
93363index 424c2d4..679242f 100644
93364--- a/kernel/posix-timers.c
93365+++ b/kernel/posix-timers.c
93366@@ -43,6 +43,7 @@
93367 #include <linux/hash.h>
93368 #include <linux/posix-clock.h>
93369 #include <linux/posix-timers.h>
93370+#include <linux/grsecurity.h>
93371 #include <linux/syscalls.h>
93372 #include <linux/wait.h>
93373 #include <linux/workqueue.h>
93374@@ -122,7 +123,7 @@ static DEFINE_SPINLOCK(hash_lock);
93375 * which we beg off on and pass to do_sys_settimeofday().
93376 */
93377
93378-static struct k_clock posix_clocks[MAX_CLOCKS];
93379+static struct k_clock *posix_clocks[MAX_CLOCKS];
93380
93381 /*
93382 * These ones are defined below.
93383@@ -275,7 +276,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
93384 */
93385 static __init int init_posix_timers(void)
93386 {
93387- struct k_clock clock_realtime = {
93388+ static struct k_clock clock_realtime = {
93389 .clock_getres = hrtimer_get_res,
93390 .clock_get = posix_clock_realtime_get,
93391 .clock_set = posix_clock_realtime_set,
93392@@ -287,7 +288,7 @@ static __init int init_posix_timers(void)
93393 .timer_get = common_timer_get,
93394 .timer_del = common_timer_del,
93395 };
93396- struct k_clock clock_monotonic = {
93397+ static struct k_clock clock_monotonic = {
93398 .clock_getres = hrtimer_get_res,
93399 .clock_get = posix_ktime_get_ts,
93400 .nsleep = common_nsleep,
93401@@ -297,19 +298,19 @@ static __init int init_posix_timers(void)
93402 .timer_get = common_timer_get,
93403 .timer_del = common_timer_del,
93404 };
93405- struct k_clock clock_monotonic_raw = {
93406+ static struct k_clock clock_monotonic_raw = {
93407 .clock_getres = hrtimer_get_res,
93408 .clock_get = posix_get_monotonic_raw,
93409 };
93410- struct k_clock clock_realtime_coarse = {
93411+ static struct k_clock clock_realtime_coarse = {
93412 .clock_getres = posix_get_coarse_res,
93413 .clock_get = posix_get_realtime_coarse,
93414 };
93415- struct k_clock clock_monotonic_coarse = {
93416+ static struct k_clock clock_monotonic_coarse = {
93417 .clock_getres = posix_get_coarse_res,
93418 .clock_get = posix_get_monotonic_coarse,
93419 };
93420- struct k_clock clock_tai = {
93421+ static struct k_clock clock_tai = {
93422 .clock_getres = hrtimer_get_res,
93423 .clock_get = posix_get_tai,
93424 .nsleep = common_nsleep,
93425@@ -319,7 +320,7 @@ static __init int init_posix_timers(void)
93426 .timer_get = common_timer_get,
93427 .timer_del = common_timer_del,
93428 };
93429- struct k_clock clock_boottime = {
93430+ static struct k_clock clock_boottime = {
93431 .clock_getres = hrtimer_get_res,
93432 .clock_get = posix_get_boottime,
93433 .nsleep = common_nsleep,
93434@@ -531,7 +532,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
93435 return;
93436 }
93437
93438- posix_clocks[clock_id] = *new_clock;
93439+ posix_clocks[clock_id] = new_clock;
93440 }
93441 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
93442
93443@@ -577,9 +578,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
93444 return (id & CLOCKFD_MASK) == CLOCKFD ?
93445 &clock_posix_dynamic : &clock_posix_cpu;
93446
93447- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
93448+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
93449 return NULL;
93450- return &posix_clocks[id];
93451+ return posix_clocks[id];
93452 }
93453
93454 static int common_timer_create(struct k_itimer *new_timer)
93455@@ -597,7 +598,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
93456 struct k_clock *kc = clockid_to_kclock(which_clock);
93457 struct k_itimer *new_timer;
93458 int error, new_timer_id;
93459- sigevent_t event;
93460+ sigevent_t event = { };
93461 int it_id_set = IT_ID_NOT_SET;
93462
93463 if (!kc)
93464@@ -1011,6 +1012,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
93465 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
93466 return -EFAULT;
93467
93468+ /* Only the CLOCK_REALTIME clock can be set; all other clocks
93469+ have their clock_set fptr set to a nosettime dummy function.
93470+ CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
93471+ call common_clock_set, which calls do_sys_settimeofday, which
93472+ we hook.
93473+ */
93474+
93475 return kc->clock_set(which_clock, &new_tp);
93476 }
93477
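
In posix-timers.c the clock table changes from an array of struct copies to an array of pointers, and every registered k_clock becomes static: the function-pointer tables can then live in read-only memory instead of being memcpy'd into a writable array an attacker could redirect. A sketch of the pointer-registration pattern; the types are illustrative.

#include <stddef.h>

struct k_clock_ops {
    int (*clock_get)(void);
    int (*clock_set)(int);
};

#define MAX_CLOCKS 4
/* Writable slots hold only pointers; the ops structs themselves can
 * be static/const and end up in a read-only section. */
static const struct k_clock_ops *clocks[MAX_CLOCKS];

static void register_clock(int id, const struct k_clock_ops *ops)
{
    if (id >= 0 && id < MAX_CLOCKS)
        clocks[id] = ops;            /* store the pointer, not a copy */
}

static const struct k_clock_ops *clock_lookup(int id)
{
    if (id < 0 || id >= MAX_CLOCKS || !clocks[id] || !clocks[id]->clock_get)
        return NULL;
    return clocks[id];
}

Note the extra !clocks[id] test in the lookup, mirroring the patched clockid_to_kclock(): with a pointer array, an unregistered slot is NULL rather than a zeroed struct.
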
93478diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
93479index 9a83d78..128bfc0 100644
93480--- a/kernel/power/Kconfig
93481+++ b/kernel/power/Kconfig
93482@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
93483 config HIBERNATION
93484 bool "Hibernation (aka 'suspend to disk')"
93485 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
93486+ depends on !GRKERNSEC_KMEM
93487+ depends on !PAX_MEMORY_SANITIZE
93488 select HIBERNATE_CALLBACKS
93489 select LZO_COMPRESS
93490 select LZO_DECOMPRESS
93491diff --git a/kernel/power/process.c b/kernel/power/process.c
93492index 4ee194e..925778f 100644
93493--- a/kernel/power/process.c
93494+++ b/kernel/power/process.c
93495@@ -35,6 +35,7 @@ static int try_to_freeze_tasks(bool user_only)
93496 unsigned int elapsed_msecs;
93497 bool wakeup = false;
93498 int sleep_usecs = USEC_PER_MSEC;
93499+ bool timedout = false;
93500
93501 do_gettimeofday(&start);
93502
93503@@ -45,13 +46,20 @@ static int try_to_freeze_tasks(bool user_only)
93504
93505 while (true) {
93506 todo = 0;
93507+ if (time_after(jiffies, end_time))
93508+ timedout = true;
93509 read_lock(&tasklist_lock);
93510 do_each_thread(g, p) {
93511 if (p == current || !freeze_task(p))
93512 continue;
93513
93514- if (!freezer_should_skip(p))
93515+ if (!freezer_should_skip(p)) {
93516 todo++;
93517+ if (timedout) {
93518+ printk(KERN_ERR "Task refusing to freeze:\n");
93519+ sched_show_task(p);
93520+ }
93521+ }
93522 } while_each_thread(g, p);
93523 read_unlock(&tasklist_lock);
93524
93525@@ -60,7 +68,7 @@ static int try_to_freeze_tasks(bool user_only)
93526 todo += wq_busy;
93527 }
93528
93529- if (!todo || time_after(jiffies, end_time))
93530+ if (!todo || timedout)
93531 break;
93532
93533 if (pm_wakeup_pending()) {
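
The freezer change samples the deadline once per pass and, on the pass where it has expired, names every task that still refuses to freeze before giving up, instead of breaking out silently with no culprit. A toy sketch of that loop shape; freeze_one and the task list are stand-ins, and a real freezer sleeps between passes while tasks freeze asynchronously.

#include <stdio.h>
#include <stdbool.h>
#include <time.h>

struct ftask { const char *name; bool frozen; };

static bool freeze_one(struct ftask *t) { return t->frozen; }

static int freeze_all(struct ftask *t, int n, time_t deadline)
{
    bool timedout = false;

    while (true) {
        int todo = 0;

        if (time(NULL) > deadline)
            timedout = true;            /* sampled once per pass */
        for (int i = 0; i < n; i++) {
            if (freeze_one(&t[i]))
                continue;
            todo++;
            if (timedout)               /* final pass: name the stragglers */
                fprintf(stderr, "task refusing to freeze: %s\n", t[i].name);
        }
        if (!todo || timedout)
            return todo ? -1 : 0;       /* -1: gave up with tasks unfrozen */
    }
}
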
93534diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
93535index 13e839d..8a71f12 100644
93536--- a/kernel/printk/printk.c
93537+++ b/kernel/printk/printk.c
93538@@ -480,6 +480,11 @@ static int check_syslog_permissions(int type, bool from_file)
93539 if (from_file && type != SYSLOG_ACTION_OPEN)
93540 return 0;
93541
93542+#ifdef CONFIG_GRKERNSEC_DMESG
93543+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
93544+ return -EPERM;
93545+#endif
93546+
93547 if (syslog_action_restricted(type)) {
93548 if (capable(CAP_SYSLOG))
93549 return 0;
93550diff --git a/kernel/profile.c b/kernel/profile.c
93551index 54bf5ba..df6e0a2 100644
93552--- a/kernel/profile.c
93553+++ b/kernel/profile.c
93554@@ -37,7 +37,7 @@ struct profile_hit {
93555 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
93556 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
93557
93558-static atomic_t *prof_buffer;
93559+static atomic_unchecked_t *prof_buffer;
93560 static unsigned long prof_len, prof_shift;
93561
93562 int prof_on __read_mostly;
93563@@ -256,7 +256,7 @@ static void profile_flip_buffers(void)
93564 hits[i].pc = 0;
93565 continue;
93566 }
93567- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
93568+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
93569 hits[i].hits = hits[i].pc = 0;
93570 }
93571 }
93572@@ -317,9 +317,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
93573 * Add the current hit(s) and flush the write-queue out
93574 * to the global buffer:
93575 */
93576- atomic_add(nr_hits, &prof_buffer[pc]);
93577+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
93578 for (i = 0; i < NR_PROFILE_HIT; ++i) {
93579- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
93580+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
93581 hits[i].pc = hits[i].hits = 0;
93582 }
93583 out:
93584@@ -394,7 +394,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
93585 {
93586 unsigned long pc;
93587 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
93588- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
93589+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
93590 }
93591 #endif /* !CONFIG_SMP */
93592
93593@@ -490,7 +490,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
93594 return -EFAULT;
93595 buf++; p++; count--; read++;
93596 }
93597- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
93598+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
93599 if (copy_to_user(buf, (void *)pnt, count))
93600 return -EFAULT;
93601 read += count;
93602@@ -521,7 +521,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
93603 }
93604 #endif
93605 profile_discard_flip_buffers();
93606- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
93607+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
93608 return count;
93609 }
93610
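
Note on the kernel/profile.c hunk above: prof_buffer holds pure statistics, so it moves from atomic_t to atomic_unchecked_t. Under the PaX REFCOUNT hardening, ordinary atomic_t arithmetic traps on overflow to catch reference-count bugs; counters that may legitimately wrap must use the *_unchecked variants, and the two sizeof() sites are updated so the byte arithmetic tracks the new element type. A userspace sketch of the checked/unchecked split; volatile stands in for real atomics in this single-threaded demo, and all names are illustrative:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { volatile int v; } atomic_checked_t;
typedef struct { volatile int v; } atomic_unchecked_t;

/* Checked add: trap instead of wrapping, as the REFCOUNT feature does. */
static void atomic_add_checked(int i, atomic_checked_t *a)
{
    int r;
    if (__builtin_add_overflow(a->v, i, &r)) {
        fprintf(stderr, "refcount overflow detected\n");
        abort();
    }
    a->v = r;
}

/* Unchecked add: wrapping is acceptable for pure statistics
 * (done in unsigned arithmetic so the wrap is well defined). */
static void atomic_add_unchecked(int i, atomic_unchecked_t *a)
{
    a->v = (int)((unsigned)a->v + (unsigned)i);
}

int main(void)
{
    atomic_unchecked_t stat = { INT_MAX };
    atomic_add_unchecked(1, &stat);           /* wraps, by design */
    printf("stat=%d\n", stat.v);

    atomic_checked_t ref = { INT_MAX };
    atomic_add_checked(1, &ref);              /* aborts: overflow caught */
    return 0;
}
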
93611diff --git a/kernel/ptrace.c b/kernel/ptrace.c
93612index adf9862..9d86345 100644
93613--- a/kernel/ptrace.c
93614+++ b/kernel/ptrace.c
93615@@ -327,7 +327,7 @@ static int ptrace_attach(struct task_struct *task, long request,
93616 if (seize)
93617 flags |= PT_SEIZED;
93618 rcu_read_lock();
93619- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
93620+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
93621 flags |= PT_PTRACE_CAP;
93622 rcu_read_unlock();
93623 task->ptrace = flags;
93624@@ -538,7 +538,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
93625 break;
93626 return -EIO;
93627 }
93628- if (copy_to_user(dst, buf, retval))
93629+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
93630 return -EFAULT;
93631 copied += retval;
93632 src += retval;
93633@@ -806,7 +806,7 @@ int ptrace_request(struct task_struct *child, long request,
93634 bool seized = child->ptrace & PT_SEIZED;
93635 int ret = -EIO;
93636 siginfo_t siginfo, *si;
93637- void __user *datavp = (void __user *) data;
93638+ void __user *datavp = (__force void __user *) data;
93639 unsigned long __user *datalp = datavp;
93640 unsigned long flags;
93641
93642@@ -1052,14 +1052,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
93643 goto out;
93644 }
93645
93646+ if (gr_handle_ptrace(child, request)) {
93647+ ret = -EPERM;
93648+ goto out_put_task_struct;
93649+ }
93650+
93651 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
93652 ret = ptrace_attach(child, request, addr, data);
93653 /*
93654 * Some architectures need to do book-keeping after
93655 * a ptrace attach.
93656 */
93657- if (!ret)
93658+ if (!ret) {
93659 arch_ptrace_attach(child);
93660+ gr_audit_ptrace(child);
93661+ }
93662 goto out_put_task_struct;
93663 }
93664
93665@@ -1087,7 +1094,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
93666 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
93667 if (copied != sizeof(tmp))
93668 return -EIO;
93669- return put_user(tmp, (unsigned long __user *)data);
93670+ return put_user(tmp, (__force unsigned long __user *)data);
93671 }
93672
93673 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
93674@@ -1181,7 +1188,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
93675 }
93676
93677 COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
93678- compat_long_t, addr, compat_long_t, data)
93679+ compat_ulong_t, addr, compat_ulong_t, data)
93680 {
93681 struct task_struct *child;
93682 long ret;
93683@@ -1197,14 +1204,21 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
93684 goto out;
93685 }
93686
93687+ if (gr_handle_ptrace(child, request)) {
93688+ ret = -EPERM;
93689+ goto out_put_task_struct;
93690+ }
93691+
93692 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
93693 ret = ptrace_attach(child, request, addr, data);
93694 /*
93695 * Some architectures need to do book-keeping after
93696 * a ptrace attach.
93697 */
93698- if (!ret)
93699+ if (!ret) {
93700 arch_ptrace_attach(child);
93701+ gr_audit_ptrace(child);
93702+ }
93703 goto out_put_task_struct;
93704 }
93705
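
Note on the kernel/ptrace.c hunks above: gr_handle_ptrace() vetoes the request and gr_audit_ptrace() logs successful attaches, mirrored in both the native and compat entry points; the compat syscall's addr/data parameters become compat_ulong_t to avoid sign extension; and ptrace_readdata() now refuses a retval larger than its on-stack buffer before copy_to_user(), so a bad length from a lower layer cannot leak stack memory. A sketch of that validate-before-copy pattern; memcpy stands in for copy_to_user and the names are illustrative:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Never trust a length that exceeds the local buffer, even when it came
 * from an in-kernel helper. */
static int copy_out(char *dst, const char *buf, size_t buflen, size_t retval)
{
    if (retval > buflen)
        return -EFAULT;
    memcpy(dst, buf, retval);
    return 0;
}

int main(void)
{
    char buf[16] = "data", dst[16];
    printf("%d\n", copy_out(dst, buf, sizeof(buf), 4));    /* 0 */
    printf("%d\n", copy_out(dst, buf, sizeof(buf), 100));  /* -EFAULT */
    return 0;
}
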
93706diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
93707index 948a769..5ca842b 100644
93708--- a/kernel/rcu/rcutorture.c
93709+++ b/kernel/rcu/rcutorture.c
93710@@ -124,12 +124,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
93711 rcu_torture_count) = { 0 };
93712 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
93713 rcu_torture_batch) = { 0 };
93714-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
93715-static atomic_t n_rcu_torture_alloc;
93716-static atomic_t n_rcu_torture_alloc_fail;
93717-static atomic_t n_rcu_torture_free;
93718-static atomic_t n_rcu_torture_mberror;
93719-static atomic_t n_rcu_torture_error;
93720+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
93721+static atomic_unchecked_t n_rcu_torture_alloc;
93722+static atomic_unchecked_t n_rcu_torture_alloc_fail;
93723+static atomic_unchecked_t n_rcu_torture_free;
93724+static atomic_unchecked_t n_rcu_torture_mberror;
93725+static atomic_unchecked_t n_rcu_torture_error;
93726 static long n_rcu_torture_barrier_error;
93727 static long n_rcu_torture_boost_ktrerror;
93728 static long n_rcu_torture_boost_rterror;
93729@@ -200,11 +200,11 @@ rcu_torture_alloc(void)
93730
93731 spin_lock_bh(&rcu_torture_lock);
93732 if (list_empty(&rcu_torture_freelist)) {
93733- atomic_inc(&n_rcu_torture_alloc_fail);
93734+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
93735 spin_unlock_bh(&rcu_torture_lock);
93736 return NULL;
93737 }
93738- atomic_inc(&n_rcu_torture_alloc);
93739+ atomic_inc_unchecked(&n_rcu_torture_alloc);
93740 p = rcu_torture_freelist.next;
93741 list_del_init(p);
93742 spin_unlock_bh(&rcu_torture_lock);
93743@@ -217,7 +217,7 @@ rcu_torture_alloc(void)
93744 static void
93745 rcu_torture_free(struct rcu_torture *p)
93746 {
93747- atomic_inc(&n_rcu_torture_free);
93748+ atomic_inc_unchecked(&n_rcu_torture_free);
93749 spin_lock_bh(&rcu_torture_lock);
93750 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
93751 spin_unlock_bh(&rcu_torture_lock);
93752@@ -301,7 +301,7 @@ rcu_torture_pipe_update_one(struct rcu_torture *rp)
93753 i = rp->rtort_pipe_count;
93754 if (i > RCU_TORTURE_PIPE_LEN)
93755 i = RCU_TORTURE_PIPE_LEN;
93756- atomic_inc(&rcu_torture_wcount[i]);
93757+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
93758 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
93759 rp->rtort_mbtest = 0;
93760 return true;
93761@@ -808,7 +808,7 @@ rcu_torture_writer(void *arg)
93762 i = old_rp->rtort_pipe_count;
93763 if (i > RCU_TORTURE_PIPE_LEN)
93764 i = RCU_TORTURE_PIPE_LEN;
93765- atomic_inc(&rcu_torture_wcount[i]);
93766+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
93767 old_rp->rtort_pipe_count++;
93768 switch (synctype[torture_random(&rand) % nsynctypes]) {
93769 case RTWS_DEF_FREE:
93770@@ -926,7 +926,7 @@ static void rcu_torture_timer(unsigned long unused)
93771 return;
93772 }
93773 if (p->rtort_mbtest == 0)
93774- atomic_inc(&n_rcu_torture_mberror);
93775+ atomic_inc_unchecked(&n_rcu_torture_mberror);
93776 spin_lock(&rand_lock);
93777 cur_ops->read_delay(&rand);
93778 n_rcu_torture_timers++;
93779@@ -996,7 +996,7 @@ rcu_torture_reader(void *arg)
93780 continue;
93781 }
93782 if (p->rtort_mbtest == 0)
93783- atomic_inc(&n_rcu_torture_mberror);
93784+ atomic_inc_unchecked(&n_rcu_torture_mberror);
93785 cur_ops->read_delay(&rand);
93786 preempt_disable();
93787 pipe_count = p->rtort_pipe_count;
93788@@ -1054,15 +1054,15 @@ rcu_torture_printk(char *page)
93789 }
93790 page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG);
93791 page += sprintf(page,
93792- "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
93793+ "rtc: %pP ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
93794 rcu_torture_current,
93795 rcu_torture_current_version,
93796 list_empty(&rcu_torture_freelist),
93797- atomic_read(&n_rcu_torture_alloc),
93798- atomic_read(&n_rcu_torture_alloc_fail),
93799- atomic_read(&n_rcu_torture_free));
93800+ atomic_read_unchecked(&n_rcu_torture_alloc),
93801+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
93802+ atomic_read_unchecked(&n_rcu_torture_free));
93803 page += sprintf(page, "rtmbe: %d rtbke: %ld rtbre: %ld ",
93804- atomic_read(&n_rcu_torture_mberror),
93805+ atomic_read_unchecked(&n_rcu_torture_mberror),
93806 n_rcu_torture_boost_ktrerror,
93807 n_rcu_torture_boost_rterror);
93808 page += sprintf(page, "rtbf: %ld rtb: %ld nt: %ld ",
93809@@ -1075,14 +1075,14 @@ rcu_torture_printk(char *page)
93810 n_barrier_attempts,
93811 n_rcu_torture_barrier_error);
93812 page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
93813- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
93814+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
93815 n_rcu_torture_barrier_error != 0 ||
93816 n_rcu_torture_boost_ktrerror != 0 ||
93817 n_rcu_torture_boost_rterror != 0 ||
93818 n_rcu_torture_boost_failure != 0 ||
93819 i > 1) {
93820 page += sprintf(page, "!!! ");
93821- atomic_inc(&n_rcu_torture_error);
93822+ atomic_inc_unchecked(&n_rcu_torture_error);
93823 WARN_ON_ONCE(1);
93824 }
93825 page += sprintf(page, "Reader Pipe: ");
93826@@ -1096,7 +1096,7 @@ rcu_torture_printk(char *page)
93827 page += sprintf(page, "Free-Block Circulation: ");
93828 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
93829 page += sprintf(page, " %d",
93830- atomic_read(&rcu_torture_wcount[i]));
93831+ atomic_read_unchecked(&rcu_torture_wcount[i]));
93832 }
93833 page += sprintf(page, "\n");
93834 if (cur_ops->stats)
93835@@ -1461,7 +1461,7 @@ rcu_torture_cleanup(void)
93836
93837 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
93838
93839- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
93840+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
93841 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
93842 else if (torture_onoff_failures())
93843 rcu_torture_print_module_parms(cur_ops,
93844@@ -1584,18 +1584,18 @@ rcu_torture_init(void)
93845
93846 rcu_torture_current = NULL;
93847 rcu_torture_current_version = 0;
93848- atomic_set(&n_rcu_torture_alloc, 0);
93849- atomic_set(&n_rcu_torture_alloc_fail, 0);
93850- atomic_set(&n_rcu_torture_free, 0);
93851- atomic_set(&n_rcu_torture_mberror, 0);
93852- atomic_set(&n_rcu_torture_error, 0);
93853+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
93854+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
93855+ atomic_set_unchecked(&n_rcu_torture_free, 0);
93856+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
93857+ atomic_set_unchecked(&n_rcu_torture_error, 0);
93858 n_rcu_torture_barrier_error = 0;
93859 n_rcu_torture_boost_ktrerror = 0;
93860 n_rcu_torture_boost_rterror = 0;
93861 n_rcu_torture_boost_failure = 0;
93862 n_rcu_torture_boosts = 0;
93863 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
93864- atomic_set(&rcu_torture_wcount[i], 0);
93865+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
93866 for_each_possible_cpu(cpu) {
93867 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
93868 per_cpu(rcu_torture_count, cpu)[i] = 0;
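
Note on the kernel/rcu/rcutorture.c hunk above: the same checked-to-unchecked counter conversion shown for kernel/profile.c is applied wholesale to the torture-test statistics, which are incremented freely and may wrap. The %p to %pP change in rcu_torture_printk() appears to be the patch set's vsprintf extension for printing a pointer unredacted even when GRKERNSEC_HIDESYM-style pointer censoring is active; that reading is inferred from the patch set's conventions rather than confirmed here.
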
93869diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
93870index c639556..cf0a0d5 100644
93871--- a/kernel/rcu/srcu.c
93872+++ b/kernel/rcu/srcu.c
93873@@ -298,9 +298,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
93874
93875 idx = ACCESS_ONCE(sp->completed) & 0x1;
93876 preempt_disable();
93877- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
93878+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
93879 smp_mb(); /* B */ /* Avoid leaking the critical section. */
93880- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
93881+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
93882 preempt_enable();
93883 return idx;
93884 }
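
Note on the kernel/rcu/srcu.c hunk above: throughout this patch, ACCESS_ONCE() is made const-preserving so it can only be used for reads; any site that writes through it must be converted to ACCESS_ONCE_RW(). A standalone sketch of that split; these definitions mirror the idea and compile with GCC/clang, but are illustrative rather than the patch's exact macros:

#include <stdio.h>

#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

int main(void)
{
    int counter = 0;
    ACCESS_ONCE_RW(counter) += 1;            /* write: needs the RW form */
    printf("%d\n", ACCESS_ONCE(counter));    /* read: const form suffices */
    /* ACCESS_ONCE(counter) = 2;     rejected: assignment to const lvalue */
    return 0;
}
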
93885diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
93886index d9efcc1..ea543e9 100644
93887--- a/kernel/rcu/tiny.c
93888+++ b/kernel/rcu/tiny.c
93889@@ -42,7 +42,7 @@
93890 /* Forward declarations for tiny_plugin.h. */
93891 struct rcu_ctrlblk;
93892 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
93893-static void rcu_process_callbacks(struct softirq_action *unused);
93894+static void rcu_process_callbacks(void);
93895 static void __call_rcu(struct rcu_head *head,
93896 void (*func)(struct rcu_head *rcu),
93897 struct rcu_ctrlblk *rcp);
93898@@ -308,7 +308,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
93899 false));
93900 }
93901
93902-static void rcu_process_callbacks(struct softirq_action *unused)
93903+static __latent_entropy void rcu_process_callbacks(void)
93904 {
93905 __rcu_process_callbacks(&rcu_sched_ctrlblk);
93906 __rcu_process_callbacks(&rcu_bh_ctrlblk);
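
Note on the kernel/rcu/tiny.c hunk above: softirq handlers in this patch set drop their never-used struct softirq_action * argument, and rcu_process_callbacks() gains __latent_entropy so the latent-entropy GCC plugin can mix a little entropy into a pool each time the handler runs. A compile-checkable sketch of the narrowed handler type; the attribute is defined away here because the plugin is absent, and the names are illustrative:

#include <stdio.h>

#define __latent_entropy   /* no-op here; real builds use the GCC plugin */

/* The callback type is narrowed to void(void): the old argument was
 * never used by any handler. */
static void (*softirq_vec_demo[1])(void);

static __latent_entropy void rcu_process_callbacks_demo(void)
{
    puts("processing callbacks");
}

int main(void)
{
    softirq_vec_demo[0] = rcu_process_callbacks_demo;
    softirq_vec_demo[0]();
    return 0;
}
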
93907diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
93908index 858c565..7efd915 100644
93909--- a/kernel/rcu/tiny_plugin.h
93910+++ b/kernel/rcu/tiny_plugin.h
93911@@ -152,17 +152,17 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
93912 dump_stack();
93913 }
93914 if (*rcp->curtail && ULONG_CMP_GE(j, js))
93915- ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
93916+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies +
93917 3 * rcu_jiffies_till_stall_check() + 3;
93918 else if (ULONG_CMP_GE(j, js))
93919- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
93920+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
93921 }
93922
93923 static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
93924 {
93925 rcp->ticks_this_gp = 0;
93926 rcp->gp_start = jiffies;
93927- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
93928+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
93929 }
93930
93931 static void check_cpu_stalls(void)
93932diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
93933index 625d0b0..0bce4d6 100644
93934--- a/kernel/rcu/tree.c
93935+++ b/kernel/rcu/tree.c
93936@@ -263,7 +263,7 @@ static void rcu_momentary_dyntick_idle(void)
93937 */
93938 rdtp = this_cpu_ptr(&rcu_dynticks);
93939 smp_mb__before_atomic(); /* Earlier stuff before QS. */
93940- atomic_add(2, &rdtp->dynticks); /* QS. */
93941+ atomic_add_unchecked(2, &rdtp->dynticks); /* QS. */
93942 smp_mb__after_atomic(); /* Later stuff after QS. */
93943 break;
93944 }
93945@@ -523,9 +523,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
93946 rcu_prepare_for_idle(smp_processor_id());
93947 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
93948 smp_mb__before_atomic(); /* See above. */
93949- atomic_inc(&rdtp->dynticks);
93950+ atomic_inc_unchecked(&rdtp->dynticks);
93951 smp_mb__after_atomic(); /* Force ordering with next sojourn. */
93952- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
93953+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
93954
93955 /*
93956 * It is illegal to enter an extended quiescent state while
93957@@ -643,10 +643,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
93958 int user)
93959 {
93960 smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
93961- atomic_inc(&rdtp->dynticks);
93962+ atomic_inc_unchecked(&rdtp->dynticks);
93963 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
93964 smp_mb__after_atomic(); /* See above. */
93965- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
93966+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
93967 rcu_cleanup_after_idle(smp_processor_id());
93968 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
93969 if (!user && !is_idle_task(current)) {
93970@@ -767,14 +767,14 @@ void rcu_nmi_enter(void)
93971 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
93972
93973 if (rdtp->dynticks_nmi_nesting == 0 &&
93974- (atomic_read(&rdtp->dynticks) & 0x1))
93975+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
93976 return;
93977 rdtp->dynticks_nmi_nesting++;
93978 smp_mb__before_atomic(); /* Force delay from prior write. */
93979- atomic_inc(&rdtp->dynticks);
93980+ atomic_inc_unchecked(&rdtp->dynticks);
93981 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
93982 smp_mb__after_atomic(); /* See above. */
93983- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
93984+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
93985 }
93986
93987 /**
93988@@ -793,9 +793,9 @@ void rcu_nmi_exit(void)
93989 return;
93990 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
93991 smp_mb__before_atomic(); /* See above. */
93992- atomic_inc(&rdtp->dynticks);
93993+ atomic_inc_unchecked(&rdtp->dynticks);
93994 smp_mb__after_atomic(); /* Force delay to next write. */
93995- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
93996+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
93997 }
93998
93999 /**
94000@@ -808,7 +808,7 @@ void rcu_nmi_exit(void)
94001 */
94002 bool notrace __rcu_is_watching(void)
94003 {
94004- return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
94005+ return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
94006 }
94007
94008 /**
94009@@ -891,7 +891,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
94010 static int dyntick_save_progress_counter(struct rcu_data *rdp,
94011 bool *isidle, unsigned long *maxj)
94012 {
94013- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
94014+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
94015 rcu_sysidle_check_cpu(rdp, isidle, maxj);
94016 if ((rdp->dynticks_snap & 0x1) == 0) {
94017 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
94018@@ -920,7 +920,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
94019 int *rcrmp;
94020 unsigned int snap;
94021
94022- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
94023+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
94024 snap = (unsigned int)rdp->dynticks_snap;
94025
94026 /*
94027@@ -983,10 +983,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
94028 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
94029 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
94030 if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
94031- ACCESS_ONCE(rdp->cond_resched_completed) =
94032+ ACCESS_ONCE_RW(rdp->cond_resched_completed) =
94033 ACCESS_ONCE(rdp->mynode->completed);
94034 smp_mb(); /* ->cond_resched_completed before *rcrmp. */
94035- ACCESS_ONCE(*rcrmp) =
94036+ ACCESS_ONCE_RW(*rcrmp) =
94037 ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
94038 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
94039 rdp->rsp->jiffies_resched += 5; /* Enable beating. */
94040@@ -1008,7 +1008,7 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
94041 rsp->gp_start = j;
94042 smp_wmb(); /* Record start time before stall time. */
94043 j1 = rcu_jiffies_till_stall_check();
94044- ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
94045+ ACCESS_ONCE_RW(rsp->jiffies_stall) = j + j1;
94046 rsp->jiffies_resched = j + j1 / 2;
94047 }
94048
94049@@ -1052,7 +1052,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
94050 raw_spin_unlock_irqrestore(&rnp->lock, flags);
94051 return;
94052 }
94053- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
94054+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
94055 raw_spin_unlock_irqrestore(&rnp->lock, flags);
94056
94057 /*
94058@@ -1130,7 +1130,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
94059
94060 raw_spin_lock_irqsave(&rnp->lock, flags);
94061 if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
94062- ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
94063+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies +
94064 3 * rcu_jiffies_till_stall_check() + 3;
94065 raw_spin_unlock_irqrestore(&rnp->lock, flags);
94066
94067@@ -1214,7 +1214,7 @@ void rcu_cpu_stall_reset(void)
94068 struct rcu_state *rsp;
94069
94070 for_each_rcu_flavor(rsp)
94071- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
94072+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
94073 }
94074
94075 /*
94076@@ -1594,7 +1594,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
94077 raw_spin_unlock_irq(&rnp->lock);
94078 return 0;
94079 }
94080- ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
94081+ ACCESS_ONCE_RW(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
94082
94083 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
94084 /*
94085@@ -1635,9 +1635,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
94086 rdp = this_cpu_ptr(rsp->rda);
94087 rcu_preempt_check_blocked_tasks(rnp);
94088 rnp->qsmask = rnp->qsmaskinit;
94089- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
94090+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
94091 WARN_ON_ONCE(rnp->completed != rsp->completed);
94092- ACCESS_ONCE(rnp->completed) = rsp->completed;
94093+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
94094 if (rnp == rdp->mynode)
94095 (void)__note_gp_changes(rsp, rnp, rdp);
94096 rcu_preempt_boost_start_gp(rnp);
94097@@ -1687,7 +1687,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
94098 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
94099 raw_spin_lock_irq(&rnp->lock);
94100 smp_mb__after_unlock_lock();
94101- ACCESS_ONCE(rsp->gp_flags) &= ~RCU_GP_FLAG_FQS;
94102+ ACCESS_ONCE_RW(rsp->gp_flags) &= ~RCU_GP_FLAG_FQS;
94103 raw_spin_unlock_irq(&rnp->lock);
94104 }
94105 return fqs_state;
94106@@ -1732,7 +1732,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
94107 rcu_for_each_node_breadth_first(rsp, rnp) {
94108 raw_spin_lock_irq(&rnp->lock);
94109 smp_mb__after_unlock_lock();
94110- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
94111+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
94112 rdp = this_cpu_ptr(rsp->rda);
94113 if (rnp == rdp->mynode)
94114 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
94115@@ -1747,14 +1747,14 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
94116 rcu_nocb_gp_set(rnp, nocb);
94117
94118 /* Declare grace period done. */
94119- ACCESS_ONCE(rsp->completed) = rsp->gpnum;
94120+ ACCESS_ONCE_RW(rsp->completed) = rsp->gpnum;
94121 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
94122 rsp->fqs_state = RCU_GP_IDLE;
94123 rdp = this_cpu_ptr(rsp->rda);
94124 /* Advance CBs to reduce false positives below. */
94125 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
94126 if (needgp || cpu_needs_another_gp(rsp, rdp)) {
94127- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
94128+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
94129 trace_rcu_grace_period(rsp->name,
94130 ACCESS_ONCE(rsp->gpnum),
94131 TPS("newreq"));
94132@@ -1879,7 +1879,7 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
94133 */
94134 return false;
94135 }
94136- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
94137+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
94138 trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
94139 TPS("newreq"));
94140
94141@@ -2100,7 +2100,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
94142 rsp->qlen += rdp->qlen;
94143 rdp->n_cbs_orphaned += rdp->qlen;
94144 rdp->qlen_lazy = 0;
94145- ACCESS_ONCE(rdp->qlen) = 0;
94146+ ACCESS_ONCE_RW(rdp->qlen) = 0;
94147 }
94148
94149 /*
94150@@ -2347,7 +2347,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
94151 }
94152 smp_mb(); /* List handling before counting for rcu_barrier(). */
94153 rdp->qlen_lazy -= count_lazy;
94154- ACCESS_ONCE(rdp->qlen) -= count;
94155+ ACCESS_ONCE_RW(rdp->qlen) -= count;
94156 rdp->n_cbs_invoked += count;
94157
94158 /* Reinstate batch limit if we have worked down the excess. */
94159@@ -2492,7 +2492,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
94160 if (rnp_old != NULL)
94161 raw_spin_unlock(&rnp_old->fqslock);
94162 if (ret) {
94163- ACCESS_ONCE(rsp->n_force_qs_lh)++;
94164+ ACCESS_ONCE_RW(rsp->n_force_qs_lh)++;
94165 return;
94166 }
94167 rnp_old = rnp;
94168@@ -2504,11 +2504,11 @@ static void force_quiescent_state(struct rcu_state *rsp)
94169 smp_mb__after_unlock_lock();
94170 raw_spin_unlock(&rnp_old->fqslock);
94171 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
94172- ACCESS_ONCE(rsp->n_force_qs_lh)++;
94173+ ACCESS_ONCE_RW(rsp->n_force_qs_lh)++;
94174 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
94175 return; /* Someone beat us to it. */
94176 }
94177- ACCESS_ONCE(rsp->gp_flags) |= RCU_GP_FLAG_FQS;
94178+ ACCESS_ONCE_RW(rsp->gp_flags) |= RCU_GP_FLAG_FQS;
94179 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
94180 wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */
94181 }
94182@@ -2553,7 +2553,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
94183 /*
94184 * Do RCU core processing for the current CPU.
94185 */
94186-static void rcu_process_callbacks(struct softirq_action *unused)
94187+static void rcu_process_callbacks(void)
94188 {
94189 struct rcu_state *rsp;
94190
94191@@ -2665,7 +2665,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
94192 WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
94193 if (debug_rcu_head_queue(head)) {
94194 /* Probable double call_rcu(), so leak the callback. */
94195- ACCESS_ONCE(head->func) = rcu_leak_callback;
94196+ ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
94197 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
94198 return;
94199 }
94200@@ -2693,7 +2693,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
94201 local_irq_restore(flags);
94202 return;
94203 }
94204- ACCESS_ONCE(rdp->qlen)++;
94205+ ACCESS_ONCE_RW(rdp->qlen)++;
94206 if (lazy)
94207 rdp->qlen_lazy++;
94208 else
94209@@ -2968,11 +2968,11 @@ void synchronize_sched_expedited(void)
94210 * counter wrap on a 32-bit system. Quite a few more CPUs would of
94211 * course be required on a 64-bit system.
94212 */
94213- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
94214+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
94215 (ulong)atomic_long_read(&rsp->expedited_done) +
94216 ULONG_MAX / 8)) {
94217 synchronize_sched();
94218- atomic_long_inc(&rsp->expedited_wrap);
94219+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
94220 return;
94221 }
94222
94223@@ -2980,7 +2980,7 @@ void synchronize_sched_expedited(void)
94224 * Take a ticket. Note that atomic_inc_return() implies a
94225 * full memory barrier.
94226 */
94227- snap = atomic_long_inc_return(&rsp->expedited_start);
94228+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
94229 firstsnap = snap;
94230 get_online_cpus();
94231 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
94232@@ -2993,14 +2993,14 @@ void synchronize_sched_expedited(void)
94233 synchronize_sched_expedited_cpu_stop,
94234 NULL) == -EAGAIN) {
94235 put_online_cpus();
94236- atomic_long_inc(&rsp->expedited_tryfail);
94237+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
94238
94239 /* Check to see if someone else did our work for us. */
94240 s = atomic_long_read(&rsp->expedited_done);
94241 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
94242 /* ensure test happens before caller kfree */
94243 smp_mb__before_atomic(); /* ^^^ */
94244- atomic_long_inc(&rsp->expedited_workdone1);
94245+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
94246 return;
94247 }
94248
94249@@ -3009,7 +3009,7 @@ void synchronize_sched_expedited(void)
94250 udelay(trycount * num_online_cpus());
94251 } else {
94252 wait_rcu_gp(call_rcu_sched);
94253- atomic_long_inc(&rsp->expedited_normal);
94254+ atomic_long_inc_unchecked(&rsp->expedited_normal);
94255 return;
94256 }
94257
94258@@ -3018,7 +3018,7 @@ void synchronize_sched_expedited(void)
94259 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
94260 /* ensure test happens before caller kfree */
94261 smp_mb__before_atomic(); /* ^^^ */
94262- atomic_long_inc(&rsp->expedited_workdone2);
94263+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
94264 return;
94265 }
94266
94267@@ -3030,10 +3030,10 @@ void synchronize_sched_expedited(void)
94268 * period works for us.
94269 */
94270 get_online_cpus();
94271- snap = atomic_long_read(&rsp->expedited_start);
94272+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
94273 smp_mb(); /* ensure read is before try_stop_cpus(). */
94274 }
94275- atomic_long_inc(&rsp->expedited_stoppedcpus);
94276+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
94277
94278 /*
94279 * Everyone up to our most recent fetch is covered by our grace
94280@@ -3042,16 +3042,16 @@ void synchronize_sched_expedited(void)
94281 * than we did already did their update.
94282 */
94283 do {
94284- atomic_long_inc(&rsp->expedited_done_tries);
94285+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
94286 s = atomic_long_read(&rsp->expedited_done);
94287 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
94288 /* ensure test happens before caller kfree */
94289 smp_mb__before_atomic(); /* ^^^ */
94290- atomic_long_inc(&rsp->expedited_done_lost);
94291+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
94292 break;
94293 }
94294 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
94295- atomic_long_inc(&rsp->expedited_done_exit);
94296+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
94297
94298 put_online_cpus();
94299 }
94300@@ -3257,7 +3257,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
94301 * ACCESS_ONCE() to prevent the compiler from speculating
94302 * the increment to precede the early-exit check.
94303 */
94304- ACCESS_ONCE(rsp->n_barrier_done)++;
94305+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
94306 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
94307 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
94308 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
94309@@ -3307,7 +3307,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
94310
94311 /* Increment ->n_barrier_done to prevent duplicate work. */
94312 smp_mb(); /* Keep increment after above mechanism. */
94313- ACCESS_ONCE(rsp->n_barrier_done)++;
94314+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
94315 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
94316 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
94317 smp_mb(); /* Keep increment before caller's subsequent code. */
94318@@ -3352,10 +3352,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
94319 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
94320 init_callback_list(rdp);
94321 rdp->qlen_lazy = 0;
94322- ACCESS_ONCE(rdp->qlen) = 0;
94323+ ACCESS_ONCE_RW(rdp->qlen) = 0;
94324 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
94325 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
94326- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
94327+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
94328 rdp->cpu = cpu;
94329 rdp->rsp = rsp;
94330 rcu_boot_init_nocb_percpu_data(rdp);
94331@@ -3388,8 +3388,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
94332 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
94333 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
94334 rcu_sysidle_init_percpu_data(rdp->dynticks);
94335- atomic_set(&rdp->dynticks->dynticks,
94336- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
94337+ atomic_set_unchecked(&rdp->dynticks->dynticks,
94338+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
94339 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
94340
94341 /* Add CPU to rcu_node bitmasks. */
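
Note on the kernel/rcu/tree.c hunks above: the dynticks counter is a parity/sequence value (odd while the CPU is non-idle, even while idle) that wraps by design, so it and the expedited-grace-period ticket counters move to the unchecked atomic types; atomic_add_return_unchecked(0, ...) preserves the existing read-with-full-barrier idiom. The remaining changes are the ACCESS_ONCE to ACCESS_ONCE_RW conversion at every write site. A sketch of the parity-counter idiom using plain C11 atomics; the names are illustrative, not the kernel's types:

#include <stdatomic.h>
#include <stdio.h>

/* Parity counter: odd while "active", even while "idle"; it is expected
 * to wrap over the machine's lifetime, so overflow checking would only
 * get in the way. */
static atomic_uint dynticks;

static void eqs_exit(void)  { atomic_fetch_add(&dynticks, 1); }  /* -> odd  */
static void eqs_enter(void) { atomic_fetch_add(&dynticks, 1); }  /* -> even */

int main(void)
{
    eqs_exit();
    printf("active: %u\n", atomic_load(&dynticks) & 0x1);   /* 1 */
    eqs_enter();
    printf("active: %u\n", atomic_load(&dynticks) & 0x1);   /* 0 */
    return 0;
}
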
94342diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
94343index 0f69a79..c85c2dc 100644
94344--- a/kernel/rcu/tree.h
94345+++ b/kernel/rcu/tree.h
94346@@ -87,11 +87,11 @@ struct rcu_dynticks {
94347 long long dynticks_nesting; /* Track irq/process nesting level. */
94348 /* Process level is worth LLONG_MAX/2. */
94349 int dynticks_nmi_nesting; /* Track NMI nesting level. */
94350- atomic_t dynticks; /* Even value for idle, else odd. */
94351+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
94352 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
94353 long long dynticks_idle_nesting;
94354 /* irq/process nesting level from idle. */
94355- atomic_t dynticks_idle; /* Even value for idle, else odd. */
94356+ atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
94357 /* "Idle" excludes userspace execution. */
94358 unsigned long dynticks_idle_jiffies;
94359 /* End of last non-NMI non-idle period. */
94360@@ -435,17 +435,17 @@ struct rcu_state {
94361 /* _rcu_barrier(). */
94362 /* End of fields guarded by barrier_mutex. */
94363
94364- atomic_long_t expedited_start; /* Starting ticket. */
94365- atomic_long_t expedited_done; /* Done ticket. */
94366- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
94367- atomic_long_t expedited_tryfail; /* # acquisition failures. */
94368- atomic_long_t expedited_workdone1; /* # done by others #1. */
94369- atomic_long_t expedited_workdone2; /* # done by others #2. */
94370- atomic_long_t expedited_normal; /* # fallbacks to normal. */
94371- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
94372- atomic_long_t expedited_done_tries; /* # tries to update _done. */
94373- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
94374- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
94375+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
94376+ atomic_long_t expedited_done; /* Done ticket. */
94377+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
94378+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
94379+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
94380+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
94381+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
94382+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
94383+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
94384+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
94385+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
94386
94387 unsigned long jiffies_force_qs; /* Time at which to invoke */
94388 /* force_quiescent_state(). */
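
Note on the kernel/rcu/tree.h hunk above: the struct fields track the tree.c conversions, with one deliberate exception: expedited_done stays a plain atomic_long_t. Presumably that is because it is the target of atomic_long_cmpxchg() in synchronize_sched_expedited(), where the surrounding ULONG_CMP_GE() comparisons already tolerate wraparound, so the checked type costs nothing there; this reasoning is inferred from the call sites, not stated in the patch.
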
94389diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
94390index 02ac0fb..4aa4a36 100644
94391--- a/kernel/rcu/tree_plugin.h
94392+++ b/kernel/rcu/tree_plugin.h
94393@@ -735,7 +735,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
94394 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
94395 {
94396 return !rcu_preempted_readers_exp(rnp) &&
94397- ACCESS_ONCE(rnp->expmask) == 0;
94398+ ACCESS_ONCE_RW(rnp->expmask) == 0;
94399 }
94400
94401 /*
94402@@ -897,7 +897,7 @@ void synchronize_rcu_expedited(void)
94403
94404 /* Clean up and exit. */
94405 smp_mb(); /* ensure expedited GP seen before counter increment. */
94406- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
94407+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
94408 unlock_mb_ret:
94409 mutex_unlock(&sync_rcu_preempt_exp_mutex);
94410 mb_ret:
94411@@ -1447,7 +1447,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
94412 free_cpumask_var(cm);
94413 }
94414
94415-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
94416+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
94417 .store = &rcu_cpu_kthread_task,
94418 .thread_should_run = rcu_cpu_kthread_should_run,
94419 .thread_fn = rcu_cpu_kthread,
94420@@ -1926,7 +1926,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
94421 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
94422 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
94423 cpu, ticks_value, ticks_title,
94424- atomic_read(&rdtp->dynticks) & 0xfff,
94425+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
94426 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
94427 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
94428 fast_no_hz);
94429@@ -2079,7 +2079,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
94430
94431 /* Enqueue the callback on the nocb list and update counts. */
94432 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
94433- ACCESS_ONCE(*old_rhpp) = rhp;
94434+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
94435 atomic_long_add(rhcount, &rdp->nocb_q_count);
94436 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
94437
94438@@ -2255,12 +2255,12 @@ static int rcu_nocb_kthread(void *arg)
94439 * Extract queued callbacks, update counts, and wait
94440 * for a grace period to elapse.
94441 */
94442- ACCESS_ONCE(rdp->nocb_head) = NULL;
94443+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
94444 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
94445 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
94446 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
94447- ACCESS_ONCE(rdp->nocb_p_count) += c;
94448- ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
94449+ ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
94450+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
94451 rcu_nocb_wait_gp(rdp);
94452
94453 /* Each pass through the following loop invokes a callback. */
94454@@ -2286,8 +2286,8 @@ static int rcu_nocb_kthread(void *arg)
94455 list = next;
94456 }
94457 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
94458- ACCESS_ONCE(rdp->nocb_p_count) -= c;
94459- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
94460+ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
94461+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
94462 rdp->n_nocbs_invoked += c;
94463 }
94464 return 0;
94465@@ -2304,7 +2304,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
94466 {
94467 if (!rcu_nocb_need_deferred_wakeup(rdp))
94468 return;
94469- ACCESS_ONCE(rdp->nocb_defer_wakeup) = false;
94470+ ACCESS_ONCE_RW(rdp->nocb_defer_wakeup) = false;
94471 wake_up(&rdp->nocb_wq);
94472 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWakeEmpty"));
94473 }
94474@@ -2330,7 +2330,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
94475 t = kthread_run(rcu_nocb_kthread, rdp,
94476 "rcuo%c/%d", rsp->abbr, cpu);
94477 BUG_ON(IS_ERR(t));
94478- ACCESS_ONCE(rdp->nocb_kthread) = t;
94479+ ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
94480 }
94481 }
94482
94483@@ -2461,11 +2461,11 @@ static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
94484
94485 /* Record start of fully idle period. */
94486 j = jiffies;
94487- ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
94488+ ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
94489 smp_mb__before_atomic();
94490- atomic_inc(&rdtp->dynticks_idle);
94491+ atomic_inc_unchecked(&rdtp->dynticks_idle);
94492 smp_mb__after_atomic();
94493- WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
94494+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
94495 }
94496
94497 /*
94498@@ -2530,9 +2530,9 @@ static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
94499
94500 /* Record end of idle period. */
94501 smp_mb__before_atomic();
94502- atomic_inc(&rdtp->dynticks_idle);
94503+ atomic_inc_unchecked(&rdtp->dynticks_idle);
94504 smp_mb__after_atomic();
94505- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
94506+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
94507
94508 /*
94509 * If we are the timekeeping CPU, we are permitted to be non-idle
94510@@ -2573,7 +2573,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
94511 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
94512
94513 /* Pick up current idle and NMI-nesting counter and check. */
94514- cur = atomic_read(&rdtp->dynticks_idle);
94515+ cur = atomic_read_unchecked(&rdtp->dynticks_idle);
94516 if (cur & 0x1) {
94517 *isidle = false; /* We are not idle! */
94518 return;
94519@@ -2622,7 +2622,7 @@ static void rcu_sysidle(unsigned long j)
94520 case RCU_SYSIDLE_NOT:
94521
94522 /* First time all are idle, so note a short idle period. */
94523- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
94524+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
94525 break;
94526
94527 case RCU_SYSIDLE_SHORT:
94528@@ -2660,7 +2660,7 @@ static void rcu_sysidle_cancel(void)
94529 {
94530 smp_mb();
94531 if (full_sysidle_state > RCU_SYSIDLE_SHORT)
94532- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
94533+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
94534 }
94535
94536 /*
94537@@ -2708,7 +2708,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
94538 smp_mb(); /* grace period precedes setting inuse. */
94539
94540 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
94541- ACCESS_ONCE(rshp->inuse) = 0;
94542+ ACCESS_ONCE_RW(rshp->inuse) = 0;
94543 }
94544
94545 /*
94546diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
94547index 5cdc62e..cc52e88 100644
94548--- a/kernel/rcu/tree_trace.c
94549+++ b/kernel/rcu/tree_trace.c
94550@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
94551 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
94552 rdp->passed_quiesce, rdp->qs_pending);
94553 seq_printf(m, " dt=%d/%llx/%d df=%lu",
94554- atomic_read(&rdp->dynticks->dynticks),
94555+ atomic_read_unchecked(&rdp->dynticks->dynticks),
94556 rdp->dynticks->dynticks_nesting,
94557 rdp->dynticks->dynticks_nmi_nesting,
94558 rdp->dynticks_fqs);
94559@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
94560 struct rcu_state *rsp = (struct rcu_state *)m->private;
94561
94562 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
94563- atomic_long_read(&rsp->expedited_start),
94564+ atomic_long_read_unchecked(&rsp->expedited_start),
94565 atomic_long_read(&rsp->expedited_done),
94566- atomic_long_read(&rsp->expedited_wrap),
94567- atomic_long_read(&rsp->expedited_tryfail),
94568- atomic_long_read(&rsp->expedited_workdone1),
94569- atomic_long_read(&rsp->expedited_workdone2),
94570- atomic_long_read(&rsp->expedited_normal),
94571- atomic_long_read(&rsp->expedited_stoppedcpus),
94572- atomic_long_read(&rsp->expedited_done_tries),
94573- atomic_long_read(&rsp->expedited_done_lost),
94574- atomic_long_read(&rsp->expedited_done_exit));
94575+ atomic_long_read_unchecked(&rsp->expedited_wrap),
94576+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
94577+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
94578+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
94579+ atomic_long_read_unchecked(&rsp->expedited_normal),
94580+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
94581+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
94582+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
94583+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
94584 return 0;
94585 }
94586
94587diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
94588index bc78835..7691a45 100644
94589--- a/kernel/rcu/update.c
94590+++ b/kernel/rcu/update.c
94591@@ -311,10 +311,10 @@ int rcu_jiffies_till_stall_check(void)
94592 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
94593 */
94594 if (till_stall_check < 3) {
94595- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
94596+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
94597 till_stall_check = 3;
94598 } else if (till_stall_check > 300) {
94599- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
94600+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
94601 till_stall_check = 300;
94602 }
94603 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
94604diff --git a/kernel/resource.c b/kernel/resource.c
94605index 3c2237a..4568d96 100644
94606--- a/kernel/resource.c
94607+++ b/kernel/resource.c
94608@@ -152,8 +152,18 @@ static const struct file_operations proc_iomem_operations = {
94609
94610 static int __init ioresources_init(void)
94611 {
94612+#ifdef CONFIG_GRKERNSEC_PROC_ADD
94613+#ifdef CONFIG_GRKERNSEC_PROC_USER
94614+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
94615+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
94616+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
94617+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
94618+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
94619+#endif
94620+#else
94621 proc_create("ioports", 0, NULL, &proc_ioports_operations);
94622 proc_create("iomem", 0, NULL, &proc_iomem_operations);
94623+#endif
94624 return 0;
94625 }
94626 __initcall(ioresources_init);
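
Note on the kernel/resource.c hunk above: under CONFIG_GRKERNSEC_PROC_ADD, /proc/ioports and /proc/iomem are created owner-readable only (S_IRUSR) or owner-and-group readable (S_IRUSR | S_IRGRP) instead of world-readable, closing off an easy physical-memory-layout oracle for unprivileged users. A small sketch of the same compile-time mode selection; DEMO_PROC_USER and DEMO_PROC_USERGROUP are hypothetical stand-ins for the config symbols:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
    mode_t mode =
#if defined(DEMO_PROC_USER)           /* analogue of GRKERNSEC_PROC_USER */
        S_IRUSR;
#elif defined(DEMO_PROC_USERGROUP)    /* analogue of ..._PROC_USERGROUP */
        S_IRUSR | S_IRGRP;
#else
        S_IRUSR | S_IRGRP | S_IROTH;  /* stock behavior: mode 0 -> 0444 */
#endif
    printf("proc entry mode: %o\n", (unsigned)mode);
    return 0;
}
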
94627diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
94628index e73efba..c9bfbd4 100644
94629--- a/kernel/sched/auto_group.c
94630+++ b/kernel/sched/auto_group.c
94631@@ -11,7 +11,7 @@
94632
94633 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
94634 static struct autogroup autogroup_default;
94635-static atomic_t autogroup_seq_nr;
94636+static atomic_unchecked_t autogroup_seq_nr;
94637
94638 void __init autogroup_init(struct task_struct *init_task)
94639 {
94640@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
94641
94642 kref_init(&ag->kref);
94643 init_rwsem(&ag->lock);
94644- ag->id = atomic_inc_return(&autogroup_seq_nr);
94645+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
94646 ag->tg = tg;
94647 #ifdef CONFIG_RT_GROUP_SCHED
94648 /*
94649diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
94650index a63f4dc..349bbb0 100644
94651--- a/kernel/sched/completion.c
94652+++ b/kernel/sched/completion.c
94653@@ -204,7 +204,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
94654 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
94655 * or number of jiffies left till timeout) if completed.
94656 */
94657-long __sched
94658+long __sched __intentional_overflow(-1)
94659 wait_for_completion_interruptible_timeout(struct completion *x,
94660 unsigned long timeout)
94661 {
94662@@ -221,7 +221,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
94663 *
94664 * Return: -ERESTARTSYS if interrupted, 0 if completed.
94665 */
94666-int __sched wait_for_completion_killable(struct completion *x)
94667+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
94668 {
94669 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
94670 if (t == -ERESTARTSYS)
94671@@ -242,7 +242,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
94672 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
94673 * or number of jiffies left till timeout) if completed.
94674 */
94675-long __sched
94676+long __sched __intentional_overflow(-1)
94677 wait_for_completion_killable_timeout(struct completion *x,
94678 unsigned long timeout)
94679 {
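
Note on the kernel/sched/completion.c hunk above: __intentional_overflow(-1) is an annotation consumed by the size_overflow GCC plugin; it appears to mark these timeout helpers as functions whose signed return value intentionally carries values the plugin would otherwise flag (negative error codes alongside large jiffies counts), suppressing instrumentation on them rather than changing behavior.
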
94680diff --git a/kernel/sched/core.c b/kernel/sched/core.c
94681index 0acf96b..80ba955 100644
94682--- a/kernel/sched/core.c
94683+++ b/kernel/sched/core.c
94684@@ -1849,7 +1849,7 @@ void set_numabalancing_state(bool enabled)
94685 int sysctl_numa_balancing(struct ctl_table *table, int write,
94686 void __user *buffer, size_t *lenp, loff_t *ppos)
94687 {
94688- struct ctl_table t;
94689+ ctl_table_no_const t;
94690 int err;
94691 int state = numabalancing_enabled;
94692
94693@@ -2312,8 +2312,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
94694 next->active_mm = oldmm;
94695 atomic_inc(&oldmm->mm_count);
94696 enter_lazy_tlb(oldmm, next);
94697- } else
94698+ } else {
94699 switch_mm(oldmm, mm, next);
94700+ populate_stack();
94701+ }
94702
94703 if (!prev->mm) {
94704 prev->active_mm = NULL;
94705@@ -3081,6 +3083,8 @@ int can_nice(const struct task_struct *p, const int nice)
94706 /* convert nice value [19,-20] to rlimit style value [1,40] */
94707 int nice_rlim = nice_to_rlimit(nice);
94708
94709+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
94710+
94711 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
94712 capable(CAP_SYS_NICE));
94713 }
94714@@ -3107,7 +3111,8 @@ SYSCALL_DEFINE1(nice, int, increment)
94715 nice = task_nice(current) + increment;
94716
94717 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
94718- if (increment < 0 && !can_nice(current, nice))
94719+ if (increment < 0 && (!can_nice(current, nice) ||
94720+ gr_handle_chroot_nice()))
94721 return -EPERM;
94722
94723 retval = security_task_setnice(current, nice);
94724@@ -3380,6 +3385,7 @@ recheck:
94725 if (policy != p->policy && !rlim_rtprio)
94726 return -EPERM;
94727
94728+ gr_learn_resource(p, RLIMIT_RTPRIO, attr->sched_priority, 1);
94729 /* can't increase priority */
94730 if (attr->sched_priority > p->rt_priority &&
94731 attr->sched_priority > rlim_rtprio)
94732@@ -4772,6 +4778,7 @@ void idle_task_exit(void)
94733
94734 if (mm != &init_mm) {
94735 switch_mm(mm, &init_mm, current);
94736+ populate_stack();
94737 finish_arch_post_lock_switch();
94738 }
94739 mmdrop(mm);
94740@@ -4867,7 +4874,7 @@ static void migrate_tasks(unsigned int dead_cpu)
94741
94742 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
94743
94744-static struct ctl_table sd_ctl_dir[] = {
94745+static ctl_table_no_const sd_ctl_dir[] __read_only = {
94746 {
94747 .procname = "sched_domain",
94748 .mode = 0555,
94749@@ -4884,17 +4891,17 @@ static struct ctl_table sd_ctl_root[] = {
94750 {}
94751 };
94752
94753-static struct ctl_table *sd_alloc_ctl_entry(int n)
94754+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
94755 {
94756- struct ctl_table *entry =
94757+ ctl_table_no_const *entry =
94758 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
94759
94760 return entry;
94761 }
94762
94763-static void sd_free_ctl_entry(struct ctl_table **tablep)
94764+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
94765 {
94766- struct ctl_table *entry;
94767+ ctl_table_no_const *entry;
94768
94769 /*
94770 * In the intermediate directories, both the child directory and
94771@@ -4902,22 +4909,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
94772 * will always be set. In the lowest directory the names are
94773 * static strings and all have proc handlers.
94774 */
94775- for (entry = *tablep; entry->mode; entry++) {
94776- if (entry->child)
94777- sd_free_ctl_entry(&entry->child);
94778+ for (entry = tablep; entry->mode; entry++) {
94779+ if (entry->child) {
94780+ sd_free_ctl_entry(entry->child);
94781+ pax_open_kernel();
94782+ entry->child = NULL;
94783+ pax_close_kernel();
94784+ }
94785 if (entry->proc_handler == NULL)
94786 kfree(entry->procname);
94787 }
94788
94789- kfree(*tablep);
94790- *tablep = NULL;
94791+ kfree(tablep);
94792 }
94793
94794 static int min_load_idx = 0;
94795 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
94796
94797 static void
94798-set_table_entry(struct ctl_table *entry,
94799+set_table_entry(ctl_table_no_const *entry,
94800 const char *procname, void *data, int maxlen,
94801 umode_t mode, proc_handler *proc_handler,
94802 bool load_idx)
94803@@ -4937,7 +4947,7 @@ set_table_entry(struct ctl_table *entry,
94804 static struct ctl_table *
94805 sd_alloc_ctl_domain_table(struct sched_domain *sd)
94806 {
94807- struct ctl_table *table = sd_alloc_ctl_entry(14);
94808+ ctl_table_no_const *table = sd_alloc_ctl_entry(14);
94809
94810 if (table == NULL)
94811 return NULL;
94812@@ -4975,9 +4985,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
94813 return table;
94814 }
94815
94816-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
94817+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
94818 {
94819- struct ctl_table *entry, *table;
94820+ ctl_table_no_const *entry, *table;
94821 struct sched_domain *sd;
94822 int domain_num = 0, i;
94823 char buf[32];
94824@@ -5004,11 +5014,13 @@ static struct ctl_table_header *sd_sysctl_header;
94825 static void register_sched_domain_sysctl(void)
94826 {
94827 int i, cpu_num = num_possible_cpus();
94828- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
94829+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
94830 char buf[32];
94831
94832 WARN_ON(sd_ctl_dir[0].child);
94833+ pax_open_kernel();
94834 sd_ctl_dir[0].child = entry;
94835+ pax_close_kernel();
94836
94837 if (entry == NULL)
94838 return;
94839@@ -5031,8 +5043,12 @@ static void unregister_sched_domain_sysctl(void)
94840 if (sd_sysctl_header)
94841 unregister_sysctl_table(sd_sysctl_header);
94842 sd_sysctl_header = NULL;
94843- if (sd_ctl_dir[0].child)
94844- sd_free_ctl_entry(&sd_ctl_dir[0].child);
94845+ if (sd_ctl_dir[0].child) {
94846+ sd_free_ctl_entry(sd_ctl_dir[0].child);
94847+ pax_open_kernel();
94848+ sd_ctl_dir[0].child = NULL;
94849+ pax_close_kernel();
94850+ }
94851 }
94852 #else
94853 static void register_sched_domain_sysctl(void)
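
Note on the kernel/sched/core.c hunks above: three independent changes are mixed here. The gr_learn_resource()/gr_handle_chroot_nice() calls feed grsecurity's resource-learning and chroot nice-restriction policies; populate_stack() after switch_mm() appears tied to PaX's per-CPU PGD handling, touching the kernel stack right after the mm switch; and the sched-domain sysctl tables become ctl_table_no_const/__read_only, so the two sites that must store into sd_ctl_dir[0].child are bracketed by a pax_open_kernel()/pax_close_kernel() write window (sd_free_ctl_entry() now takes the table directly, and the caller clears the pointer inside that window). A userspace analogue of the open-write-close discipline using mprotect; the page size comes from sysconf and the names are illustrative:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static char *table;
static size_t pgsz;

static void open_kernel(void)  { mprotect(table, pgsz, PROT_READ | PROT_WRITE); }
static void close_kernel(void) { mprotect(table, pgsz, PROT_READ); }

int main(void)
{
    pgsz = (size_t)sysconf(_SC_PAGESIZE);
    table = mmap(NULL, pgsz, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (table == MAP_FAILED)
        return 1;

    strcpy(table, "child=NULL");
    close_kernel();               /* table is now read-only, like __read_only */

    open_kernel();                /* brief write window, as pax_open_kernel() */
    strcpy(table, "child=entry");
    close_kernel();               /* window closed, as pax_close_kernel() */

    puts(table);
    return 0;
}
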
94854diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
94855index fea7d33..84faa94 100644
94856--- a/kernel/sched/fair.c
94857+++ b/kernel/sched/fair.c
94858@@ -1857,7 +1857,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
94859
94860 static void reset_ptenuma_scan(struct task_struct *p)
94861 {
94862- ACCESS_ONCE(p->mm->numa_scan_seq)++;
94863+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
94864 p->mm->numa_scan_offset = 0;
94865 }
94866
94867@@ -7289,7 +7289,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
94868 * run_rebalance_domains is triggered when needed from the scheduler tick.
94869 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
94870 */
94871-static void run_rebalance_domains(struct softirq_action *h)
94872+static __latent_entropy void run_rebalance_domains(void)
94873 {
94874 struct rq *this_rq = this_rq();
94875 enum cpu_idle_type idle = this_rq->idle_balance ?
94876diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
94877index 31cc02e..734fb85 100644
94878--- a/kernel/sched/sched.h
94879+++ b/kernel/sched/sched.h
94880@@ -1153,7 +1153,7 @@ struct sched_class {
94881 #ifdef CONFIG_FAIR_GROUP_SCHED
94882 void (*task_move_group) (struct task_struct *p, int on_rq);
94883 #endif
94884-};
94885+} __do_const;
94886
94887 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
94888 {
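
Note on the kernel/sched/sched.h hunk above: __do_const marks ops-style structures made up entirely of function pointers so the constify GCC plugin places every instance in read-only memory, removing them as a target for runtime function-pointer hijacks. Without the plugin, the effect is approximated by declaring instances const, as in this sketch with demo names only:

#include <stdio.h>

/* All-function-pointer ops struct; the instance is const so its pointers
 * are fixed at build time and live in read-only data. */
struct sched_class_demo {
    void (*task_tick)(void);
};

static void tick(void) { puts("tick"); }

static const struct sched_class_demo fair_sched_class_demo = {
    .task_tick = tick,
};

int main(void)
{
    fair_sched_class_demo.task_tick();
    return 0;
}
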
94889diff --git a/kernel/seccomp.c b/kernel/seccomp.c
94890index 301bbc2..eda2da3 100644
94891--- a/kernel/seccomp.c
94892+++ b/kernel/seccomp.c
94893@@ -39,7 +39,7 @@
94894 * is only needed for handling filters shared across tasks.
94895 * @prev: points to a previously installed, or inherited, filter
94896 * @len: the number of instructions in the program
94897- * @insnsi: the BPF program instructions to evaluate
94898+ * @insns: the BPF program instructions to evaluate
94899 *
94900 * seccomp_filter objects are organized in a tree linked via the @prev
94901 * pointer. For any task, it appears to be a singly-linked list starting
94902@@ -54,32 +54,61 @@
94903 struct seccomp_filter {
94904 atomic_t usage;
94905 struct seccomp_filter *prev;
94906- struct sk_filter *prog;
94907+ unsigned short len; /* Instruction count */
94908+ struct sock_filter insns[];
94909 };
94910
94911 /* Limit any path through the tree to 256KB worth of instructions. */
94912 #define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))
94913
94914-/*
94915+/**
94916+ * get_u32 - returns a u32 offset into data
94917+ * @data: a unsigned 64 bit value
94918+ * @index: 0 or 1 to return the first or second 32-bits
94919+ *
94920+ * This inline exists to hide the length of unsigned long. If a 32-bit
94921+ * unsigned long is passed in, it will be extended and the top 32-bits will be
94922+ * 0. If it is a 64-bit unsigned long, then whatever data is resident will be
94923+ * properly returned.
94924+ *
94925 * Endianness is explicitly ignored and left for BPF program authors to manage
94926 * as per the specific architecture.
94927 */
94928-static void populate_seccomp_data(struct seccomp_data *sd)
94929+static inline u32 get_u32(u64 data, int index)
94930 {
94931- struct task_struct *task = current;
94932- struct pt_regs *regs = task_pt_regs(task);
94933- unsigned long args[6];
94934+ return ((u32 *)&data)[index];
94935+}
94936
94937- sd->nr = syscall_get_nr(task, regs);
94938- sd->arch = syscall_get_arch();
94939- syscall_get_arguments(task, regs, 0, 6, args);
94940- sd->args[0] = args[0];
94941- sd->args[1] = args[1];
94942- sd->args[2] = args[2];
94943- sd->args[3] = args[3];
94944- sd->args[4] = args[4];
94945- sd->args[5] = args[5];
94946- sd->instruction_pointer = KSTK_EIP(task);
94947+/* Helper for bpf_load below. */
94948+#define BPF_DATA(_name) offsetof(struct seccomp_data, _name)
94949+/**
94950+ * bpf_load: checks and returns a pointer to the requested offset
94951+ * @off: offset into struct seccomp_data to load from
94952+ *
94953+ * Returns the requested 32-bits of data.
94954+ * seccomp_check_filter() should assure that @off is 32-bit aligned
94955+ * and not out of bounds. Failure to do so is a BUG.
94956+ */
94957+u32 seccomp_bpf_load(int off)
94958+{
94959+ struct pt_regs *regs = task_pt_regs(current);
94960+ if (off == BPF_DATA(nr))
94961+ return syscall_get_nr(current, regs);
94962+ if (off == BPF_DATA(arch))
94963+ return syscall_get_arch();
94964+ if (off >= BPF_DATA(args[0]) && off < BPF_DATA(args[6])) {
94965+ unsigned long value;
94966+ int arg = (off - BPF_DATA(args[0])) / sizeof(u64);
94967+ int index = !!(off % sizeof(u64));
94968+ syscall_get_arguments(current, regs, arg, 1, &value);
94969+ return get_u32(value, index);
94970+ }
94971+ if (off == BPF_DATA(instruction_pointer))
94972+ return get_u32(KSTK_EIP(current), 0);
94973+ if (off == BPF_DATA(instruction_pointer) + sizeof(u32))
94974+ return get_u32(KSTK_EIP(current), 1);
94975+ /* seccomp_check_filter should make this impossible. */
94976+ BUG();
94977 }
94978
94979 /**
94980@@ -103,59 +132,59 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
94981 u32 k = ftest->k;
94982
94983 switch (code) {
94984- case BPF_LD | BPF_W | BPF_ABS:
94985- ftest->code = BPF_LDX | BPF_W | BPF_ABS;
94986+ case BPF_S_LD_W_ABS:
94987+ ftest->code = BPF_S_ANC_SECCOMP_LD_W;
94988 /* 32-bit aligned and not out of bounds. */
94989 if (k >= sizeof(struct seccomp_data) || k & 3)
94990 return -EINVAL;
94991 continue;
94992- case BPF_LD | BPF_W | BPF_LEN:
94993- ftest->code = BPF_LD | BPF_IMM;
94994+ case BPF_S_LD_W_LEN:
94995+ ftest->code = BPF_S_LD_IMM;
94996 ftest->k = sizeof(struct seccomp_data);
94997 continue;
94998- case BPF_LDX | BPF_W | BPF_LEN:
94999- ftest->code = BPF_LDX | BPF_IMM;
95000+ case BPF_S_LDX_W_LEN:
95001+ ftest->code = BPF_S_LDX_IMM;
95002 ftest->k = sizeof(struct seccomp_data);
95003 continue;
95004 /* Explicitly include allowed calls. */
95005- case BPF_RET | BPF_K:
95006- case BPF_RET | BPF_A:
95007- case BPF_ALU | BPF_ADD | BPF_K:
95008- case BPF_ALU | BPF_ADD | BPF_X:
95009- case BPF_ALU | BPF_SUB | BPF_K:
95010- case BPF_ALU | BPF_SUB | BPF_X:
95011- case BPF_ALU | BPF_MUL | BPF_K:
95012- case BPF_ALU | BPF_MUL | BPF_X:
95013- case BPF_ALU | BPF_DIV | BPF_K:
95014- case BPF_ALU | BPF_DIV | BPF_X:
95015- case BPF_ALU | BPF_AND | BPF_K:
95016- case BPF_ALU | BPF_AND | BPF_X:
95017- case BPF_ALU | BPF_OR | BPF_K:
95018- case BPF_ALU | BPF_OR | BPF_X:
95019- case BPF_ALU | BPF_XOR | BPF_K:
95020- case BPF_ALU | BPF_XOR | BPF_X:
95021- case BPF_ALU | BPF_LSH | BPF_K:
95022- case BPF_ALU | BPF_LSH | BPF_X:
95023- case BPF_ALU | BPF_RSH | BPF_K:
95024- case BPF_ALU | BPF_RSH | BPF_X:
95025- case BPF_ALU | BPF_NEG:
95026- case BPF_LD | BPF_IMM:
95027- case BPF_LDX | BPF_IMM:
95028- case BPF_MISC | BPF_TAX:
95029- case BPF_MISC | BPF_TXA:
95030- case BPF_LD | BPF_MEM:
95031- case BPF_LDX | BPF_MEM:
95032- case BPF_ST:
95033- case BPF_STX:
95034- case BPF_JMP | BPF_JA:
95035- case BPF_JMP | BPF_JEQ | BPF_K:
95036- case BPF_JMP | BPF_JEQ | BPF_X:
95037- case BPF_JMP | BPF_JGE | BPF_K:
95038- case BPF_JMP | BPF_JGE | BPF_X:
95039- case BPF_JMP | BPF_JGT | BPF_K:
95040- case BPF_JMP | BPF_JGT | BPF_X:
95041- case BPF_JMP | BPF_JSET | BPF_K:
95042- case BPF_JMP | BPF_JSET | BPF_X:
95043+ case BPF_S_RET_K:
95044+ case BPF_S_RET_A:
95045+ case BPF_S_ALU_ADD_K:
95046+ case BPF_S_ALU_ADD_X:
95047+ case BPF_S_ALU_SUB_K:
95048+ case BPF_S_ALU_SUB_X:
95049+ case BPF_S_ALU_MUL_K:
95050+ case BPF_S_ALU_MUL_X:
95051+ case BPF_S_ALU_DIV_X:
95052+ case BPF_S_ALU_AND_K:
95053+ case BPF_S_ALU_AND_X:
95054+ case BPF_S_ALU_OR_K:
95055+ case BPF_S_ALU_OR_X:
95056+ case BPF_S_ALU_XOR_K:
95057+ case BPF_S_ALU_XOR_X:
95058+ case BPF_S_ALU_LSH_K:
95059+ case BPF_S_ALU_LSH_X:
95060+ case BPF_S_ALU_RSH_K:
95061+ case BPF_S_ALU_RSH_X:
95062+ case BPF_S_ALU_NEG:
95063+ case BPF_S_LD_IMM:
95064+ case BPF_S_LDX_IMM:
95065+ case BPF_S_MISC_TAX:
95066+ case BPF_S_MISC_TXA:
95067+ case BPF_S_ALU_DIV_K:
95068+ case BPF_S_LD_MEM:
95069+ case BPF_S_LDX_MEM:
95070+ case BPF_S_ST:
95071+ case BPF_S_STX:
95072+ case BPF_S_JMP_JA:
95073+ case BPF_S_JMP_JEQ_K:
95074+ case BPF_S_JMP_JEQ_X:
95075+ case BPF_S_JMP_JGE_K:
95076+ case BPF_S_JMP_JGE_X:
95077+ case BPF_S_JMP_JGT_K:
95078+ case BPF_S_JMP_JGT_X:
95079+ case BPF_S_JMP_JSET_K:
95080+ case BPF_S_JMP_JSET_X:
95081 continue;
95082 default:
95083 return -EINVAL;
95084@@ -173,22 +202,18 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
95085 static u32 seccomp_run_filters(int syscall)
95086 {
95087 struct seccomp_filter *f;
95088- struct seccomp_data sd;
95089 u32 ret = SECCOMP_RET_ALLOW;
95090
95091 /* Ensure unexpected behavior doesn't result in failing open. */
95092 if (WARN_ON(current->seccomp.filter == NULL))
95093 return SECCOMP_RET_KILL;
95094
95095- populate_seccomp_data(&sd);
95096-
95097 /*
95098 * All filters in the list are evaluated and the lowest BPF return
95099 * value always takes priority (ignoring the DATA).
95100 */
95101 for (f = current->seccomp.filter; f; f = f->prev) {
95102- u32 cur_ret = SK_RUN_FILTER(f->prog, (void *)&sd);
95103-
95104+ u32 cur_ret = sk_run_filter(NULL, f->insns);
95105 if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
95106 ret = cur_ret;
95107 }
95108@@ -206,20 +231,18 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
95109 struct seccomp_filter *filter;
95110 unsigned long fp_size = fprog->len * sizeof(struct sock_filter);
95111 unsigned long total_insns = fprog->len;
95112- struct sock_filter *fp;
95113- int new_len;
95114 long ret;
95115
95116 if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
95117 return -EINVAL;
95118
95119 for (filter = current->seccomp.filter; filter; filter = filter->prev)
95120- total_insns += filter->prog->len + 4; /* include a 4 instr penalty */
95121+ total_insns += filter->len + 4; /* include a 4 instr penalty */
95122 if (total_insns > MAX_INSNS_PER_PATH)
95123 return -ENOMEM;
95124
95125 /*
95126- * Installing a seccomp filter requires that the task has
95127+ * Installing a seccomp filter requires that the task have
95128 * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
95129 * This avoids scenarios where unprivileged tasks can affect the
95130 * behavior of privileged children.
95131@@ -229,51 +252,28 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
95132 CAP_SYS_ADMIN) != 0)
95133 return -EACCES;
95134
95135- fp = kzalloc(fp_size, GFP_KERNEL|__GFP_NOWARN);
95136- if (!fp)
95137- return -ENOMEM;
95138-
95139- /* Copy the instructions from fprog. */
95140- ret = -EFAULT;
95141- if (copy_from_user(fp, fprog->filter, fp_size))
95142- goto free_prog;
95143-
95144- /* Check and rewrite the fprog via the skb checker */
95145- ret = sk_chk_filter(fp, fprog->len);
95146- if (ret)
95147- goto free_prog;
95148-
95149- /* Check and rewrite the fprog for seccomp use */
95150- ret = seccomp_check_filter(fp, fprog->len);
95151- if (ret)
95152- goto free_prog;
95153-
95154- /* Convert 'sock_filter' insns to 'sock_filter_int' insns */
95155- ret = sk_convert_filter(fp, fprog->len, NULL, &new_len);
95156- if (ret)
95157- goto free_prog;
95158-
95159 /* Allocate a new seccomp_filter */
95160- ret = -ENOMEM;
95161- filter = kzalloc(sizeof(struct seccomp_filter),
95162+ filter = kzalloc(sizeof(struct seccomp_filter) + fp_size,
95163 GFP_KERNEL|__GFP_NOWARN);
95164 if (!filter)
95165- goto free_prog;
95166-
95167- filter->prog = kzalloc(sk_filter_size(new_len),
95168- GFP_KERNEL|__GFP_NOWARN);
95169- if (!filter->prog)
95170- goto free_filter;
95171-
95172- ret = sk_convert_filter(fp, fprog->len, filter->prog->insnsi, &new_len);
95173- if (ret)
95174- goto free_filter_prog;
95175- kfree(fp);
95176-
95177+ return -ENOMEM;
95178 atomic_set(&filter->usage, 1);
95179- filter->prog->len = new_len;
95180+ filter->len = fprog->len;
95181
95182- sk_filter_select_runtime(filter->prog);
95183+ /* Copy the instructions from fprog. */
95184+ ret = -EFAULT;
95185+ if (copy_from_user(filter->insns, fprog->filter, fp_size))
95186+ goto fail;
95187+
95188+ /* Check and rewrite the fprog via the skb checker */
95189+ ret = sk_chk_filter(filter->insns, filter->len);
95190+ if (ret)
95191+ goto fail;
95192+
95193+ /* Check and rewrite the fprog for seccomp use */
95194+ ret = seccomp_check_filter(filter->insns, filter->len);
95195+ if (ret)
95196+ goto fail;
95197
95198 /*
95199 * If there is an existing filter, make it the prev and don't drop its
95200@@ -282,13 +282,8 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
95201 filter->prev = current->seccomp.filter;
95202 current->seccomp.filter = filter;
95203 return 0;
95204-
95205-free_filter_prog:
95206- kfree(filter->prog);
95207-free_filter:
95208+fail:
95209 kfree(filter);
95210-free_prog:
95211- kfree(fp);
95212 return ret;
95213 }
95214
95215@@ -298,7 +293,7 @@ free_prog:
95216 *
95217 * Returns 0 on success and non-zero otherwise.
95218 */
95219-static long seccomp_attach_user_filter(char __user *user_filter)
95220+long seccomp_attach_user_filter(char __user *user_filter)
95221 {
95222 struct sock_fprog fprog;
95223 long ret = -EFAULT;
95224@@ -337,7 +332,6 @@ void put_seccomp_filter(struct task_struct *tsk)
95225 while (orig && atomic_dec_and_test(&orig->usage)) {
95226 struct seccomp_filter *freeme = orig;
95227 orig = orig->prev;
95228- sk_filter_free(freeme->prog);
95229 kfree(freeme);
95230 }
95231 }
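
The seccomp hunks above revert the 3.16 eBPF-backed representation (struct sk_filter *prog run through SK_RUN_FILTER) to the older scheme: a flat array of classic sock_filter instructions interpreted by sk_run_filter(), with BPF_S_ANC_SECCOMP_LD_W loads serviced by seccomp_bpf_load(). That function splits each 64-bit syscall argument into two 32-bit loads; a userspace sketch of the same offset decoding, assuming the struct seccomp_data layout from <linux/seccomp.h>:

	#include <stdio.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <linux/seccomp.h>	/* struct seccomp_data */

	int main(void)
	{
		/* e.g. the second 32-bit word of syscall argument 3 */
		int off = offsetof(struct seccomp_data, args[3]) + 4;
		int arg = (off - offsetof(struct seccomp_data, args[0])) / sizeof(uint64_t);
		int half = !!(off % sizeof(uint64_t));
		printf("arg=%d half=%d\n", arg, half);	/* prints arg=3 half=1 */
		return 0;
	}
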
95232diff --git a/kernel/signal.c b/kernel/signal.c
95233index a4077e9..f0d4e5c 100644
95234--- a/kernel/signal.c
95235+++ b/kernel/signal.c
95236@@ -53,12 +53,12 @@ static struct kmem_cache *sigqueue_cachep;
95237
95238 int print_fatal_signals __read_mostly;
95239
95240-static void __user *sig_handler(struct task_struct *t, int sig)
95241+static __sighandler_t sig_handler(struct task_struct *t, int sig)
95242 {
95243 return t->sighand->action[sig - 1].sa.sa_handler;
95244 }
95245
95246-static int sig_handler_ignored(void __user *handler, int sig)
95247+static int sig_handler_ignored(__sighandler_t handler, int sig)
95248 {
95249 /* Is it explicitly or implicitly ignored? */
95250 return handler == SIG_IGN ||
95251@@ -67,7 +67,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
95252
95253 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
95254 {
95255- void __user *handler;
95256+ __sighandler_t handler;
95257
95258 handler = sig_handler(t, sig);
95259
95260@@ -372,6 +372,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
95261 atomic_inc(&user->sigpending);
95262 rcu_read_unlock();
95263
95264+ if (!override_rlimit)
95265+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
95266+
95267 if (override_rlimit ||
95268 atomic_read(&user->sigpending) <=
95269 task_rlimit(t, RLIMIT_SIGPENDING)) {
95270@@ -499,7 +502,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
95271
95272 int unhandled_signal(struct task_struct *tsk, int sig)
95273 {
95274- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
95275+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
95276 if (is_global_init(tsk))
95277 return 1;
95278 if (handler != SIG_IGN && handler != SIG_DFL)
95279@@ -793,6 +796,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
95280 }
95281 }
95282
95283+ /* allow glibc communication via tgkill to other threads in our
95284+ thread group */
95285+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
95286+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
95287+ && gr_handle_signal(t, sig))
95288+ return -EPERM;
95289+
95290 return security_task_kill(t, info, sig, 0);
95291 }
95292
95293@@ -1176,7 +1186,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
95294 return send_signal(sig, info, p, 1);
95295 }
95296
95297-static int
95298+int
95299 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
95300 {
95301 return send_signal(sig, info, t, 0);
95302@@ -1213,6 +1223,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
95303 unsigned long int flags;
95304 int ret, blocked, ignored;
95305 struct k_sigaction *action;
95306+ int is_unhandled = 0;
95307
95308 spin_lock_irqsave(&t->sighand->siglock, flags);
95309 action = &t->sighand->action[sig-1];
95310@@ -1227,9 +1238,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
95311 }
95312 if (action->sa.sa_handler == SIG_DFL)
95313 t->signal->flags &= ~SIGNAL_UNKILLABLE;
95314+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
95315+ is_unhandled = 1;
95316 ret = specific_send_sig_info(sig, info, t);
95317 spin_unlock_irqrestore(&t->sighand->siglock, flags);
95318
95319+ /* only deal with unhandled signals; Java etc. trigger SIGSEGV during
95320+ normal operation */
95321+ if (is_unhandled) {
95322+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
95323+ gr_handle_crash(t, sig);
95324+ }
95325+
95326 return ret;
95327 }
95328
95329@@ -1296,8 +1316,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
95330 ret = check_kill_permission(sig, info, p);
95331 rcu_read_unlock();
95332
95333- if (!ret && sig)
95334+ if (!ret && sig) {
95335 ret = do_send_sig_info(sig, info, p, true);
95336+ if (!ret)
95337+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
95338+ }
95339
95340 return ret;
95341 }
95342@@ -2903,7 +2926,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
95343 int error = -ESRCH;
95344
95345 rcu_read_lock();
95346- p = find_task_by_vpid(pid);
95347+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
95348+ /* allow glibc communication via tgkill to other threads in our
95349+ thread group */
95350+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
95351+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
95352+ p = find_task_by_vpid_unrestricted(pid);
95353+ else
95354+#endif
95355+ p = find_task_by_vpid(pid);
95356 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
95357 error = check_kill_permission(sig, info, p);
95358 /*
95359@@ -3236,8 +3267,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
95360 }
95361 seg = get_fs();
95362 set_fs(KERNEL_DS);
95363- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
95364- (stack_t __force __user *) &uoss,
95365+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
95366+ (stack_t __force_user *) &uoss,
95367 compat_user_stack_pointer());
95368 set_fs(seg);
95369 if (ret >= 0 && uoss_ptr) {
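
The check_kill_permission() and do_send_specific() changes above whitelist exactly one pattern through grsecurity's signal restrictions: glibc's internal setxid broadcast, which uses tgkill() with kernel SIGRTMIN+1 (glibc's SIGSETXID) to make sibling threads apply a credential change. A userspace sketch of the whitelisted call shape (the helper name is illustrative; 33 assumes the usual Linux kernel SIGRTMIN of 32):

	#define _GNU_SOURCE
	#include <unistd.h>
	#include <sys/syscall.h>

	static long notify_sibling(pid_t tgid, pid_t tid)
	{
		/* tgkill() sets si_code = SI_TKILL and si_pid to the
		 * sender's tgid, which is the shape the hooks above match */
		return syscall(SYS_tgkill, tgid, tid, 33 /* SIGRTMIN+1 */);
	}
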
95370diff --git a/kernel/smpboot.c b/kernel/smpboot.c
95371index eb89e18..a4e6792 100644
95372--- a/kernel/smpboot.c
95373+++ b/kernel/smpboot.c
95374@@ -288,7 +288,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
95375 }
95376 smpboot_unpark_thread(plug_thread, cpu);
95377 }
95378- list_add(&plug_thread->list, &hotplug_threads);
95379+ pax_list_add(&plug_thread->list, &hotplug_threads);
95380 out:
95381 mutex_unlock(&smpboot_threads_lock);
95382 return ret;
95383@@ -305,7 +305,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
95384 {
95385 get_online_cpus();
95386 mutex_lock(&smpboot_threads_lock);
95387- list_del(&plug_thread->list);
95388+ pax_list_del(&plug_thread->list);
95389 smpboot_destroy_threads(plug_thread);
95390 mutex_unlock(&smpboot_threads_lock);
95391 put_online_cpus();
95392diff --git a/kernel/softirq.c b/kernel/softirq.c
95393index 5918d22..e95d1926 100644
95394--- a/kernel/softirq.c
95395+++ b/kernel/softirq.c
95396@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
95397 EXPORT_SYMBOL(irq_stat);
95398 #endif
95399
95400-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
95401+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
95402
95403 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
95404
95405@@ -266,7 +266,7 @@ restart:
95406 kstat_incr_softirqs_this_cpu(vec_nr);
95407
95408 trace_softirq_entry(vec_nr);
95409- h->action(h);
95410+ h->action();
95411 trace_softirq_exit(vec_nr);
95412 if (unlikely(prev_count != preempt_count())) {
95413 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
95414@@ -426,7 +426,7 @@ void __raise_softirq_irqoff(unsigned int nr)
95415 or_softirq_pending(1UL << nr);
95416 }
95417
95418-void open_softirq(int nr, void (*action)(struct softirq_action *))
95419+void __init open_softirq(int nr, void (*action)(void))
95420 {
95421 softirq_vec[nr].action = action;
95422 }
95423@@ -478,7 +478,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
95424 }
95425 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
95426
95427-static void tasklet_action(struct softirq_action *a)
95428+static void tasklet_action(void)
95429 {
95430 struct tasklet_struct *list;
95431
95432@@ -514,7 +514,7 @@ static void tasklet_action(struct softirq_action *a)
95433 }
95434 }
95435
95436-static void tasklet_hi_action(struct softirq_action *a)
95437+static __latent_entropy void tasklet_hi_action(void)
95438 {
95439 struct tasklet_struct *list;
95440
95441@@ -741,7 +741,7 @@ static struct notifier_block cpu_nfb = {
95442 .notifier_call = cpu_callback
95443 };
95444
95445-static struct smp_hotplug_thread softirq_threads = {
95446+static struct smp_hotplug_thread softirq_threads __read_only = {
95447 .store = &ksoftirqd,
95448 .thread_should_run = ksoftirqd_should_run,
95449 .thread_fn = run_ksoftirqd,
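
The softirq changes above constify the dispatch path: softirq_vec becomes read-only and page-aligned, handlers lose their unused struct softirq_action argument, and open_softirq() becomes __init so registration is only possible at boot, while the vector is still writable. A sketch of the adjusted registration pattern (names are illustrative; an existing softirq index is reused purely for illustration):

	static __latent_entropy void my_bh_action(void)
	{
		/* deferred bottom-half work; no softirq_action argument */
	}

	static int __init my_subsys_init(void)
	{
		/* must run at boot: softirq_vec is read-only afterwards */
		open_softirq(TASKLET_SOFTIRQ, my_bh_action);
		return 0;
	}
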
95450diff --git a/kernel/sys.c b/kernel/sys.c
95451index 66a751e..a42497e 100644
95452--- a/kernel/sys.c
95453+++ b/kernel/sys.c
95454@@ -148,6 +148,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
95455 error = -EACCES;
95456 goto out;
95457 }
95458+
95459+ if (gr_handle_chroot_setpriority(p, niceval)) {
95460+ error = -EACCES;
95461+ goto out;
95462+ }
95463+
95464 no_nice = security_task_setnice(p, niceval);
95465 if (no_nice) {
95466 error = no_nice;
95467@@ -351,6 +357,20 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
95468 goto error;
95469 }
95470
95471+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
95472+ goto error;
95473+
95474+ if (!gid_eq(new->gid, old->gid)) {
95475+ /* make sure we generate a learn log for what will
95476+ end up being a role transition after a full-learning
95477+ policy is generated.
95478+ CAP_SETGID is required to perform a transition, and
95479+ we may not have logged a CAP_SETGID check above, e.g.
95480+ in the case where new rgid = old egid.
95481+ */
95482+ gr_learn_cap(current, new, CAP_SETGID);
95483+ }
95484+
95485 if (rgid != (gid_t) -1 ||
95486 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
95487 new->sgid = new->egid;
95488@@ -386,6 +406,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
95489 old = current_cred();
95490
95491 retval = -EPERM;
95492+
95493+ if (gr_check_group_change(kgid, kgid, kgid))
95494+ goto error;
95495+
95496 if (ns_capable(old->user_ns, CAP_SETGID))
95497 new->gid = new->egid = new->sgid = new->fsgid = kgid;
95498 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
95499@@ -403,7 +427,7 @@ error:
95500 /*
95501 * change the user struct in a credentials set to match the new UID
95502 */
95503-static int set_user(struct cred *new)
95504+int set_user(struct cred *new)
95505 {
95506 struct user_struct *new_user;
95507
95508@@ -483,7 +507,18 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
95509 goto error;
95510 }
95511
95512+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
95513+ goto error;
95514+
95515 if (!uid_eq(new->uid, old->uid)) {
95516+ /* make sure we generate a learn log for what will
95517+ end up being a role transition after a full-learning
95518+ policy is generated.
95519+ CAP_SETUID is required to perform a transition, and
95520+ we may not have logged a CAP_SETUID check above, e.g.
95521+ in the case where new ruid = old euid.
95522+ */
95523+ gr_learn_cap(current, new, CAP_SETUID);
95524 retval = set_user(new);
95525 if (retval < 0)
95526 goto error;
95527@@ -533,6 +568,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
95528 old = current_cred();
95529
95530 retval = -EPERM;
95531+
95532+ if (gr_check_crash_uid(kuid))
95533+ goto error;
95534+ if (gr_check_user_change(kuid, kuid, kuid))
95535+ goto error;
95536+
95537 if (ns_capable(old->user_ns, CAP_SETUID)) {
95538 new->suid = new->uid = kuid;
95539 if (!uid_eq(kuid, old->uid)) {
95540@@ -602,6 +643,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
95541 goto error;
95542 }
95543
95544+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
95545+ goto error;
95546+
95547 if (ruid != (uid_t) -1) {
95548 new->uid = kruid;
95549 if (!uid_eq(kruid, old->uid)) {
95550@@ -684,6 +728,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
95551 goto error;
95552 }
95553
95554+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
95555+ goto error;
95556+
95557 if (rgid != (gid_t) -1)
95558 new->gid = krgid;
95559 if (egid != (gid_t) -1)
95560@@ -745,12 +792,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
95561 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
95562 ns_capable(old->user_ns, CAP_SETUID)) {
95563 if (!uid_eq(kuid, old->fsuid)) {
95564+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
95565+ goto error;
95566+
95567 new->fsuid = kuid;
95568 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
95569 goto change_okay;
95570 }
95571 }
95572
95573+error:
95574 abort_creds(new);
95575 return old_fsuid;
95576
95577@@ -783,12 +834,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
95578 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
95579 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
95580 ns_capable(old->user_ns, CAP_SETGID)) {
95581+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
95582+ goto error;
95583+
95584 if (!gid_eq(kgid, old->fsgid)) {
95585 new->fsgid = kgid;
95586 goto change_okay;
95587 }
95588 }
95589
95590+error:
95591 abort_creds(new);
95592 return old_fsgid;
95593
95594@@ -1167,19 +1222,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
95595 return -EFAULT;
95596
95597 down_read(&uts_sem);
95598- error = __copy_to_user(&name->sysname, &utsname()->sysname,
95599+ error = __copy_to_user(name->sysname, &utsname()->sysname,
95600 __OLD_UTS_LEN);
95601 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
95602- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
95603+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
95604 __OLD_UTS_LEN);
95605 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
95606- error |= __copy_to_user(&name->release, &utsname()->release,
95607+ error |= __copy_to_user(name->release, &utsname()->release,
95608 __OLD_UTS_LEN);
95609 error |= __put_user(0, name->release + __OLD_UTS_LEN);
95610- error |= __copy_to_user(&name->version, &utsname()->version,
95611+ error |= __copy_to_user(name->version, &utsname()->version,
95612 __OLD_UTS_LEN);
95613 error |= __put_user(0, name->version + __OLD_UTS_LEN);
95614- error |= __copy_to_user(&name->machine, &utsname()->machine,
95615+ error |= __copy_to_user(name->machine, &utsname()->machine,
95616 __OLD_UTS_LEN);
95617 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
95618 up_read(&uts_sem);
95619@@ -1381,6 +1436,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
95620 */
95621 new_rlim->rlim_cur = 1;
95622 }
95623+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
95624+ is changed to a lower value. Since tasks can be created by the same
95625+ user in between this limit change and an execve by this task, force
95626+ a recheck only for this task by setting PF_NPROC_EXCEEDED.
95627+ */
95628+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
95629+ tsk->flags |= PF_NPROC_EXCEEDED;
95630 }
95631 if (!retval) {
95632 if (old_rlim)
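
The PF_NPROC_EXCEEDED hunk above closes an enforcement gap: RLIMIT_NPROC is normally checked only at fork time, so lowering the limit for an existing task would otherwise never be felt. Setting the flag forces one extra check at that task's next execve. A simplified sketch of the consuming side, loosely adapted from fs/exec.c of this kernel generation (not verbatim):

	if ((current->flags & PF_NPROC_EXCEEDED) &&
	    atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) {
		retval = -EAGAIN;
		goto out_ret;
	}
	/* check passed (or was never requested): clear the one-shot flag */
	current->flags &= ~PF_NPROC_EXCEEDED;
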
95633diff --git a/kernel/sysctl.c b/kernel/sysctl.c
95634index 75b22e2..65c0ac8 100644
95635--- a/kernel/sysctl.c
95636+++ b/kernel/sysctl.c
95637@@ -94,7 +94,6 @@
95638
95639
95640 #if defined(CONFIG_SYSCTL)
95641-
95642 /* External variables not in a header file. */
95643 extern int max_threads;
95644 extern int suid_dumpable;
95645@@ -115,19 +114,20 @@ extern int sysctl_nr_trim_pages;
95646
95647 /* Constants used for minimum and maximum */
95648 #ifdef CONFIG_LOCKUP_DETECTOR
95649-static int sixty = 60;
95650+static int sixty __read_only = 60;
95651 #endif
95652
95653-static int __maybe_unused neg_one = -1;
95654+static int __maybe_unused neg_one __read_only = -1;
95655
95656-static int zero;
95657-static int __maybe_unused one = 1;
95658-static int __maybe_unused two = 2;
95659-static int __maybe_unused four = 4;
95660-static unsigned long one_ul = 1;
95661-static int one_hundred = 100;
95662+static int zero __read_only = 0;
95663+static int __maybe_unused one __read_only = 1;
95664+static int __maybe_unused two __read_only = 2;
95665+static int __maybe_unused three __read_only = 3;
95666+static int __maybe_unused four __read_only = 4;
95667+static unsigned long one_ul __read_only = 1;
95668+static int one_hundred __read_only = 100;
95669 #ifdef CONFIG_PRINTK
95670-static int ten_thousand = 10000;
95671+static int ten_thousand __read_only = 10000;
95672 #endif
95673
95674 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
95675@@ -181,10 +181,8 @@ static int proc_taint(struct ctl_table *table, int write,
95676 void __user *buffer, size_t *lenp, loff_t *ppos);
95677 #endif
95678
95679-#ifdef CONFIG_PRINTK
95680 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
95681 void __user *buffer, size_t *lenp, loff_t *ppos);
95682-#endif
95683
95684 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
95685 void __user *buffer, size_t *lenp, loff_t *ppos);
95686@@ -215,6 +213,8 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write,
95687
95688 #endif
95689
95690+extern struct ctl_table grsecurity_table[];
95691+
95692 static struct ctl_table kern_table[];
95693 static struct ctl_table vm_table[];
95694 static struct ctl_table fs_table[];
95695@@ -229,6 +229,20 @@ extern struct ctl_table epoll_table[];
95696 int sysctl_legacy_va_layout;
95697 #endif
95698
95699+#ifdef CONFIG_PAX_SOFTMODE
95700+static ctl_table pax_table[] = {
95701+ {
95702+ .procname = "softmode",
95703+ .data = &pax_softmode,
95704+ .maxlen = sizeof(unsigned int),
95705+ .mode = 0600,
95706+ .proc_handler = &proc_dointvec,
95707+ },
95708+
95709+ { }
95710+};
95711+#endif
95712+
95713 /* The default sysctl tables: */
95714
95715 static struct ctl_table sysctl_base_table[] = {
95716@@ -277,6 +291,22 @@ static int max_extfrag_threshold = 1000;
95717 #endif
95718
95719 static struct ctl_table kern_table[] = {
95720+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
95721+ {
95722+ .procname = "grsecurity",
95723+ .mode = 0500,
95724+ .child = grsecurity_table,
95725+ },
95726+#endif
95727+
95728+#ifdef CONFIG_PAX_SOFTMODE
95729+ {
95730+ .procname = "pax",
95731+ .mode = 0500,
95732+ .child = pax_table,
95733+ },
95734+#endif
95735+
95736 {
95737 .procname = "sched_child_runs_first",
95738 .data = &sysctl_sched_child_runs_first,
95739@@ -641,7 +671,7 @@ static struct ctl_table kern_table[] = {
95740 .data = &modprobe_path,
95741 .maxlen = KMOD_PATH_LEN,
95742 .mode = 0644,
95743- .proc_handler = proc_dostring,
95744+ .proc_handler = proc_dostring_modpriv,
95745 },
95746 {
95747 .procname = "modules_disabled",
95748@@ -808,16 +838,20 @@ static struct ctl_table kern_table[] = {
95749 .extra1 = &zero,
95750 .extra2 = &one,
95751 },
95752+#endif
95753 {
95754 .procname = "kptr_restrict",
95755 .data = &kptr_restrict,
95756 .maxlen = sizeof(int),
95757 .mode = 0644,
95758 .proc_handler = proc_dointvec_minmax_sysadmin,
95759+#ifdef CONFIG_GRKERNSEC_HIDESYM
95760+ .extra1 = &two,
95761+#else
95762 .extra1 = &zero,
95763+#endif
95764 .extra2 = &two,
95765 },
95766-#endif
95767 {
95768 .procname = "ngroups_max",
95769 .data = &ngroups_max,
95770@@ -1073,10 +1107,17 @@ static struct ctl_table kern_table[] = {
95771 */
95772 {
95773 .procname = "perf_event_paranoid",
95774- .data = &sysctl_perf_event_paranoid,
95775- .maxlen = sizeof(sysctl_perf_event_paranoid),
95776+ .data = &sysctl_perf_event_legitimately_concerned,
95777+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
95778 .mode = 0644,
95779- .proc_handler = proc_dointvec,
95780+ /* go ahead, be a hero */
95781+ .proc_handler = proc_dointvec_minmax_sysadmin,
95782+ .extra1 = &neg_one,
95783+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
95784+ .extra2 = &three,
95785+#else
95786+ .extra2 = &two,
95787+#endif
95788 },
95789 {
95790 .procname = "perf_event_mlock_kb",
95791@@ -1338,6 +1379,13 @@ static struct ctl_table vm_table[] = {
95792 .proc_handler = proc_dointvec_minmax,
95793 .extra1 = &zero,
95794 },
95795+ {
95796+ .procname = "heap_stack_gap",
95797+ .data = &sysctl_heap_stack_gap,
95798+ .maxlen = sizeof(sysctl_heap_stack_gap),
95799+ .mode = 0644,
95800+ .proc_handler = proc_doulongvec_minmax,
95801+ },
95802 #else
95803 {
95804 .procname = "nr_trim_pages",
95805@@ -1827,6 +1875,16 @@ int proc_dostring(struct ctl_table *table, int write,
95806 (char __user *)buffer, lenp, ppos);
95807 }
95808
95809+int proc_dostring_modpriv(struct ctl_table *table, int write,
95810+ void __user *buffer, size_t *lenp, loff_t *ppos)
95811+{
95812+ if (write && !capable(CAP_SYS_MODULE))
95813+ return -EPERM;
95814+
95815+ return _proc_do_string(table->data, table->maxlen, write,
95816+ buffer, lenp, ppos);
95817+}
95818+
95819 static size_t proc_skip_spaces(char **buf)
95820 {
95821 size_t ret;
95822@@ -1932,6 +1990,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
95823 len = strlen(tmp);
95824 if (len > *size)
95825 len = *size;
95826+ if (len > sizeof(tmp))
95827+ len = sizeof(tmp);
95828 if (copy_to_user(*buf, tmp, len))
95829 return -EFAULT;
95830 *size -= len;
95831@@ -2109,7 +2169,7 @@ int proc_dointvec(struct ctl_table *table, int write,
95832 static int proc_taint(struct ctl_table *table, int write,
95833 void __user *buffer, size_t *lenp, loff_t *ppos)
95834 {
95835- struct ctl_table t;
95836+ ctl_table_no_const t;
95837 unsigned long tmptaint = get_taint();
95838 int err;
95839
95840@@ -2137,7 +2197,6 @@ static int proc_taint(struct ctl_table *table, int write,
95841 return err;
95842 }
95843
95844-#ifdef CONFIG_PRINTK
95845 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
95846 void __user *buffer, size_t *lenp, loff_t *ppos)
95847 {
95848@@ -2146,7 +2205,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
95849
95850 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
95851 }
95852-#endif
95853
95854 struct do_proc_dointvec_minmax_conv_param {
95855 int *min;
95856@@ -2706,6 +2764,12 @@ int proc_dostring(struct ctl_table *table, int write,
95857 return -ENOSYS;
95858 }
95859
95860+int proc_dostring_modpriv(struct ctl_table *table, int write,
95861+ void __user *buffer, size_t *lenp, loff_t *ppos)
95862+{
95863+ return -ENOSYS;
95864+}
95865+
95866 int proc_dointvec(struct ctl_table *table, int write,
95867 void __user *buffer, size_t *lenp, loff_t *ppos)
95868 {
95869@@ -2762,5 +2826,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
95870 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
95871 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
95872 EXPORT_SYMBOL(proc_dostring);
95873+EXPORT_SYMBOL(proc_dostring_modpriv);
95874 EXPORT_SYMBOL(proc_doulongvec_minmax);
95875 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
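
Two clamps in the sysctl table above deserve a note. With GRKERNSEC_HIDESYM, kptr_restrict gets extra1 = extra2 = &two, so the only value proc_dointvec_minmax_sysadmin() will accept is 2 (unconditional %pK censoring); and under GRKERNSEC_PERF_HARDEN, perf_event_paranoid accepts up to 3, a grsecurity-added level that denies perf to unprivileged users entirely. extra1/extra2 are inclusive bounds consumed by the minmax handlers; an illustrative entry with hypothetical bound variables:

	static int kptr_lo __read_only = 2, kptr_hi __read_only = 2;

	static struct ctl_table example_entry = {
		.procname	= "kptr_restrict",
		.data		= &kptr_restrict,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax_sysadmin,
		.extra1		= &kptr_lo,	/* writes below 2 are rejected */
		.extra2		= &kptr_hi,	/* writes above 2 are rejected */
	};
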
95876diff --git a/kernel/taskstats.c b/kernel/taskstats.c
95877index 13d2f7c..c93d0b0 100644
95878--- a/kernel/taskstats.c
95879+++ b/kernel/taskstats.c
95880@@ -28,9 +28,12 @@
95881 #include <linux/fs.h>
95882 #include <linux/file.h>
95883 #include <linux/pid_namespace.h>
95884+#include <linux/grsecurity.h>
95885 #include <net/genetlink.h>
95886 #include <linux/atomic.h>
95887
95888+extern int gr_is_taskstats_denied(int pid);
95889+
95890 /*
95891 * Maximum length of a cpumask that can be specified in
95892 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
95893@@ -576,6 +579,9 @@ err:
95894
95895 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
95896 {
95897+ if (gr_is_taskstats_denied(current->pid))
95898+ return -EACCES;
95899+
95900 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
95901 return cmd_attr_register_cpumask(info);
95902 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
95903diff --git a/kernel/time.c b/kernel/time.c
95904index 7c7964c..2a0d412 100644
95905--- a/kernel/time.c
95906+++ b/kernel/time.c
95907@@ -172,6 +172,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
95908 return error;
95909
95910 if (tz) {
95911+ /* we log in do_settimeofday(), called below, so don't log twice
95912+ */
95913+ if (!tv)
95914+ gr_log_timechange();
95915+
95916 sys_tz = *tz;
95917 update_vsyscall_tz();
95918 if (firsttime) {
95919diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
95920index fe75444..190c528 100644
95921--- a/kernel/time/alarmtimer.c
95922+++ b/kernel/time/alarmtimer.c
95923@@ -811,7 +811,7 @@ static int __init alarmtimer_init(void)
95924 struct platform_device *pdev;
95925 int error = 0;
95926 int i;
95927- struct k_clock alarm_clock = {
95928+ static struct k_clock alarm_clock = {
95929 .clock_getres = alarm_clock_getres,
95930 .clock_get = alarm_clock_get,
95931 .timer_create = alarm_timer_create,
95932diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
95933index 32d8d6a..11486af 100644
95934--- a/kernel/time/timekeeping.c
95935+++ b/kernel/time/timekeeping.c
95936@@ -15,6 +15,7 @@
95937 #include <linux/init.h>
95938 #include <linux/mm.h>
95939 #include <linux/sched.h>
95940+#include <linux/grsecurity.h>
95941 #include <linux/syscore_ops.h>
95942 #include <linux/clocksource.h>
95943 #include <linux/jiffies.h>
95944@@ -502,6 +503,8 @@ int do_settimeofday(const struct timespec *tv)
95945 if (!timespec_valid_strict(tv))
95946 return -EINVAL;
95947
95948+ gr_log_timechange();
95949+
95950 raw_spin_lock_irqsave(&timekeeper_lock, flags);
95951 write_seqcount_begin(&timekeeper_seq);
95952
95953diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
95954index 61ed862..3b52c65 100644
95955--- a/kernel/time/timer_list.c
95956+++ b/kernel/time/timer_list.c
95957@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
95958
95959 static void print_name_offset(struct seq_file *m, void *sym)
95960 {
95961+#ifdef CONFIG_GRKERNSEC_HIDESYM
95962+ SEQ_printf(m, "<%p>", NULL);
95963+#else
95964 char symname[KSYM_NAME_LEN];
95965
95966 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
95967 SEQ_printf(m, "<%pK>", sym);
95968 else
95969 SEQ_printf(m, "%s", symname);
95970+#endif
95971 }
95972
95973 static void
95974@@ -119,7 +123,11 @@ next_one:
95975 static void
95976 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
95977 {
95978+#ifdef CONFIG_GRKERNSEC_HIDESYM
95979+ SEQ_printf(m, " .base: %p\n", NULL);
95980+#else
95981 SEQ_printf(m, " .base: %pK\n", base);
95982+#endif
95983 SEQ_printf(m, " .index: %d\n",
95984 base->index);
95985 SEQ_printf(m, " .resolution: %Lu nsecs\n",
95986@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
95987 {
95988 struct proc_dir_entry *pe;
95989
95990+#ifdef CONFIG_GRKERNSEC_PROC_ADD
95991+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
95992+#else
95993 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
95994+#endif
95995 if (!pe)
95996 return -ENOMEM;
95997 return 0;
95998diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
95999index 1fb08f2..ca4bb1e 100644
96000--- a/kernel/time/timer_stats.c
96001+++ b/kernel/time/timer_stats.c
96002@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
96003 static unsigned long nr_entries;
96004 static struct entry entries[MAX_ENTRIES];
96005
96006-static atomic_t overflow_count;
96007+static atomic_unchecked_t overflow_count;
96008
96009 /*
96010 * The entries are in a hash-table, for fast lookup:
96011@@ -140,7 +140,7 @@ static void reset_entries(void)
96012 nr_entries = 0;
96013 memset(entries, 0, sizeof(entries));
96014 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
96015- atomic_set(&overflow_count, 0);
96016+ atomic_set_unchecked(&overflow_count, 0);
96017 }
96018
96019 static struct entry *alloc_entry(void)
96020@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
96021 if (likely(entry))
96022 entry->count++;
96023 else
96024- atomic_inc(&overflow_count);
96025+ atomic_inc_unchecked(&overflow_count);
96026
96027 out_unlock:
96028 raw_spin_unlock_irqrestore(lock, flags);
96029@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
96030
96031 static void print_name_offset(struct seq_file *m, unsigned long addr)
96032 {
96033+#ifdef CONFIG_GRKERNSEC_HIDESYM
96034+ seq_printf(m, "<%p>", NULL);
96035+#else
96036 char symname[KSYM_NAME_LEN];
96037
96038 if (lookup_symbol_name(addr, symname) < 0)
96039- seq_printf(m, "<%p>", (void *)addr);
96040+ seq_printf(m, "<%pK>", (void *)addr);
96041 else
96042 seq_printf(m, "%s", symname);
96043+#endif
96044 }
96045
96046 static int tstats_show(struct seq_file *m, void *v)
96047@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
96048
96049 seq_puts(m, "Timer Stats Version: v0.3\n");
96050 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
96051- if (atomic_read(&overflow_count))
96052- seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
96053+ if (atomic_read_unchecked(&overflow_count))
96054+ seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
96055 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
96056
96057 for (i = 0; i < nr_entries; i++) {
96058@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
96059 {
96060 struct proc_dir_entry *pe;
96061
96062+#ifdef CONFIG_GRKERNSEC_PROC_ADD
96063+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
96064+#else
96065 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
96066+#endif
96067 if (!pe)
96068 return -ENOMEM;
96069 return 0;
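
The timer_list.c and timer_stats.c changes above apply the same HIDESYM recipe to /proc leak sources: kernel addresses are printed via %pK (censored according to kptr_restrict) instead of raw %p, or dropped entirely as a NULL pointer when GRKERNSEC_HIDESYM is set, and the file modes tighten from world-readable under GRKERNSEC_PROC_ADD. The resulting three-way choice, condensed:

	#ifdef CONFIG_GRKERNSEC_HIDESYM
		seq_printf(m, "<%p>", NULL);	/* never expose the address */
	#else
		if (lookup_symbol_name(addr, symname) < 0)
			seq_printf(m, "<%pK>", (void *)addr);	/* censored by kptr_restrict */
		else
			seq_printf(m, "%s", symname);	/* symbol name, no address */
	#endif
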
96070diff --git a/kernel/timer.c b/kernel/timer.c
96071index 3bb01a3..0e7760e 100644
96072--- a/kernel/timer.c
96073+++ b/kernel/timer.c
96074@@ -1393,7 +1393,7 @@ void update_process_times(int user_tick)
96075 /*
96076 * This function runs timers and the timer-tq in bottom half context.
96077 */
96078-static void run_timer_softirq(struct softirq_action *h)
96079+static __latent_entropy void run_timer_softirq(void)
96080 {
96081 struct tvec_base *base = __this_cpu_read(tvec_bases);
96082
96083@@ -1456,7 +1456,7 @@ static void process_timeout(unsigned long __data)
96084 *
96085 * In all cases the return value is guaranteed to be non-negative.
96086 */
96087-signed long __sched schedule_timeout(signed long timeout)
96088+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
96089 {
96090 struct timer_list timer;
96091 unsigned long expire;
96092diff --git a/kernel/torture.c b/kernel/torture.c
96093index 40bb511..91190b9 100644
96094--- a/kernel/torture.c
96095+++ b/kernel/torture.c
96096@@ -484,7 +484,7 @@ static int torture_shutdown_notify(struct notifier_block *unused1,
96097 mutex_lock(&fullstop_mutex);
96098 if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
96099 VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
96100- ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
96101+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_SHUTDOWN;
96102 } else {
96103 pr_warn("Concurrent rmmod and shutdown illegal!\n");
96104 }
96105@@ -551,14 +551,14 @@ static int torture_stutter(void *arg)
96106 if (!torture_must_stop()) {
96107 if (stutter > 1) {
96108 schedule_timeout_interruptible(stutter - 1);
96109- ACCESS_ONCE(stutter_pause_test) = 2;
96110+ ACCESS_ONCE_RW(stutter_pause_test) = 2;
96111 }
96112 schedule_timeout_interruptible(1);
96113- ACCESS_ONCE(stutter_pause_test) = 1;
96114+ ACCESS_ONCE_RW(stutter_pause_test) = 1;
96115 }
96116 if (!torture_must_stop())
96117 schedule_timeout_interruptible(stutter);
96118- ACCESS_ONCE(stutter_pause_test) = 0;
96119+ ACCESS_ONCE_RW(stutter_pause_test) = 0;
96120 torture_shutdown_absorb("torture_stutter");
96121 } while (!torture_must_stop());
96122 torture_kthread_stopping("torture_stutter");
96123@@ -645,7 +645,7 @@ bool torture_cleanup(void)
96124 schedule_timeout_uninterruptible(10);
96125 return true;
96126 }
96127- ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
96128+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_RMMOD;
96129 mutex_unlock(&fullstop_mutex);
96130 torture_shutdown_cleanup();
96131 torture_shuffle_cleanup();
96132diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
96133index c1bd4ad..4b861dc 100644
96134--- a/kernel/trace/blktrace.c
96135+++ b/kernel/trace/blktrace.c
96136@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
96137 struct blk_trace *bt = filp->private_data;
96138 char buf[16];
96139
96140- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
96141+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
96142
96143 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
96144 }
96145@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
96146 return 1;
96147
96148 bt = buf->chan->private_data;
96149- atomic_inc(&bt->dropped);
96150+ atomic_inc_unchecked(&bt->dropped);
96151 return 0;
96152 }
96153
96154@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
96155
96156 bt->dir = dir;
96157 bt->dev = dev;
96158- atomic_set(&bt->dropped, 0);
96159+ atomic_set_unchecked(&bt->dropped, 0);
96160 INIT_LIST_HEAD(&bt->running_list);
96161
96162 ret = -EIO;
96163diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
96164index ac9d1da..ce98b35 100644
96165--- a/kernel/trace/ftrace.c
96166+++ b/kernel/trace/ftrace.c
96167@@ -1920,12 +1920,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
96168 if (unlikely(ftrace_disabled))
96169 return 0;
96170
96171+ ret = ftrace_arch_code_modify_prepare();
96172+ FTRACE_WARN_ON(ret);
96173+ if (ret)
96174+ return 0;
96175+
96176 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
96177+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
96178 if (ret) {
96179 ftrace_bug(ret, ip);
96180- return 0;
96181 }
96182- return 1;
96183+ return ret ? 0 : 1;
96184 }
96185
96186 /*
96187@@ -4126,8 +4131,10 @@ static int ftrace_process_locs(struct module *mod,
96188 if (!count)
96189 return 0;
96190
96191+ pax_open_kernel();
96192 sort(start, count, sizeof(*start),
96193 ftrace_cmp_ips, ftrace_swap_ips);
96194+ pax_close_kernel();
96195
96196 start_pg = ftrace_allocate_pages(count);
96197 if (!start_pg)
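
Both ftrace hunks above exist because PaX KERNEXEC keeps kernel text and this table read-only: ftrace_code_disable() now brackets ftrace_make_nop() with ftrace_arch_code_modify_prepare()/post_process(), and ftrace_process_locs() wraps its in-place sort() of the mcount location array in pax_open_kernel()/pax_close_kernel(). The bracketing idiom in general form, on a hypothetical read-only table:

	static int ro_table[16] __read_only;	/* hypothetical RO data */

	static void __init fixup_table(void)
	{
		pax_open_kernel();	/* temporarily lift write protection */
		ro_table[0] = 1;	/* in-place update of read-only data */
		pax_close_kernel();	/* re-arm the protection */
	}
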
96198diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
96199index ff70271..4242e69 100644
96200--- a/kernel/trace/ring_buffer.c
96201+++ b/kernel/trace/ring_buffer.c
96202@@ -352,9 +352,9 @@ struct buffer_data_page {
96203 */
96204 struct buffer_page {
96205 struct list_head list; /* list of buffer pages */
96206- local_t write; /* index for next write */
96207+ local_unchecked_t write; /* index for next write */
96208 unsigned read; /* index for next read */
96209- local_t entries; /* entries on this page */
96210+ local_unchecked_t entries; /* entries on this page */
96211 unsigned long real_end; /* real end of data */
96212 struct buffer_data_page *page; /* Actual data page */
96213 };
96214@@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
96215 unsigned long last_overrun;
96216 local_t entries_bytes;
96217 local_t entries;
96218- local_t overrun;
96219- local_t commit_overrun;
96220+ local_unchecked_t overrun;
96221+ local_unchecked_t commit_overrun;
96222 local_t dropped_events;
96223 local_t committing;
96224 local_t commits;
96225@@ -626,8 +626,22 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
96226 work = &cpu_buffer->irq_work;
96227 }
96228
96229- work->waiters_pending = true;
96230 poll_wait(filp, &work->waiters, poll_table);
96231+ work->waiters_pending = true;
96232+ /*
96233+ * There's a tight race between setting the waiters_pending and
96234+ * checking if the ring buffer is empty. Once the waiters_pending bit
96235+ * is set, the next event will wake the task up, but we can get stuck
96236+ * if there's only a single event in.
96237+ *
96238+ * FIXME: Ideally, we need a memory barrier on the writer side as well,
96239+ * but adding a memory barrier to all events will cause too much of a
96240+ * performance hit in the fast path. We only need a memory barrier when
96241+ * the buffer goes from empty to having content. But as this race is
96242+ * extremely small, and it's not a problem if another event comes in, we
96243+ * will fix it later.
96244+ */
96245+ smp_mb();
96246
96247 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
96248 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
96249@@ -991,8 +1005,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
96250 *
96251 * We add a counter to the write field to denote this.
96252 */
96253- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
96254- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
96255+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
96256+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
96257
96258 /*
96259 * Just make sure we have seen our old_write and synchronize
96260@@ -1020,8 +1034,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
96261 * cmpxchg to only update if an interrupt did not already
96262 * do it for us. If the cmpxchg fails, we don't care.
96263 */
96264- (void)local_cmpxchg(&next_page->write, old_write, val);
96265- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
96266+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
96267+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
96268
96269 /*
96270 * No need to worry about races with clearing out the commit.
96271@@ -1388,12 +1402,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
96272
96273 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
96274 {
96275- return local_read(&bpage->entries) & RB_WRITE_MASK;
96276+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
96277 }
96278
96279 static inline unsigned long rb_page_write(struct buffer_page *bpage)
96280 {
96281- return local_read(&bpage->write) & RB_WRITE_MASK;
96282+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
96283 }
96284
96285 static int
96286@@ -1488,7 +1502,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
96287 * bytes consumed in ring buffer from here.
96288 * Increment overrun to account for the lost events.
96289 */
96290- local_add(page_entries, &cpu_buffer->overrun);
96291+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
96292 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
96293 }
96294
96295@@ -2066,7 +2080,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
96296 * it is our responsibility to update
96297 * the counters.
96298 */
96299- local_add(entries, &cpu_buffer->overrun);
96300+ local_add_unchecked(entries, &cpu_buffer->overrun);
96301 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
96302
96303 /*
96304@@ -2216,7 +2230,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
96305 if (tail == BUF_PAGE_SIZE)
96306 tail_page->real_end = 0;
96307
96308- local_sub(length, &tail_page->write);
96309+ local_sub_unchecked(length, &tail_page->write);
96310 return;
96311 }
96312
96313@@ -2251,7 +2265,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
96314 rb_event_set_padding(event);
96315
96316 /* Set the write back to the previous setting */
96317- local_sub(length, &tail_page->write);
96318+ local_sub_unchecked(length, &tail_page->write);
96319 return;
96320 }
96321
96322@@ -2263,7 +2277,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
96323
96324 /* Set write to end of buffer */
96325 length = (tail + length) - BUF_PAGE_SIZE;
96326- local_sub(length, &tail_page->write);
96327+ local_sub_unchecked(length, &tail_page->write);
96328 }
96329
96330 /*
96331@@ -2289,7 +2303,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
96332 * about it.
96333 */
96334 if (unlikely(next_page == commit_page)) {
96335- local_inc(&cpu_buffer->commit_overrun);
96336+ local_inc_unchecked(&cpu_buffer->commit_overrun);
96337 goto out_reset;
96338 }
96339
96340@@ -2345,7 +2359,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
96341 cpu_buffer->tail_page) &&
96342 (cpu_buffer->commit_page ==
96343 cpu_buffer->reader_page))) {
96344- local_inc(&cpu_buffer->commit_overrun);
96345+ local_inc_unchecked(&cpu_buffer->commit_overrun);
96346 goto out_reset;
96347 }
96348 }
96349@@ -2393,7 +2407,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
96350 length += RB_LEN_TIME_EXTEND;
96351
96352 tail_page = cpu_buffer->tail_page;
96353- write = local_add_return(length, &tail_page->write);
96354+ write = local_add_return_unchecked(length, &tail_page->write);
96355
96356 /* set write to only the index of the write */
96357 write &= RB_WRITE_MASK;
96358@@ -2417,7 +2431,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
96359 kmemcheck_annotate_bitfield(event, bitfield);
96360 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
96361
96362- local_inc(&tail_page->entries);
96363+ local_inc_unchecked(&tail_page->entries);
96364
96365 /*
96366 * If this is the first commit on the page, then update
96367@@ -2450,7 +2464,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
96368
96369 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
96370 unsigned long write_mask =
96371- local_read(&bpage->write) & ~RB_WRITE_MASK;
96372+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
96373 unsigned long event_length = rb_event_length(event);
96374 /*
96375 * This is on the tail page. It is possible that
96376@@ -2460,7 +2474,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
96377 */
96378 old_index += write_mask;
96379 new_index += write_mask;
96380- index = local_cmpxchg(&bpage->write, old_index, new_index);
96381+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
96382 if (index == old_index) {
96383 /* update counters */
96384 local_sub(event_length, &cpu_buffer->entries_bytes);
96385@@ -2852,7 +2866,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
96386
96387 /* Do the likely case first */
96388 if (likely(bpage->page == (void *)addr)) {
96389- local_dec(&bpage->entries);
96390+ local_dec_unchecked(&bpage->entries);
96391 return;
96392 }
96393
96394@@ -2864,7 +2878,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
96395 start = bpage;
96396 do {
96397 if (bpage->page == (void *)addr) {
96398- local_dec(&bpage->entries);
96399+ local_dec_unchecked(&bpage->entries);
96400 return;
96401 }
96402 rb_inc_page(cpu_buffer, &bpage);
96403@@ -3148,7 +3162,7 @@ static inline unsigned long
96404 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
96405 {
96406 return local_read(&cpu_buffer->entries) -
96407- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
96408+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
96409 }
96410
96411 /**
96412@@ -3237,7 +3251,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
96413 return 0;
96414
96415 cpu_buffer = buffer->buffers[cpu];
96416- ret = local_read(&cpu_buffer->overrun);
96417+ ret = local_read_unchecked(&cpu_buffer->overrun);
96418
96419 return ret;
96420 }
96421@@ -3260,7 +3274,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
96422 return 0;
96423
96424 cpu_buffer = buffer->buffers[cpu];
96425- ret = local_read(&cpu_buffer->commit_overrun);
96426+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
96427
96428 return ret;
96429 }
96430@@ -3345,7 +3359,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
96431 /* if you care about this being correct, lock the buffer */
96432 for_each_buffer_cpu(buffer, cpu) {
96433 cpu_buffer = buffer->buffers[cpu];
96434- overruns += local_read(&cpu_buffer->overrun);
96435+ overruns += local_read_unchecked(&cpu_buffer->overrun);
96436 }
96437
96438 return overruns;
96439@@ -3521,8 +3535,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
96440 /*
96441 * Reset the reader page to size zero.
96442 */
96443- local_set(&cpu_buffer->reader_page->write, 0);
96444- local_set(&cpu_buffer->reader_page->entries, 0);
96445+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
96446+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
96447 local_set(&cpu_buffer->reader_page->page->commit, 0);
96448 cpu_buffer->reader_page->real_end = 0;
96449
96450@@ -3556,7 +3570,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
96451 * want to compare with the last_overrun.
96452 */
96453 smp_mb();
96454- overwrite = local_read(&(cpu_buffer->overrun));
96455+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
96456
96457 /*
96458 * Here's the tricky part.
96459@@ -4126,8 +4140,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
96460
96461 cpu_buffer->head_page
96462 = list_entry(cpu_buffer->pages, struct buffer_page, list);
96463- local_set(&cpu_buffer->head_page->write, 0);
96464- local_set(&cpu_buffer->head_page->entries, 0);
96465+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
96466+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
96467 local_set(&cpu_buffer->head_page->page->commit, 0);
96468
96469 cpu_buffer->head_page->read = 0;
96470@@ -4137,14 +4151,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
96471
96472 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
96473 INIT_LIST_HEAD(&cpu_buffer->new_pages);
96474- local_set(&cpu_buffer->reader_page->write, 0);
96475- local_set(&cpu_buffer->reader_page->entries, 0);
96476+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
96477+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
96478 local_set(&cpu_buffer->reader_page->page->commit, 0);
96479 cpu_buffer->reader_page->read = 0;
96480
96481 local_set(&cpu_buffer->entries_bytes, 0);
96482- local_set(&cpu_buffer->overrun, 0);
96483- local_set(&cpu_buffer->commit_overrun, 0);
96484+ local_set_unchecked(&cpu_buffer->overrun, 0);
96485+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
96486 local_set(&cpu_buffer->dropped_events, 0);
96487 local_set(&cpu_buffer->entries, 0);
96488 local_set(&cpu_buffer->committing, 0);
96489@@ -4549,8 +4563,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
96490 rb_init_page(bpage);
96491 bpage = reader->page;
96492 reader->page = *data_page;
96493- local_set(&reader->write, 0);
96494- local_set(&reader->entries, 0);
96495+ local_set_unchecked(&reader->write, 0);
96496+ local_set_unchecked(&reader->entries, 0);
96497 reader->read = 0;
96498 *data_page = bpage;
96499
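
The *_unchecked conversions sprinkled through ring_buffer.c are not noise: PaX REFCOUNT instruments local_t/atomic_t so that overflow traps, but the buffer-page write and entries fields wrap by design, since their upper bits carry the RB_WRITE_INTCNT interrupt-nesting count and every reader masks with RB_WRITE_MASK. Such counters must opt out through the unchecked variants. The idiom in miniature:

	static local_unchecked_t write_cnt;	/* wraps by design: exempt from REFCOUNT */

	static unsigned long page_write_index(void)
	{
		/* high bits hold RB_WRITE_INTCNT bookkeeping; mask them off */
		return local_read_unchecked(&write_cnt) & RB_WRITE_MASK;
	}
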
96500diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
96501index 291397e..db3836d 100644
96502--- a/kernel/trace/trace.c
96503+++ b/kernel/trace/trace.c
96504@@ -3510,7 +3510,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
96505 return 0;
96506 }
96507
96508-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
96509+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
96510 {
96511 /* do nothing if flag is already set */
96512 if (!!(trace_flags & mask) == !!enabled)
96513diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
96514index 9258f5a..9b1e41e 100644
96515--- a/kernel/trace/trace.h
96516+++ b/kernel/trace/trace.h
96517@@ -1278,7 +1278,7 @@ extern const char *__stop___tracepoint_str[];
96518 void trace_printk_init_buffers(void);
96519 void trace_printk_start_comm(void);
96520 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
96521-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
96522+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
96523
96524 /*
96525 * Normal trace_printk() and friends allocates special buffers
96526diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
96527index 57b67b1..66082a9 100644
96528--- a/kernel/trace/trace_clock.c
96529+++ b/kernel/trace/trace_clock.c
96530@@ -124,7 +124,7 @@ u64 notrace trace_clock_global(void)
96531 return now;
96532 }
96533
96534-static atomic64_t trace_counter;
96535+static atomic64_unchecked_t trace_counter;
96536
96537 /*
96538 * trace_clock_counter(): simply an atomic counter.
96539@@ -133,5 +133,5 @@ static atomic64_t trace_counter;
96540 */
96541 u64 notrace trace_clock_counter(void)
96542 {
96543- return atomic64_add_return(1, &trace_counter);
96544+ return atomic64_inc_return_unchecked(&trace_counter);
96545 }
96546diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
96547index 2de5362..c4c7003 100644
96548--- a/kernel/trace/trace_events.c
96549+++ b/kernel/trace/trace_events.c
96550@@ -1722,7 +1722,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
96551 return 0;
96552 }
96553
96554-struct ftrace_module_file_ops;
96555 static void __add_event_to_tracers(struct ftrace_event_call *call);
96556
96557 /* Add an additional event_call dynamically */
96558diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
96559index 0abd9b8..6a663a2 100644
96560--- a/kernel/trace/trace_mmiotrace.c
96561+++ b/kernel/trace/trace_mmiotrace.c
96562@@ -24,7 +24,7 @@ struct header_iter {
96563 static struct trace_array *mmio_trace_array;
96564 static bool overrun_detected;
96565 static unsigned long prev_overruns;
96566-static atomic_t dropped_count;
96567+static atomic_unchecked_t dropped_count;
96568
96569 static void mmio_reset_data(struct trace_array *tr)
96570 {
96571@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
96572
96573 static unsigned long count_overruns(struct trace_iterator *iter)
96574 {
96575- unsigned long cnt = atomic_xchg(&dropped_count, 0);
96576+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
96577 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
96578
96579 if (over > prev_overruns)
96580@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
96581 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
96582 sizeof(*entry), 0, pc);
96583 if (!event) {
96584- atomic_inc(&dropped_count);
96585+ atomic_inc_unchecked(&dropped_count);
96586 return;
96587 }
96588 entry = ring_buffer_event_data(event);
96589@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
96590 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
96591 sizeof(*entry), 0, pc);
96592 if (!event) {
96593- atomic_inc(&dropped_count);
96594+ atomic_inc_unchecked(&dropped_count);
96595 return;
96596 }
96597 entry = ring_buffer_event_data(event);
96598diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
96599index f3dad80..d291d61 100644
96600--- a/kernel/trace/trace_output.c
96601+++ b/kernel/trace/trace_output.c
96602@@ -322,7 +322,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
96603
96604 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
96605 if (!IS_ERR(p)) {
96606- p = mangle_path(s->buffer + s->len, p, "\n");
96607+ p = mangle_path(s->buffer + s->len, p, "\n\\");
96608 if (p) {
96609 s->len = p - s->buffer;
96610 return 1;
96611@@ -980,14 +980,16 @@ int register_ftrace_event(struct trace_event *event)
96612 goto out;
96613 }
96614
96615+ pax_open_kernel();
96616 if (event->funcs->trace == NULL)
96617- event->funcs->trace = trace_nop_print;
96618+ *(void **)&event->funcs->trace = trace_nop_print;
96619 if (event->funcs->raw == NULL)
96620- event->funcs->raw = trace_nop_print;
96621+ *(void **)&event->funcs->raw = trace_nop_print;
96622 if (event->funcs->hex == NULL)
96623- event->funcs->hex = trace_nop_print;
96624+ *(void **)&event->funcs->hex = trace_nop_print;
96625 if (event->funcs->binary == NULL)
96626- event->funcs->binary = trace_nop_print;
96627+ *(void **)&event->funcs->binary = trace_nop_print;
96628+ pax_close_kernel();
96629
96630 key = event->type & (EVENT_HASHSIZE - 1);
96631
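The hunk above writes default handlers into event->funcs, a table the constify plugin places in read-only memory; pax_open_kernel()/pax_close_kernel() briefly lift the write protection, and the *(void **)& casts strip the compiler-visible const. A userspace sketch of the same discipline, with mprotect() standing in for the kernel-side machinery (the struct layout is illustrative, not the kernel's):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct demo_event_funcs { int (*trace)(void); };

static int demo_nop_print(void) { return 0; }

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	struct demo_event_funcs *funcs = mmap(NULL, pg,
			PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (funcs == MAP_FAILED)
		return 1;
	mprotect(funcs, pg, PROT_READ);              /* table lives read-only */

	mprotect(funcs, pg, PROT_READ | PROT_WRITE); /* "pax_open_kernel()"   */
	if (funcs->trace == NULL)
		funcs->trace = demo_nop_print;       /* fill in default hook  */
	mprotect(funcs, pg, PROT_READ);              /* "pax_close_kernel()"  */

	printf("%d\n", funcs->trace());
	return 0;
}
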
96632diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
96633index 8a4e5cb..64f270d 100644
96634--- a/kernel/trace/trace_stack.c
96635+++ b/kernel/trace/trace_stack.c
96636@@ -91,7 +91,7 @@ check_stack(unsigned long ip, unsigned long *stack)
96637 return;
96638
96639 /* we do not handle interrupt stacks yet */
96640- if (!object_is_on_stack(stack))
96641+ if (!object_starts_on_stack(stack))
96642 return;
96643
96644 local_irq_save(flags);
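The rename from object_is_on_stack() to object_starts_on_stack() (also applied in lib/debugobjects.c and lib/dma-debug.c below) makes explicit that only the object's starting address is tested, not its full extent. A userspace analogue of the check, with the stack bounds passed in explicitly:

#include <stdint.h>
#include <stdio.h>

static int object_starts_on_stack(const void *obj,
				  uintptr_t stack_lo, uintptr_t stack_hi)
{
	uintptr_t addr = (uintptr_t)obj;

	/* only the first byte is tested; the object may still extend
	 * past the end of the stack region */
	return addr >= stack_lo && addr < stack_hi;
}

int main(void)
{
	char stack[64];
	uintptr_t lo = (uintptr_t)stack, hi = lo + sizeof(stack);

	printf("%d\n", object_starts_on_stack(stack + 60, lo, hi));
	return 0;
}
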
96645diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
96646index fcc0256..aee880f 100644
96647--- a/kernel/user_namespace.c
96648+++ b/kernel/user_namespace.c
96649@@ -82,6 +82,21 @@ int create_user_ns(struct cred *new)
96650 !kgid_has_mapping(parent_ns, group))
96651 return -EPERM;
96652
96653+#ifdef CONFIG_GRKERNSEC
96654+ /*
96655+ * This doesn't really inspire confidence:
96656+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
96657+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
96658+ * Increases kernel attack surface in areas developers
96659+ * previously cared little about ("low importance due
96660+ * to requiring "root" capability")
96661+ * previously cared little about ("low importance due
96662+ * to requiring 'root' capability")
96663+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
96664+ !capable(CAP_SETGID))
96665+ return -EPERM;
96666+#endif
96667+
96668 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
96669 if (!ns)
96670 return -ENOMEM;
96671@@ -872,7 +887,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
96672 if (atomic_read(&current->mm->mm_users) > 1)
96673 return -EINVAL;
96674
96675- if (current->fs->users != 1)
96676+ if (atomic_read(&current->fs->users) != 1)
96677 return -EINVAL;
96678
96679 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
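With the gate above in place, creating a user namespace requires CAP_SYS_ADMIN, CAP_SETUID and CAP_SETGID, so an unprivileged unshare(CLONE_NEWUSER) fails with -EPERM instead of exposing the namespace code to arbitrary users. A quick userspace probe of the behaviour (Linux-specific):

#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	if (unshare(CLONE_NEWUSER) == 0)
		puts("user namespace created");
	else
		printf("unshare(CLONE_NEWUSER): %s\n",
		       strerror(errno));   /* EPERM on a grsec kernel */
	return 0;
}
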
96680diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
96681index c8eac43..4b5f08f 100644
96682--- a/kernel/utsname_sysctl.c
96683+++ b/kernel/utsname_sysctl.c
96684@@ -47,7 +47,7 @@ static void put_uts(struct ctl_table *table, int write, void *which)
96685 static int proc_do_uts_string(struct ctl_table *table, int write,
96686 void __user *buffer, size_t *lenp, loff_t *ppos)
96687 {
96688- struct ctl_table uts_table;
96689+ ctl_table_no_const uts_table;
96690 int r;
96691 memcpy(&uts_table, table, sizeof(uts_table));
96692 uts_table.data = get_uts(table, write);
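ctl_table_no_const is the escape hatch for the constify plugin: the plugin turns struct ctl_table instances const, so a handler that legitimately needs a mutable on-stack copy declares it via a typedef the plugin leaves alone. A sketch of the pattern, with demo_* names standing in for the real types:

struct demo_ctl_table {
	const char *procname;
	void *data;
};

/* the constify plugin skips the typedef'd "no_const" variant */
typedef struct demo_ctl_table demo_ctl_table_no_const;

static void demo_do_uts_string(const struct demo_ctl_table *table, void *buf)
{
	demo_ctl_table_no_const uts_table = *table; /* mutable shadow copy */

	uts_table.data = buf;   /* point the copy at per-call data */
	(void)uts_table;
}

int main(void)
{
	static const struct demo_ctl_table t = { "ostype", 0 };
	char buf[16];

	demo_do_uts_string(&t, buf);
	return 0;
}
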
96693diff --git a/kernel/watchdog.c b/kernel/watchdog.c
96694index c3319bd..67efc3c 100644
96695--- a/kernel/watchdog.c
96696+++ b/kernel/watchdog.c
96697@@ -518,7 +518,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
96698 static void watchdog_nmi_disable(unsigned int cpu) { return; }
96699 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
96700
96701-static struct smp_hotplug_thread watchdog_threads = {
96702+static struct smp_hotplug_thread watchdog_threads __read_only = {
96703 .store = &softlockup_watchdog,
96704 .thread_should_run = watchdog_should_run,
96705 .thread_fn = watchdog,
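__read_only places the object in a section that is write-protected once boot finishes, so structures full of function pointers (like this smp_hotplug_thread) cannot be retargeted at runtime. A sketch of how such an annotation can be spelled; the section name here is illustrative, and nothing in this userspace demo actually enforces the protection:

#define __demo_read_only __attribute__((__section__(".data..read_only")))

struct demo_hotplug_thread {
	int (*thread_should_run)(unsigned int cpu);
	void (*thread_fn)(unsigned int cpu);
};

static int demo_should_run(unsigned int cpu) { (void)cpu; return 0; }
static void demo_fn(unsigned int cpu) { (void)cpu; }

static struct demo_hotplug_thread demo_watchdog_threads __demo_read_only = {
	.thread_should_run = demo_should_run,
	.thread_fn         = demo_fn,
};

int main(void)
{
	return demo_watchdog_threads.thread_should_run(0);
}
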
96706diff --git a/kernel/workqueue.c b/kernel/workqueue.c
96707index 35974ac..43c9e87 100644
96708--- a/kernel/workqueue.c
96709+++ b/kernel/workqueue.c
96710@@ -4576,7 +4576,7 @@ static void rebind_workers(struct worker_pool *pool)
96711 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
96712 worker_flags |= WORKER_REBOUND;
96713 worker_flags &= ~WORKER_UNBOUND;
96714- ACCESS_ONCE(worker->flags) = worker_flags;
96715+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
96716 }
96717
96718 spin_unlock_irq(&pool->lock);
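ACCESS_ONCE_RW exists because grsecurity redefines ACCESS_ONCE to yield a const lvalue, turning accidental lockless writes into compile errors; deliberate writes switch to the _RW variant. A sketch of the pair, assuming PaX-style definitions:

#include <stdio.h>

#define DEMO_ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
#define DEMO_ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

int main(void)
{
	unsigned int worker_flags = 0;
	unsigned int snap = DEMO_ACCESS_ONCE(worker_flags); /* reads are fine */

	DEMO_ACCESS_ONCE_RW(worker_flags) = snap | 0x4;     /* writes need _RW */
	/* DEMO_ACCESS_ONCE(worker_flags) = 0; -- would not compile: const lvalue */
	printf("%u\n", worker_flags);
	return 0;
}
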
96719diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
96720index 7a638aa..20db901 100644
96721--- a/lib/Kconfig.debug
96722+++ b/lib/Kconfig.debug
96723@@ -858,7 +858,7 @@ config DEBUG_MUTEXES
96724
96725 config DEBUG_WW_MUTEX_SLOWPATH
96726 bool "Wait/wound mutex debugging: Slowpath testing"
96727- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
96728+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
96729 select DEBUG_LOCK_ALLOC
96730 select DEBUG_SPINLOCK
96731 select DEBUG_MUTEXES
96732@@ -871,7 +871,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
96733
96734 config DEBUG_LOCK_ALLOC
96735 bool "Lock debugging: detect incorrect freeing of live locks"
96736- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
96737+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
96738 select DEBUG_SPINLOCK
96739 select DEBUG_MUTEXES
96740 select LOCKDEP
96741@@ -885,7 +885,7 @@ config DEBUG_LOCK_ALLOC
96742
96743 config PROVE_LOCKING
96744 bool "Lock debugging: prove locking correctness"
96745- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
96746+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
96747 select LOCKDEP
96748 select DEBUG_SPINLOCK
96749 select DEBUG_MUTEXES
96750@@ -936,7 +936,7 @@ config LOCKDEP
96751
96752 config LOCK_STAT
96753 bool "Lock usage statistics"
96754- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
96755+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
96756 select LOCKDEP
96757 select DEBUG_SPINLOCK
96758 select DEBUG_MUTEXES
96759@@ -1418,6 +1418,7 @@ config LATENCYTOP
96760 depends on DEBUG_KERNEL
96761 depends on STACKTRACE_SUPPORT
96762 depends on PROC_FS
96763+ depends on !GRKERNSEC_HIDESYM
96764 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
96765 select KALLSYMS
96766 select KALLSYMS_ALL
96767@@ -1434,7 +1435,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
96768 config DEBUG_STRICT_USER_COPY_CHECKS
96769 bool "Strict user copy size checks"
96770 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
96771- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
96772+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
96773 help
96774 Enabling this option turns a certain set of sanity checks for user
96775 copy operations into compile time failures.
96776@@ -1554,7 +1555,7 @@ endmenu # runtime tests
96777
96778 config PROVIDE_OHCI1394_DMA_INIT
96779 bool "Remote debugging over FireWire early on boot"
96780- depends on PCI && X86
96781+ depends on PCI && X86 && !GRKERNSEC
96782 help
96783 If you want to debug problems which hang or crash the kernel early
96784 on boot and the crashing machine has a FireWire port, you can use
96785diff --git a/lib/Makefile b/lib/Makefile
96786index ba967a1..2cc869a 100644
96787--- a/lib/Makefile
96788+++ b/lib/Makefile
96789@@ -33,7 +33,6 @@ obj-y += kstrtox.o
96790 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
96791 obj-$(CONFIG_TEST_MODULE) += test_module.o
96792 obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
96793-obj-$(CONFIG_TEST_BPF) += test_bpf.o
96794
96795 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
96796 CFLAGS_kobject.o += -DDEBUG
96797@@ -54,7 +53,7 @@ obj-$(CONFIG_BTREE) += btree.o
96798 obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
96799 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
96800 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
96801-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
96802+obj-y += list_debug.o
96803 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
96804
96805 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
96806diff --git a/lib/assoc_array.c b/lib/assoc_array.c
96807index c0b1007..ae146f0 100644
96808--- a/lib/assoc_array.c
96809+++ b/lib/assoc_array.c
96810@@ -1735,7 +1735,7 @@ ascend_old_tree:
96811 gc_complete:
96812 edit->set[0].to = new_root;
96813 assoc_array_apply_edit(edit);
96814- edit->array->nr_leaves_on_tree = nr_leaves_on_tree;
96815+ array->nr_leaves_on_tree = nr_leaves_on_tree;
96816 return 0;
96817
96818 enomem:
96819diff --git a/lib/average.c b/lib/average.c
96820index 114d1be..ab0350c 100644
96821--- a/lib/average.c
96822+++ b/lib/average.c
96823@@ -55,7 +55,7 @@ struct ewma *ewma_add(struct ewma *avg, unsigned long val)
96824 {
96825 unsigned long internal = ACCESS_ONCE(avg->internal);
96826
96827- ACCESS_ONCE(avg->internal) = internal ?
96828+ ACCESS_ONCE_RW(avg->internal) = internal ?
96829 (((internal << avg->weight) - internal) +
96830 (val << avg->factor)) >> avg->weight :
96831 (val << avg->factor);
96832diff --git a/lib/bitmap.c b/lib/bitmap.c
96833index 06f7e4f..f3cf2b0 100644
96834--- a/lib/bitmap.c
96835+++ b/lib/bitmap.c
96836@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
96837 {
96838 int c, old_c, totaldigits, ndigits, nchunks, nbits;
96839 u32 chunk;
96840- const char __user __force *ubuf = (const char __user __force *)buf;
96841+ const char __user *ubuf = (const char __force_user *)buf;
96842
96843 bitmap_zero(maskp, nmaskbits);
96844
96845@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
96846 {
96847 if (!access_ok(VERIFY_READ, ubuf, ulen))
96848 return -EFAULT;
96849- return __bitmap_parse((const char __force *)ubuf,
96850+ return __bitmap_parse((const char __force_kernel *)ubuf,
96851 ulen, 1, maskp, nmaskbits);
96852
96853 }
96854@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
96855 {
96856 unsigned a, b;
96857 int c, old_c, totaldigits;
96858- const char __user __force *ubuf = (const char __user __force *)buf;
96859+ const char __user *ubuf = (const char __force_user *)buf;
96860 int exp_digit, in_range;
96861
96862 totaldigits = c = 0;
96863@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
96864 {
96865 if (!access_ok(VERIFY_READ, ubuf, ulen))
96866 return -EFAULT;
96867- return __bitmap_parselist((const char __force *)ubuf,
96868+ return __bitmap_parselist((const char __force_kernel *)ubuf,
96869 ulen, 1, maskp, nmaskbits);
96870 }
96871 EXPORT_SYMBOL(bitmap_parselist_user);
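The __force_user/__force_kernel annotations refine plain __force for sparse: they document not just that an address-space cast is deliberate, but which space the pointer is being cast into. A sketch of the sparse attributes involved, using demo_* spellings (the real macros live in the kernel's compiler headers):

#ifdef __CHECKER__
# define __demo_user  __attribute__((noderef, address_space(1)))
# define __demo_force __attribute__((force))
#else
# define __demo_user
# define __demo_force
#endif
#define __demo_force_user __demo_force __demo_user

static long demo_parse(const char __demo_user *ubuf, unsigned int len)
{
	(void)ubuf;
	return (long)len;
}

/* kernel buffer deliberately passed through the user-pointer interface */
static long demo_parse_kernel(const char *kbuf, unsigned int len)
{
	return demo_parse((const char __demo_force_user *)kbuf, len);
}

int main(void)
{
	return (int)demo_parse_kernel("0xff", 4) - 4;
}
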
96872diff --git a/lib/bug.c b/lib/bug.c
96873index d1d7c78..b354235 100644
96874--- a/lib/bug.c
96875+++ b/lib/bug.c
96876@@ -137,6 +137,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
96877 return BUG_TRAP_TYPE_NONE;
96878
96879 bug = find_bug(bugaddr);
96880+ if (!bug)
96881+ return BUG_TRAP_TYPE_NONE;
96882
96883 file = NULL;
96884 line = 0;
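The added NULL check matters because find_bug() returns NULL when the trapping address has no bug-table entry (e.g. a stray UD2 not emitted by BUG()); report_bug() would otherwise dereference it. The lookup-and-guard pattern in miniature, with an illustrative table:

#include <stddef.h>
#include <stdio.h>

struct bug_entry { unsigned long addr; };

static struct bug_entry table[] = { { 0x1000 }, { 0x2000 } };

static struct bug_entry *find_bug(unsigned long addr)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].addr == addr)
			return &table[i];
	return NULL;   /* address was not generated by BUG() */
}

int main(void)
{
	struct bug_entry *bug = find_bug(0x3000);

	if (!bug) {            /* the added guard */
		puts("no bug entry: not ours");
		return 0;
	}
	printf("bug at %#lx\n", bug->addr);
	return 0;
}
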
96885diff --git a/lib/debugobjects.c b/lib/debugobjects.c
96886index 547f7f9..a6d4ba0 100644
96887--- a/lib/debugobjects.c
96888+++ b/lib/debugobjects.c
96889@@ -289,7 +289,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
96890 if (limit > 4)
96891 return;
96892
96893- is_on_stack = object_is_on_stack(addr);
96894+ is_on_stack = object_starts_on_stack(addr);
96895 if (is_on_stack == onstack)
96896 return;
96897
96898diff --git a/lib/div64.c b/lib/div64.c
96899index 4382ad7..08aa558 100644
96900--- a/lib/div64.c
96901+++ b/lib/div64.c
96902@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
96903 EXPORT_SYMBOL(__div64_32);
96904
96905 #ifndef div_s64_rem
96906-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
96907+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
96908 {
96909 u64 quotient;
96910
96911@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
96912 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
96913 */
96914 #ifndef div64_u64
96915-u64 div64_u64(u64 dividend, u64 divisor)
96916+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
96917 {
96918 u32 high = divisor >> 32;
96919 u64 quot;
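__intentional_overflow(-1) is consumed by the size_overflow GCC plugin: the argument -1 marks the whole function as deliberately overflow-prone, so the plugin's instrumentation skips it, which is what these 64-bit division helpers need. A sketch of the shape of such an annotation; the attribute spelling follows the plugin, the demo names are not the kernel's:

#ifdef SIZE_OVERFLOW_PLUGIN
# define __demo_intentional_overflow(...) \
	__attribute__((intentional_overflow(__VA_ARGS__)))
#else
# define __demo_intentional_overflow(...)
#endif

typedef unsigned long long demo_u64;

/* wraparound in the intermediate product is by design here */
static demo_u64 __demo_intentional_overflow(-1)
demo_mul_mod(demo_u64 a, demo_u64 b, demo_u64 m)
{
	return (a * b) % m;
}

int main(void)
{
	return demo_mul_mod(3, 5, 7) == 1 ? 0 : 1;
}
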
96920diff --git a/lib/dma-debug.c b/lib/dma-debug.c
96921index 98f2d7e..899da5c 100644
96922--- a/lib/dma-debug.c
96923+++ b/lib/dma-debug.c
96924@@ -971,7 +971,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
96925
96926 void dma_debug_add_bus(struct bus_type *bus)
96927 {
96928- struct notifier_block *nb;
96929+ notifier_block_no_const *nb;
96930
96931 if (global_disable)
96932 return;
96933@@ -1148,7 +1148,7 @@ static void check_unmap(struct dma_debug_entry *ref)
96934
96935 static void check_for_stack(struct device *dev, void *addr)
96936 {
96937- if (object_is_on_stack(addr))
96938+ if (object_starts_on_stack(addr))
96939 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
96940 "stack [addr=%p]\n", addr);
96941 }
96942diff --git a/lib/hash.c b/lib/hash.c
96943index fea973f..386626f 100644
96944--- a/lib/hash.c
96945+++ b/lib/hash.c
96946@@ -14,7 +14,7 @@
96947 #include <linux/hash.h>
96948 #include <linux/cache.h>
96949
96950-static struct fast_hash_ops arch_hash_ops __read_mostly = {
96951+static struct fast_hash_ops arch_hash_ops __read_only = {
96952 .hash = jhash,
96953 .hash2 = jhash2,
96954 };
96955diff --git a/lib/inflate.c b/lib/inflate.c
96956index 013a761..c28f3fc 100644
96957--- a/lib/inflate.c
96958+++ b/lib/inflate.c
96959@@ -269,7 +269,7 @@ static void free(void *where)
96960 malloc_ptr = free_mem_ptr;
96961 }
96962 #else
96963-#define malloc(a) kmalloc(a, GFP_KERNEL)
96964+#define malloc(a) kmalloc((a), GFP_KERNEL)
96965 #define free(a) kfree(a)
96966 #endif
96967
96968diff --git a/lib/ioremap.c b/lib/ioremap.c
96969index 0c9216c..863bd89 100644
96970--- a/lib/ioremap.c
96971+++ b/lib/ioremap.c
96972@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
96973 unsigned long next;
96974
96975 phys_addr -= addr;
96976- pmd = pmd_alloc(&init_mm, pud, addr);
96977+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
96978 if (!pmd)
96979 return -ENOMEM;
96980 do {
96981@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
96982 unsigned long next;
96983
96984 phys_addr -= addr;
96985- pud = pud_alloc(&init_mm, pgd, addr);
96986+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
96987 if (!pud)
96988 return -ENOMEM;
96989 do {
96990diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
96991index bd2bea9..6b3c95e 100644
96992--- a/lib/is_single_threaded.c
96993+++ b/lib/is_single_threaded.c
96994@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
96995 struct task_struct *p, *t;
96996 bool ret;
96997
96998+ if (!mm)
96999+ return true;
97000+
97001 if (atomic_read(&task->signal->live) != 1)
97002 return false;
97003
97004diff --git a/lib/kobject.c b/lib/kobject.c
97005index 58751bb..93a1853 100644
97006--- a/lib/kobject.c
97007+++ b/lib/kobject.c
97008@@ -931,9 +931,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
97009
97010
97011 static DEFINE_SPINLOCK(kobj_ns_type_lock);
97012-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
97013+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
97014
97015-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
97016+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
97017 {
97018 enum kobj_ns_type type = ops->type;
97019 int error;
97020diff --git a/lib/list_debug.c b/lib/list_debug.c
97021index c24c2f7..f0296f4 100644
97022--- a/lib/list_debug.c
97023+++ b/lib/list_debug.c
97024@@ -11,7 +11,9 @@
97025 #include <linux/bug.h>
97026 #include <linux/kernel.h>
97027 #include <linux/rculist.h>
97028+#include <linux/mm.h>
97029
97030+#ifdef CONFIG_DEBUG_LIST
97031 /*
97032 * Insert a new entry between two known consecutive entries.
97033 *
97034@@ -19,21 +21,40 @@
97035 * the prev/next entries already!
97036 */
97037
97038+static bool __list_add_debug(struct list_head *new,
97039+ struct list_head *prev,
97040+ struct list_head *next)
97041+{
97042+ if (unlikely(next->prev != prev)) {
97043+ printk(KERN_ERR "list_add corruption. next->prev should be "
97044+ "prev (%p), but was %p. (next=%p).\n",
97045+ prev, next->prev, next);
97046+ BUG();
97047+ return false;
97048+ }
97049+ if (unlikely(prev->next != next)) {
97050+ printk(KERN_ERR "list_add corruption. prev->next should be "
97051+ "next (%p), but was %p. (prev=%p).\n",
97052+ next, prev->next, prev);
97053+ BUG();
97054+ return false;
97055+ }
97056+ if (unlikely(new == prev || new == next)) {
97057+ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
97058+ new, prev, next);
97059+ BUG();
97060+ return false;
97061+ }
97062+ return true;
97063+}
97064+
97065 void __list_add(struct list_head *new,
97066- struct list_head *prev,
97067- struct list_head *next)
97068+ struct list_head *prev,
97069+ struct list_head *next)
97070 {
97071- WARN(next->prev != prev,
97072- "list_add corruption. next->prev should be "
97073- "prev (%p), but was %p. (next=%p).\n",
97074- prev, next->prev, next);
97075- WARN(prev->next != next,
97076- "list_add corruption. prev->next should be "
97077- "next (%p), but was %p. (prev=%p).\n",
97078- next, prev->next, prev);
97079- WARN(new == prev || new == next,
97080- "list_add double add: new=%p, prev=%p, next=%p.\n",
97081- new, prev, next);
97082+ if (!__list_add_debug(new, prev, next))
97083+ return;
97084+
97085 next->prev = new;
97086 new->next = next;
97087 new->prev = prev;
97088@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
97089 }
97090 EXPORT_SYMBOL(__list_add);
97091
97092-void __list_del_entry(struct list_head *entry)
97093+static bool __list_del_entry_debug(struct list_head *entry)
97094 {
97095 struct list_head *prev, *next;
97096
97097 prev = entry->prev;
97098 next = entry->next;
97099
97100- if (WARN(next == LIST_POISON1,
97101- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
97102- entry, LIST_POISON1) ||
97103- WARN(prev == LIST_POISON2,
97104- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
97105- entry, LIST_POISON2) ||
97106- WARN(prev->next != entry,
97107- "list_del corruption. prev->next should be %p, "
97108- "but was %p\n", entry, prev->next) ||
97109- WARN(next->prev != entry,
97110- "list_del corruption. next->prev should be %p, "
97111- "but was %p\n", entry, next->prev))
97112+ if (unlikely(next == LIST_POISON1)) {
97113+ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
97114+ entry, LIST_POISON1);
97115+ BUG();
97116+ return false;
97117+ }
97118+ if (unlikely(prev == LIST_POISON2)) {
97119+ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
97120+ entry, LIST_POISON2);
97121+ BUG();
97122+ return false;
97123+ }
97124+ if (unlikely(entry->prev->next != entry)) {
97125+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
97126+ "but was %p\n", entry, prev->next);
97127+ BUG();
97128+ return false;
97129+ }
97130+ if (unlikely(entry->next->prev != entry)) {
97131+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
97132+ "but was %p\n", entry, next->prev);
97133+ BUG();
97134+ return false;
97135+ }
97136+ return true;
97137+}
97138+
97139+void __list_del_entry(struct list_head *entry)
97140+{
97141+ if (!__list_del_entry_debug(entry))
97142 return;
97143
97144- __list_del(prev, next);
97145+ __list_del(entry->prev, entry->next);
97146 }
97147 EXPORT_SYMBOL(__list_del_entry);
97148
97149@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
97150 void __list_add_rcu(struct list_head *new,
97151 struct list_head *prev, struct list_head *next)
97152 {
97153- WARN(next->prev != prev,
97154- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
97155- prev, next->prev, next);
97156- WARN(prev->next != next,
97157- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
97158- next, prev->next, prev);
97159+ if (!__list_add_debug(new, prev, next))
97160+ return;
97161+
97162 new->next = next;
97163 new->prev = prev;
97164 rcu_assign_pointer(list_next_rcu(prev), new);
97165 next->prev = new;
97166 }
97167 EXPORT_SYMBOL(__list_add_rcu);
97168+#endif
97169+
97170+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
97171+{
97172+#ifdef CONFIG_DEBUG_LIST
97173+ if (!__list_add_debug(new, prev, next))
97174+ return;
97175+#endif
97176+
97177+ pax_open_kernel();
97178+ next->prev = new;
97179+ new->next = next;
97180+ new->prev = prev;
97181+ prev->next = new;
97182+ pax_close_kernel();
97183+}
97184+EXPORT_SYMBOL(__pax_list_add);
97185+
97186+void pax_list_del(struct list_head *entry)
97187+{
97188+#ifdef CONFIG_DEBUG_LIST
97189+ if (!__list_del_entry_debug(entry))
97190+ return;
97191+#endif
97192+
97193+ pax_open_kernel();
97194+ __list_del(entry->prev, entry->next);
97195+ entry->next = LIST_POISON1;
97196+ entry->prev = LIST_POISON2;
97197+ pax_close_kernel();
97198+}
97199+EXPORT_SYMBOL(pax_list_del);
97200+
97201+void pax_list_del_init(struct list_head *entry)
97202+{
97203+ pax_open_kernel();
97204+ __list_del(entry->prev, entry->next);
97205+ INIT_LIST_HEAD(entry);
97206+ pax_close_kernel();
97207+}
97208+EXPORT_SYMBOL(pax_list_del_init);
97209+
97210+void __pax_list_add_rcu(struct list_head *new,
97211+ struct list_head *prev, struct list_head *next)
97212+{
97213+#ifdef CONFIG_DEBUG_LIST
97214+ if (!__list_add_debug(new, prev, next))
97215+ return;
97216+#endif
97217+
97218+ pax_open_kernel();
97219+ new->next = next;
97220+ new->prev = prev;
97221+ rcu_assign_pointer(list_next_rcu(prev), new);
97222+ next->prev = new;
97223+ pax_close_kernel();
97224+}
97225+EXPORT_SYMBOL(__pax_list_add_rcu);
97226+
97227+void pax_list_del_rcu(struct list_head *entry)
97228+{
97229+#ifdef CONFIG_DEBUG_LIST
97230+ if (!__list_del_entry_debug(entry))
97231+ return;
97232+#endif
97233+
97234+ pax_open_kernel();
97235+ __list_del(entry->prev, entry->next);
97236+ entry->next = LIST_POISON1;
97237+ entry->prev = LIST_POISON2;
97238+ pax_close_kernel();
97239+}
97240+EXPORT_SYMBOL(pax_list_del_rcu);
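Two changes are folded into this file: the corruption checks now BUG() instead of WARN(), so a corrupted list stops the machine rather than being linked anyway, and the new pax_list_* helpers wrap the pointer updates in pax_open_kernel()/pax_close_kernel() for lists whose nodes live in read-only memory. A userspace sketch of the hardened add path, with abort() standing in for BUG():

#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

static void demo_list_add(struct list_head *new,
			  struct list_head *prev, struct list_head *next)
{
	if (next->prev != prev || prev->next != next ||
	    new == prev || new == next) {
		fprintf(stderr, "list_add corruption\n");
		abort();               /* BUG() in the kernel version */
	}
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct list_head node;

	demo_list_add(&node, &head, head.next);
	printf("%s\n", head.next == &node ? "linked" : "broken");
	return 0;
}
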
97241diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
97242index 963b703..438bc51 100644
97243--- a/lib/percpu-refcount.c
97244+++ b/lib/percpu-refcount.c
97245@@ -29,7 +29,7 @@
97246 * can't hit 0 before we've added up all the percpu refs.
97247 */
97248
97249-#define PCPU_COUNT_BIAS (1U << 31)
97250+#define PCPU_COUNT_BIAS (1U << 30)
97251
97252 /**
97253 * percpu_ref_init - initialize a percpu refcount
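Halving PCPU_COUNT_BIAS from 1U << 31 to 1U << 30 keeps the biased refcount within the positive range of a signed 32-bit value; with overflow-checking atomics enabled, a bias with the sign bit already set would itself look like an overflowed count. The sign-bit issue in miniature (the int conversion is implementation-defined, negative on the two's-complement targets Linux runs on):

#include <stdio.h>

int main(void)
{
	unsigned int bias_old = 1U << 31;   /* sign bit already set    */
	unsigned int bias_new = 1U << 30;   /* stays positive as s32   */

	printf("old: %d  new: %d\n", (int)bias_old, (int)bias_new);
	return 0;
}
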
97254diff --git a/lib/radix-tree.c b/lib/radix-tree.c
97255index 3291a8e..346a91e 100644
97256--- a/lib/radix-tree.c
97257+++ b/lib/radix-tree.c
97258@@ -67,7 +67,7 @@ struct radix_tree_preload {
97259 int nr;
97260 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
97261 };
97262-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
97263+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
97264
97265 static inline void *ptr_to_indirect(void *ptr)
97266 {
97267diff --git a/lib/random32.c b/lib/random32.c
97268index fa5da61..35fe9af 100644
97269--- a/lib/random32.c
97270+++ b/lib/random32.c
97271@@ -42,7 +42,7 @@
97272 static void __init prandom_state_selftest(void);
97273 #endif
97274
97275-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
97276+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
97277
97278 /**
97279 * prandom_u32_state - seeded pseudo-random number generator.
97280diff --git a/lib/rbtree.c b/lib/rbtree.c
97281index 65f4eff..2cfa167 100644
97282--- a/lib/rbtree.c
97283+++ b/lib/rbtree.c
97284@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
97285 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
97286
97287 static const struct rb_augment_callbacks dummy_callbacks = {
97288- dummy_propagate, dummy_copy, dummy_rotate
97289+ .propagate = dummy_propagate,
97290+ .copy = dummy_copy,
97291+ .rotate = dummy_rotate
97292 };
97293
97294 void rb_insert_color(struct rb_node *node, struct rb_root *root)
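Switching dummy_callbacks to designated initializers decouples the table from the field order of struct rb_augment_callbacks, which matters once plugins such as randstruct may reorder structure layouts. The same style in a minimal sketch:

struct demo_augment_callbacks {
	void (*propagate)(int start);
	void (*copy)(int old);
	void (*rotate)(int old);
};

static void demo_nop(int arg) { (void)arg; }

/* field order in the struct can change without breaking this */
static const struct demo_augment_callbacks demo_dummy = {
	.propagate = demo_nop,
	.copy      = demo_nop,
	.rotate    = demo_nop,
};

int main(void)
{
	demo_dummy.rotate(0);
	return 0;
}
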
97295diff --git a/lib/show_mem.c b/lib/show_mem.c
97296index 0922579..9d7adb9 100644
97297--- a/lib/show_mem.c
97298+++ b/lib/show_mem.c
97299@@ -44,6 +44,6 @@ void show_mem(unsigned int filter)
97300 quicklist_total_size());
97301 #endif
97302 #ifdef CONFIG_MEMORY_FAILURE
97303- printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
97304+ printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages));
97305 #endif
97306 }
97307diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
97308index bb2b201..46abaf9 100644
97309--- a/lib/strncpy_from_user.c
97310+++ b/lib/strncpy_from_user.c
97311@@ -21,7 +21,7 @@
97312 */
97313 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
97314 {
97315- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
97316+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
97317 long res = 0;
97318
97319 /*
97320diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
97321index a28df52..3d55877 100644
97322--- a/lib/strnlen_user.c
97323+++ b/lib/strnlen_user.c
97324@@ -26,7 +26,7 @@
97325 */
97326 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
97327 {
97328- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
97329+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
97330 long align, res = 0;
97331 unsigned long c;
97332
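Making the word_at_a_time constants static const in both helpers moves them out of each call's stack frame and into .rodata, where a single copy is shared, write-protected, and out of reach of stack overwrites. The shape of the change in miniature; the structure contents here are illustrative, not the kernel's exact masks:

#include <stdio.h>

struct demo_word_at_a_time {
	unsigned long long one_bits, high_bits;
};

#define DEMO_WORD_AT_A_TIME_CONSTANTS \
	{ 0x0101010101010101ULL, 0x8080808080808080ULL }

static long demo_strnlen(const char *s, long max)
{
	/* static const: one read-only copy, no per-call stack image */
	static const struct demo_word_at_a_time constants =
		DEMO_WORD_AT_A_TIME_CONSTANTS;
	long n = 0;

	(void)constants;
	while (n < max && s[n])
		n++;
	return n;
}

int main(void)
{
	printf("%ld\n", demo_strnlen("grsec", 16));
	return 0;
}
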
97333diff --git a/lib/swiotlb.c b/lib/swiotlb.c
97334index 4abda07..b9d3765 100644
97335--- a/lib/swiotlb.c
97336+++ b/lib/swiotlb.c
97337@@ -682,7 +682,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
97338
97339 void
97340 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
97341- dma_addr_t dev_addr)
97342+ dma_addr_t dev_addr, struct dma_attrs *attrs)
97343 {
97344 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
97345
97346diff --git a/lib/test_bpf.c b/lib/test_bpf.c
97347deleted file mode 100644
97348index c579e0f..0000000
97349--- a/lib/test_bpf.c
97350+++ /dev/null
97351@@ -1,1929 +0,0 @@
97352-/*
97353- * Testsuite for BPF interpreter and BPF JIT compiler
97354- *
97355- * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
97356- *
97357- * This program is free software; you can redistribute it and/or
97358- * modify it under the terms of version 2 of the GNU General Public
97359- * License as published by the Free Software Foundation.
97360- *
97361- * This program is distributed in the hope that it will be useful, but
97362- * WITHOUT ANY WARRANTY; without even the implied warranty of
97363- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
97364- * General Public License for more details.
97365- */
97366-
97367-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
97368-
97369-#include <linux/init.h>
97370-#include <linux/module.h>
97371-#include <linux/filter.h>
97372-#include <linux/skbuff.h>
97373-#include <linux/netdevice.h>
97374-#include <linux/if_vlan.h>
97375-
97376-/* General test specific settings */
97377-#define MAX_SUBTESTS 3
97378-#define MAX_TESTRUNS 10000
97379-#define MAX_DATA 128
97380-#define MAX_INSNS 512
97381-#define MAX_K 0xffffFFFF
97382-
97383-/* Few constants used to init test 'skb' */
97384-#define SKB_TYPE 3
97385-#define SKB_MARK 0x1234aaaa
97386-#define SKB_HASH 0x1234aaab
97387-#define SKB_QUEUE_MAP 123
97388-#define SKB_VLAN_TCI 0xffff
97389-#define SKB_DEV_IFINDEX 577
97390-#define SKB_DEV_TYPE 588
97391-
97392-/* Redefine REGs to make tests less verbose */
97393-#define R0 BPF_REG_0
97394-#define R1 BPF_REG_1
97395-#define R2 BPF_REG_2
97396-#define R3 BPF_REG_3
97397-#define R4 BPF_REG_4
97398-#define R5 BPF_REG_5
97399-#define R6 BPF_REG_6
97400-#define R7 BPF_REG_7
97401-#define R8 BPF_REG_8
97402-#define R9 BPF_REG_9
97403-#define R10 BPF_REG_10
97404-
97405-/* Flags that can be passed to test cases */
97406-#define FLAG_NO_DATA BIT(0)
97407-#define FLAG_EXPECTED_FAIL BIT(1)
97408-
97409-enum {
97410- CLASSIC = BIT(6), /* Old BPF instructions only. */
97411- INTERNAL = BIT(7), /* Extended instruction set. */
97412-};
97413-
97414-#define TEST_TYPE_MASK (CLASSIC | INTERNAL)
97415-
97416-struct bpf_test {
97417- const char *descr;
97418- union {
97419- struct sock_filter insns[MAX_INSNS];
97420- struct sock_filter_int insns_int[MAX_INSNS];
97421- } u;
97422- __u8 aux;
97423- __u8 data[MAX_DATA];
97424- struct {
97425- int data_size;
97426- __u32 result;
97427- } test[MAX_SUBTESTS];
97428-};
97429-
97430-static struct bpf_test tests[] = {
97431- {
97432- "TAX",
97433- .u.insns = {
97434- BPF_STMT(BPF_LD | BPF_IMM, 1),
97435- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97436- BPF_STMT(BPF_LD | BPF_IMM, 2),
97437- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
97438- BPF_STMT(BPF_ALU | BPF_NEG, 0), /* A == -3 */
97439- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97440- BPF_STMT(BPF_LD | BPF_LEN, 0),
97441- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
97442- BPF_STMT(BPF_MISC | BPF_TAX, 0), /* X == len - 3 */
97443- BPF_STMT(BPF_LD | BPF_B | BPF_IND, 1),
97444- BPF_STMT(BPF_RET | BPF_A, 0)
97445- },
97446- CLASSIC,
97447- { 10, 20, 30, 40, 50 },
97448- { { 2, 10 }, { 3, 20 }, { 4, 30 } },
97449- },
97450- {
97451- "TXA",
97452- .u.insns = {
97453- BPF_STMT(BPF_LDX | BPF_LEN, 0),
97454- BPF_STMT(BPF_MISC | BPF_TXA, 0),
97455- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
97456- BPF_STMT(BPF_RET | BPF_A, 0) /* A == len * 2 */
97457- },
97458- CLASSIC,
97459- { 10, 20, 30, 40, 50 },
97460- { { 1, 2 }, { 3, 6 }, { 4, 8 } },
97461- },
97462- {
97463- "ADD_SUB_MUL_K",
97464- .u.insns = {
97465- BPF_STMT(BPF_LD | BPF_IMM, 1),
97466- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 2),
97467- BPF_STMT(BPF_LDX | BPF_IMM, 3),
97468- BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
97469- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 0xffffffff),
97470- BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 3),
97471- BPF_STMT(BPF_RET | BPF_A, 0)
97472- },
97473- CLASSIC | FLAG_NO_DATA,
97474- { },
97475- { { 0, 0xfffffffd } }
97476- },
97477- {
97478- "DIV_KX",
97479- .u.insns = {
97480- BPF_STMT(BPF_LD | BPF_IMM, 8),
97481- BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 2),
97482- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97483- BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
97484- BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0),
97485- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97486- BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
97487- BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x70000000),
97488- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
97489- BPF_STMT(BPF_RET | BPF_A, 0)
97490- },
97491- CLASSIC | FLAG_NO_DATA,
97492- { },
97493- { { 0, 0x40000001 } }
97494- },
97495- {
97496- "AND_OR_LSH_K",
97497- .u.insns = {
97498- BPF_STMT(BPF_LD | BPF_IMM, 0xff),
97499- BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0),
97500- BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 27),
97501- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97502- BPF_STMT(BPF_LD | BPF_IMM, 0xf),
97503- BPF_STMT(BPF_ALU | BPF_OR | BPF_K, 0xf0),
97504- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
97505- BPF_STMT(BPF_RET | BPF_A, 0)
97506- },
97507- CLASSIC | FLAG_NO_DATA,
97508- { },
97509- { { 0, 0x800000ff }, { 1, 0x800000ff } },
97510- },
97511- {
97512- "LD_IMM_0",
97513- .u.insns = {
97514- BPF_STMT(BPF_LD | BPF_IMM, 0), /* ld #0 */
97515- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0, 1, 0),
97516- BPF_STMT(BPF_RET | BPF_K, 0),
97517- BPF_STMT(BPF_RET | BPF_K, 1),
97518- },
97519- CLASSIC,
97520- { },
97521- { { 1, 1 } },
97522- },
97523- {
97524- "LD_IND",
97525- .u.insns = {
97526- BPF_STMT(BPF_LDX | BPF_LEN, 0),
97527- BPF_STMT(BPF_LD | BPF_H | BPF_IND, MAX_K),
97528- BPF_STMT(BPF_RET | BPF_K, 1)
97529- },
97530- CLASSIC,
97531- { },
97532- { { 1, 0 }, { 10, 0 }, { 60, 0 } },
97533- },
97534- {
97535- "LD_ABS",
97536- .u.insns = {
97537- BPF_STMT(BPF_LD | BPF_W | BPF_ABS, 1000),
97538- BPF_STMT(BPF_RET | BPF_K, 1)
97539- },
97540- CLASSIC,
97541- { },
97542- { { 1, 0 }, { 10, 0 }, { 60, 0 } },
97543- },
97544- {
97545- "LD_ABS_LL",
97546- .u.insns = {
97547- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF),
97548- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97549- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF + 1),
97550- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
97551- BPF_STMT(BPF_RET | BPF_A, 0)
97552- },
97553- CLASSIC,
97554- { 1, 2, 3 },
97555- { { 1, 0 }, { 2, 3 } },
97556- },
97557- {
97558- "LD_IND_LL",
97559- .u.insns = {
97560- BPF_STMT(BPF_LD | BPF_IMM, SKF_LL_OFF - 1),
97561- BPF_STMT(BPF_LDX | BPF_LEN, 0),
97562- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
97563- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97564- BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
97565- BPF_STMT(BPF_RET | BPF_A, 0)
97566- },
97567- CLASSIC,
97568- { 1, 2, 3, 0xff },
97569- { { 1, 1 }, { 3, 3 }, { 4, 0xff } },
97570- },
97571- {
97572- "LD_ABS_NET",
97573- .u.insns = {
97574- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF),
97575- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97576- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 1),
97577- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
97578- BPF_STMT(BPF_RET | BPF_A, 0)
97579- },
97580- CLASSIC,
97581- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
97582- { { 15, 0 }, { 16, 3 } },
97583- },
97584- {
97585- "LD_IND_NET",
97586- .u.insns = {
97587- BPF_STMT(BPF_LD | BPF_IMM, SKF_NET_OFF - 15),
97588- BPF_STMT(BPF_LDX | BPF_LEN, 0),
97589- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
97590- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97591- BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
97592- BPF_STMT(BPF_RET | BPF_A, 0)
97593- },
97594- CLASSIC,
97595- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
97596- { { 14, 0 }, { 15, 1 }, { 17, 3 } },
97597- },
97598- {
97599- "LD_PKTTYPE",
97600- .u.insns = {
97601- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97602- SKF_AD_OFF + SKF_AD_PKTTYPE),
97603- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
97604- BPF_STMT(BPF_RET | BPF_K, 1),
97605- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97606- SKF_AD_OFF + SKF_AD_PKTTYPE),
97607- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
97608- BPF_STMT(BPF_RET | BPF_K, 1),
97609- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97610- SKF_AD_OFF + SKF_AD_PKTTYPE),
97611- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
97612- BPF_STMT(BPF_RET | BPF_K, 1),
97613- BPF_STMT(BPF_RET | BPF_A, 0)
97614- },
97615- CLASSIC,
97616- { },
97617- { { 1, 3 }, { 10, 3 } },
97618- },
97619- {
97620- "LD_MARK",
97621- .u.insns = {
97622- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97623- SKF_AD_OFF + SKF_AD_MARK),
97624- BPF_STMT(BPF_RET | BPF_A, 0)
97625- },
97626- CLASSIC,
97627- { },
97628- { { 1, SKB_MARK}, { 10, SKB_MARK} },
97629- },
97630- {
97631- "LD_RXHASH",
97632- .u.insns = {
97633- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97634- SKF_AD_OFF + SKF_AD_RXHASH),
97635- BPF_STMT(BPF_RET | BPF_A, 0)
97636- },
97637- CLASSIC,
97638- { },
97639- { { 1, SKB_HASH}, { 10, SKB_HASH} },
97640- },
97641- {
97642- "LD_QUEUE",
97643- .u.insns = {
97644- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97645- SKF_AD_OFF + SKF_AD_QUEUE),
97646- BPF_STMT(BPF_RET | BPF_A, 0)
97647- },
97648- CLASSIC,
97649- { },
97650- { { 1, SKB_QUEUE_MAP }, { 10, SKB_QUEUE_MAP } },
97651- },
97652- {
97653- "LD_PROTOCOL",
97654- .u.insns = {
97655- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 1),
97656- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 20, 1, 0),
97657- BPF_STMT(BPF_RET | BPF_K, 0),
97658- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97659- SKF_AD_OFF + SKF_AD_PROTOCOL),
97660- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97661- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
97662- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 30, 1, 0),
97663- BPF_STMT(BPF_RET | BPF_K, 0),
97664- BPF_STMT(BPF_MISC | BPF_TXA, 0),
97665- BPF_STMT(BPF_RET | BPF_A, 0)
97666- },
97667- CLASSIC,
97668- { 10, 20, 30 },
97669- { { 10, ETH_P_IP }, { 100, ETH_P_IP } },
97670- },
97671- {
97672- "LD_VLAN_TAG",
97673- .u.insns = {
97674- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97675- SKF_AD_OFF + SKF_AD_VLAN_TAG),
97676- BPF_STMT(BPF_RET | BPF_A, 0)
97677- },
97678- CLASSIC,
97679- { },
97680- {
97681- { 1, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT },
97682- { 10, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT }
97683- },
97684- },
97685- {
97686- "LD_VLAN_TAG_PRESENT",
97687- .u.insns = {
97688- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97689- SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT),
97690- BPF_STMT(BPF_RET | BPF_A, 0)
97691- },
97692- CLASSIC,
97693- { },
97694- {
97695- { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
97696- { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
97697- },
97698- },
97699- {
97700- "LD_IFINDEX",
97701- .u.insns = {
97702- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97703- SKF_AD_OFF + SKF_AD_IFINDEX),
97704- BPF_STMT(BPF_RET | BPF_A, 0)
97705- },
97706- CLASSIC,
97707- { },
97708- { { 1, SKB_DEV_IFINDEX }, { 10, SKB_DEV_IFINDEX } },
97709- },
97710- {
97711- "LD_HATYPE",
97712- .u.insns = {
97713- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97714- SKF_AD_OFF + SKF_AD_HATYPE),
97715- BPF_STMT(BPF_RET | BPF_A, 0)
97716- },
97717- CLASSIC,
97718- { },
97719- { { 1, SKB_DEV_TYPE }, { 10, SKB_DEV_TYPE } },
97720- },
97721- {
97722- "LD_CPU",
97723- .u.insns = {
97724- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97725- SKF_AD_OFF + SKF_AD_CPU),
97726- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97727- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97728- SKF_AD_OFF + SKF_AD_CPU),
97729- BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
97730- BPF_STMT(BPF_RET | BPF_A, 0)
97731- },
97732- CLASSIC,
97733- { },
97734- { { 1, 0 }, { 10, 0 } },
97735- },
97736- {
97737- "LD_NLATTR",
97738- .u.insns = {
97739- BPF_STMT(BPF_LDX | BPF_IMM, 2),
97740- BPF_STMT(BPF_MISC | BPF_TXA, 0),
97741- BPF_STMT(BPF_LDX | BPF_IMM, 3),
97742- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97743- SKF_AD_OFF + SKF_AD_NLATTR),
97744- BPF_STMT(BPF_RET | BPF_A, 0)
97745- },
97746- CLASSIC,
97747-#ifdef __BIG_ENDIAN
97748- { 0xff, 0xff, 0, 4, 0, 2, 0, 4, 0, 3 },
97749-#else
97750- { 0xff, 0xff, 4, 0, 2, 0, 4, 0, 3, 0 },
97751-#endif
97752- { { 4, 0 }, { 20, 6 } },
97753- },
97754- {
97755- "LD_NLATTR_NEST",
97756- .u.insns = {
97757- BPF_STMT(BPF_LD | BPF_IMM, 2),
97758- BPF_STMT(BPF_LDX | BPF_IMM, 3),
97759- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97760- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
97761- BPF_STMT(BPF_LD | BPF_IMM, 2),
97762- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97763- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
97764- BPF_STMT(BPF_LD | BPF_IMM, 2),
97765- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97766- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
97767- BPF_STMT(BPF_LD | BPF_IMM, 2),
97768- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97769- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
97770- BPF_STMT(BPF_LD | BPF_IMM, 2),
97771- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97772- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
97773- BPF_STMT(BPF_LD | BPF_IMM, 2),
97774- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97775- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
97776- BPF_STMT(BPF_LD | BPF_IMM, 2),
97777- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97778- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
97779- BPF_STMT(BPF_LD | BPF_IMM, 2),
97780- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97781- SKF_AD_OFF + SKF_AD_NLATTR_NEST),
97782- BPF_STMT(BPF_RET | BPF_A, 0)
97783- },
97784- CLASSIC,
97785-#ifdef __BIG_ENDIAN
97786- { 0xff, 0xff, 0, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3 },
97787-#else
97788- { 0xff, 0xff, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3, 0 },
97789-#endif
97790- { { 4, 0 }, { 20, 10 } },
97791- },
97792- {
97793- "LD_PAYLOAD_OFF",
97794- .u.insns = {
97795- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97796- SKF_AD_OFF + SKF_AD_PAY_OFFSET),
97797- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97798- SKF_AD_OFF + SKF_AD_PAY_OFFSET),
97799- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97800- SKF_AD_OFF + SKF_AD_PAY_OFFSET),
97801- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97802- SKF_AD_OFF + SKF_AD_PAY_OFFSET),
97803- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97804- SKF_AD_OFF + SKF_AD_PAY_OFFSET),
97805- BPF_STMT(BPF_RET | BPF_A, 0)
97806- },
97807- CLASSIC,
97808- /* 00:00:00:00:00:00 > 00:00:00:00:00:00, ethtype IPv4 (0x0800),
97809- * length 98: 127.0.0.1 > 127.0.0.1: ICMP echo request,
97810- * id 9737, seq 1, length 64
97811- */
97812- { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
97813- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
97814- 0x08, 0x00,
97815- 0x45, 0x00, 0x00, 0x54, 0xac, 0x8b, 0x40, 0x00, 0x40,
97816- 0x01, 0x90, 0x1b, 0x7f, 0x00, 0x00, 0x01 },
97817- { { 30, 0 }, { 100, 42 } },
97818- },
97819- {
97820- "LD_ANC_XOR",
97821- .u.insns = {
97822- BPF_STMT(BPF_LD | BPF_IMM, 10),
97823- BPF_STMT(BPF_LDX | BPF_IMM, 300),
97824- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
97825- SKF_AD_OFF + SKF_AD_ALU_XOR_X),
97826- BPF_STMT(BPF_RET | BPF_A, 0)
97827- },
97828- CLASSIC,
97829- { },
97830- { { 4, 10 ^ 300 }, { 20, 10 ^ 300 } },
97831- },
97832- {
97833- "SPILL_FILL",
97834- .u.insns = {
97835- BPF_STMT(BPF_LDX | BPF_LEN, 0),
97836- BPF_STMT(BPF_LD | BPF_IMM, 2),
97837- BPF_STMT(BPF_ALU | BPF_RSH, 1),
97838- BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
97839- BPF_STMT(BPF_ST, 1), /* M1 = 1 ^ len */
97840- BPF_STMT(BPF_ALU | BPF_XOR | BPF_K, 0x80000000),
97841- BPF_STMT(BPF_ST, 2), /* M2 = 1 ^ len ^ 0x80000000 */
97842- BPF_STMT(BPF_STX, 15), /* M3 = len */
97843- BPF_STMT(BPF_LDX | BPF_MEM, 1),
97844- BPF_STMT(BPF_LD | BPF_MEM, 2),
97845- BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
97846- BPF_STMT(BPF_LDX | BPF_MEM, 15),
97847- BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
97848- BPF_STMT(BPF_RET | BPF_A, 0)
97849- },
97850- CLASSIC,
97851- { },
97852- { { 1, 0x80000001 }, { 2, 0x80000002 }, { 60, 0x80000000 ^ 60 } }
97853- },
97854- {
97855- "JEQ",
97856- .u.insns = {
97857- BPF_STMT(BPF_LDX | BPF_LEN, 0),
97858- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
97859- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 1),
97860- BPF_STMT(BPF_RET | BPF_K, 1),
97861- BPF_STMT(BPF_RET | BPF_K, MAX_K)
97862- },
97863- CLASSIC,
97864- { 3, 3, 3, 3, 3 },
97865- { { 1, 0 }, { 3, 1 }, { 4, MAX_K } },
97866- },
97867- {
97868- "JGT",
97869- .u.insns = {
97870- BPF_STMT(BPF_LDX | BPF_LEN, 0),
97871- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
97872- BPF_JUMP(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 1),
97873- BPF_STMT(BPF_RET | BPF_K, 1),
97874- BPF_STMT(BPF_RET | BPF_K, MAX_K)
97875- },
97876- CLASSIC,
97877- { 4, 4, 4, 3, 3 },
97878- { { 2, 0 }, { 3, 1 }, { 4, MAX_K } },
97879- },
97880- {
97881- "JGE",
97882- .u.insns = {
97883- BPF_STMT(BPF_LDX | BPF_LEN, 0),
97884- BPF_STMT(BPF_LD | BPF_B | BPF_IND, MAX_K),
97885- BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 1, 1, 0),
97886- BPF_STMT(BPF_RET | BPF_K, 10),
97887- BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 2, 1, 0),
97888- BPF_STMT(BPF_RET | BPF_K, 20),
97889- BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 3, 1, 0),
97890- BPF_STMT(BPF_RET | BPF_K, 30),
97891- BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 4, 1, 0),
97892- BPF_STMT(BPF_RET | BPF_K, 40),
97893- BPF_STMT(BPF_RET | BPF_K, MAX_K)
97894- },
97895- CLASSIC,
97896- { 1, 2, 3, 4, 5 },
97897- { { 1, 20 }, { 3, 40 }, { 5, MAX_K } },
97898- },
97899- {
97900- "JSET",
97901- .u.insns = {
97902- BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
97903- BPF_JUMP(BPF_JMP | BPF_JA, 1, 1, 1),
97904- BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
97905- BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
97906- BPF_STMT(BPF_LDX | BPF_LEN, 0),
97907- BPF_STMT(BPF_MISC | BPF_TXA, 0),
97908- BPF_STMT(BPF_ALU | BPF_SUB | BPF_K, 4),
97909- BPF_STMT(BPF_MISC | BPF_TAX, 0),
97910- BPF_STMT(BPF_LD | BPF_W | BPF_IND, 0),
97911- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 1, 0, 1),
97912- BPF_STMT(BPF_RET | BPF_K, 10),
97913- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x80000000, 0, 1),
97914- BPF_STMT(BPF_RET | BPF_K, 20),
97915- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
97916- BPF_STMT(BPF_RET | BPF_K, 30),
97917- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
97918- BPF_STMT(BPF_RET | BPF_K, 30),
97919- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
97920- BPF_STMT(BPF_RET | BPF_K, 30),
97921- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
97922- BPF_STMT(BPF_RET | BPF_K, 30),
97923- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
97924- BPF_STMT(BPF_RET | BPF_K, 30),
97925- BPF_STMT(BPF_RET | BPF_K, MAX_K)
97926- },
97927- CLASSIC,
97928- { 0, 0xAA, 0x55, 1 },
97929- { { 4, 10 }, { 5, 20 }, { 6, MAX_K } },
97930- },
97931- {
97932- "tcpdump port 22",
97933- .u.insns = {
97934- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
97935- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 0, 8), /* IPv6 */
97936- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 20),
97937- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0),
97938- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0),
97939- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 17),
97940- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 54),
97941- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 14, 0),
97942- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 56),
97943- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 12, 13),
97944- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 12), /* IPv4 */
97945- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
97946- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0),
97947- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0),
97948- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 8),
97949- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20),
97950- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 6, 0),
97951- BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
97952- BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14),
97953- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0),
97954- BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16),
97955- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 1),
97956- BPF_STMT(BPF_RET | BPF_K, 0xffff),
97957- BPF_STMT(BPF_RET | BPF_K, 0),
97958- },
97959- CLASSIC,
97960- /* 3c:07:54:43:e5:76 > 10:bf:48:d6:43:d6, ethertype IPv4(0x0800)
97961- * length 114: 10.1.1.149.49700 > 10.1.2.10.22: Flags [P.],
97962- * seq 1305692979:1305693027, ack 3650467037, win 65535,
97963- * options [nop,nop,TS val 2502645400 ecr 3971138], length 48
97964- */
97965- { 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
97966- 0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
97967- 0x08, 0x00,
97968- 0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
97969- 0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
97970- 0x0a, 0x01, 0x01, 0x95, /* ip src */
97971- 0x0a, 0x01, 0x02, 0x0a, /* ip dst */
97972- 0xc2, 0x24,
97973- 0x00, 0x16 /* dst port */ },
97974- { { 10, 0 }, { 30, 0 }, { 100, 65535 } },
97975- },
97976- {
97977- "tcpdump complex",
97978- .u.insns = {
97979- /* tcpdump -nei eth0 'tcp port 22 and (((ip[2:2] -
97980- * ((ip[0]&0xf)<<2)) - ((tcp[12]&0xf0)>>2)) != 0) and
97981- * (len > 115 or len < 30000000000)' -d
97982- */
97983- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
97984- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 30, 0),
97985- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x800, 0, 29),
97986- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
97987- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 0, 27),
97988- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20),
97989- BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 25, 0),
97990- BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
97991- BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14),
97992- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0),
97993- BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16),
97994- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 20),
97995- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 16),
97996- BPF_STMT(BPF_ST, 1),
97997- BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 14),
97998- BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf),
97999- BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 2),
98000- BPF_STMT(BPF_MISC | BPF_TAX, 0x5), /* libpcap emits K on TAX */
98001- BPF_STMT(BPF_LD | BPF_MEM, 1),
98002- BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
98003- BPF_STMT(BPF_ST, 5),
98004- BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
98005- BPF_STMT(BPF_LD | BPF_B | BPF_IND, 26),
98006- BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0),
98007- BPF_STMT(BPF_ALU | BPF_RSH | BPF_K, 2),
98008- BPF_STMT(BPF_MISC | BPF_TAX, 0x9), /* libpcap emits K on TAX */
98009- BPF_STMT(BPF_LD | BPF_MEM, 5),
98010- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 4, 0),
98011- BPF_STMT(BPF_LD | BPF_LEN, 0),
98012- BPF_JUMP(BPF_JMP | BPF_JGT | BPF_K, 0x73, 1, 0),
98013- BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 0xfc23ac00, 1, 0),
98014- BPF_STMT(BPF_RET | BPF_K, 0xffff),
98015- BPF_STMT(BPF_RET | BPF_K, 0),
98016- },
98017- CLASSIC,
98018- { 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
98019- 0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
98020- 0x08, 0x00,
98021- 0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
98022- 0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
98023- 0x0a, 0x01, 0x01, 0x95, /* ip src */
98024- 0x0a, 0x01, 0x02, 0x0a, /* ip dst */
98025- 0xc2, 0x24,
98026- 0x00, 0x16 /* dst port */ },
98027- { { 10, 0 }, { 30, 0 }, { 100, 65535 } },
98028- },
98029- {
98030- "RET_A",
98031- .u.insns = {
98032-		/* check that uninitialized X and A contain zeros */
98033- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98034- BPF_STMT(BPF_RET | BPF_A, 0)
98035- },
98036- CLASSIC,
98037- { },
98038- { {1, 0}, {2, 0} },
98039- },
98040- {
98041- "INT: ADD trivial",
98042- .u.insns_int = {
98043- BPF_ALU64_IMM(BPF_MOV, R1, 1),
98044- BPF_ALU64_IMM(BPF_ADD, R1, 2),
98045- BPF_ALU64_IMM(BPF_MOV, R2, 3),
98046- BPF_ALU64_REG(BPF_SUB, R1, R2),
98047- BPF_ALU64_IMM(BPF_ADD, R1, -1),
98048- BPF_ALU64_IMM(BPF_MUL, R1, 3),
98049- BPF_ALU64_REG(BPF_MOV, R0, R1),
98050- BPF_EXIT_INSN(),
98051- },
98052- INTERNAL,
98053- { },
98054- { { 0, 0xfffffffd } }
98055- },
98056- {
98057- "INT: MUL_X",
98058- .u.insns_int = {
98059- BPF_ALU64_IMM(BPF_MOV, R0, -1),
98060- BPF_ALU64_IMM(BPF_MOV, R1, -1),
98061- BPF_ALU64_IMM(BPF_MOV, R2, 3),
98062- BPF_ALU64_REG(BPF_MUL, R1, R2),
98063- BPF_JMP_IMM(BPF_JEQ, R1, 0xfffffffd, 1),
98064- BPF_EXIT_INSN(),
98065- BPF_ALU64_IMM(BPF_MOV, R0, 1),
98066- BPF_EXIT_INSN(),
98067- },
98068- INTERNAL,
98069- { },
98070- { { 0, 1 } }
98071- },
98072- {
98073- "INT: MUL_X2",
98074- .u.insns_int = {
98075- BPF_ALU32_IMM(BPF_MOV, R0, -1),
98076- BPF_ALU32_IMM(BPF_MOV, R1, -1),
98077- BPF_ALU32_IMM(BPF_MOV, R2, 3),
98078- BPF_ALU64_REG(BPF_MUL, R1, R2),
98079- BPF_ALU64_IMM(BPF_RSH, R1, 8),
98080- BPF_JMP_IMM(BPF_JEQ, R1, 0x2ffffff, 1),
98081- BPF_EXIT_INSN(),
98082- BPF_ALU32_IMM(BPF_MOV, R0, 1),
98083- BPF_EXIT_INSN(),
98084- },
98085- INTERNAL,
98086- { },
98087- { { 0, 1 } }
98088- },
98089- {
98090- "INT: MUL32_X",
98091- .u.insns_int = {
98092- BPF_ALU32_IMM(BPF_MOV, R0, -1),
98093- BPF_ALU64_IMM(BPF_MOV, R1, -1),
98094- BPF_ALU32_IMM(BPF_MOV, R2, 3),
98095- BPF_ALU32_REG(BPF_MUL, R1, R2),
98096- BPF_ALU64_IMM(BPF_RSH, R1, 8),
98097- BPF_JMP_IMM(BPF_JEQ, R1, 0xffffff, 1),
98098- BPF_EXIT_INSN(),
98099- BPF_ALU32_IMM(BPF_MOV, R0, 1),
98100- BPF_EXIT_INSN(),
98101- },
98102- INTERNAL,
98103- { },
98104- { { 0, 1 } }
98105- },
98106- {
98107- /* Have to test all register combinations, since
98108- * JITing of different registers will produce
98109- * different asm code.
98110- */
98111- "INT: ADD 64-bit",
98112- .u.insns_int = {
98113- BPF_ALU64_IMM(BPF_MOV, R0, 0),
98114- BPF_ALU64_IMM(BPF_MOV, R1, 1),
98115- BPF_ALU64_IMM(BPF_MOV, R2, 2),
98116- BPF_ALU64_IMM(BPF_MOV, R3, 3),
98117- BPF_ALU64_IMM(BPF_MOV, R4, 4),
98118- BPF_ALU64_IMM(BPF_MOV, R5, 5),
98119- BPF_ALU64_IMM(BPF_MOV, R6, 6),
98120- BPF_ALU64_IMM(BPF_MOV, R7, 7),
98121- BPF_ALU64_IMM(BPF_MOV, R8, 8),
98122- BPF_ALU64_IMM(BPF_MOV, R9, 9),
98123- BPF_ALU64_IMM(BPF_ADD, R0, 20),
98124- BPF_ALU64_IMM(BPF_ADD, R1, 20),
98125- BPF_ALU64_IMM(BPF_ADD, R2, 20),
98126- BPF_ALU64_IMM(BPF_ADD, R3, 20),
98127- BPF_ALU64_IMM(BPF_ADD, R4, 20),
98128- BPF_ALU64_IMM(BPF_ADD, R5, 20),
98129- BPF_ALU64_IMM(BPF_ADD, R6, 20),
98130- BPF_ALU64_IMM(BPF_ADD, R7, 20),
98131- BPF_ALU64_IMM(BPF_ADD, R8, 20),
98132- BPF_ALU64_IMM(BPF_ADD, R9, 20),
98133- BPF_ALU64_IMM(BPF_SUB, R0, 10),
98134- BPF_ALU64_IMM(BPF_SUB, R1, 10),
98135- BPF_ALU64_IMM(BPF_SUB, R2, 10),
98136- BPF_ALU64_IMM(BPF_SUB, R3, 10),
98137- BPF_ALU64_IMM(BPF_SUB, R4, 10),
98138- BPF_ALU64_IMM(BPF_SUB, R5, 10),
98139- BPF_ALU64_IMM(BPF_SUB, R6, 10),
98140- BPF_ALU64_IMM(BPF_SUB, R7, 10),
98141- BPF_ALU64_IMM(BPF_SUB, R8, 10),
98142- BPF_ALU64_IMM(BPF_SUB, R9, 10),
98143- BPF_ALU64_REG(BPF_ADD, R0, R0),
98144- BPF_ALU64_REG(BPF_ADD, R0, R1),
98145- BPF_ALU64_REG(BPF_ADD, R0, R2),
98146- BPF_ALU64_REG(BPF_ADD, R0, R3),
98147- BPF_ALU64_REG(BPF_ADD, R0, R4),
98148- BPF_ALU64_REG(BPF_ADD, R0, R5),
98149- BPF_ALU64_REG(BPF_ADD, R0, R6),
98150- BPF_ALU64_REG(BPF_ADD, R0, R7),
98151- BPF_ALU64_REG(BPF_ADD, R0, R8),
98152- BPF_ALU64_REG(BPF_ADD, R0, R9), /* R0 == 155 */
98153- BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
98154- BPF_EXIT_INSN(),
98155- BPF_ALU64_REG(BPF_ADD, R1, R0),
98156- BPF_ALU64_REG(BPF_ADD, R1, R1),
98157- BPF_ALU64_REG(BPF_ADD, R1, R2),
98158- BPF_ALU64_REG(BPF_ADD, R1, R3),
98159- BPF_ALU64_REG(BPF_ADD, R1, R4),
98160- BPF_ALU64_REG(BPF_ADD, R1, R5),
98161- BPF_ALU64_REG(BPF_ADD, R1, R6),
98162- BPF_ALU64_REG(BPF_ADD, R1, R7),
98163- BPF_ALU64_REG(BPF_ADD, R1, R8),
98164- BPF_ALU64_REG(BPF_ADD, R1, R9), /* R1 == 456 */
98165- BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
98166- BPF_EXIT_INSN(),
98167- BPF_ALU64_REG(BPF_ADD, R2, R0),
98168- BPF_ALU64_REG(BPF_ADD, R2, R1),
98169- BPF_ALU64_REG(BPF_ADD, R2, R2),
98170- BPF_ALU64_REG(BPF_ADD, R2, R3),
98171- BPF_ALU64_REG(BPF_ADD, R2, R4),
98172- BPF_ALU64_REG(BPF_ADD, R2, R5),
98173- BPF_ALU64_REG(BPF_ADD, R2, R6),
98174- BPF_ALU64_REG(BPF_ADD, R2, R7),
98175- BPF_ALU64_REG(BPF_ADD, R2, R8),
98176- BPF_ALU64_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
98177- BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
98178- BPF_EXIT_INSN(),
98179- BPF_ALU64_REG(BPF_ADD, R3, R0),
98180- BPF_ALU64_REG(BPF_ADD, R3, R1),
98181- BPF_ALU64_REG(BPF_ADD, R3, R2),
98182- BPF_ALU64_REG(BPF_ADD, R3, R3),
98183- BPF_ALU64_REG(BPF_ADD, R3, R4),
98184- BPF_ALU64_REG(BPF_ADD, R3, R5),
98185- BPF_ALU64_REG(BPF_ADD, R3, R6),
98186- BPF_ALU64_REG(BPF_ADD, R3, R7),
98187- BPF_ALU64_REG(BPF_ADD, R3, R8),
98188- BPF_ALU64_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
98189- BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
98190- BPF_EXIT_INSN(),
98191- BPF_ALU64_REG(BPF_ADD, R4, R0),
98192- BPF_ALU64_REG(BPF_ADD, R4, R1),
98193- BPF_ALU64_REG(BPF_ADD, R4, R2),
98194- BPF_ALU64_REG(BPF_ADD, R4, R3),
98195- BPF_ALU64_REG(BPF_ADD, R4, R4),
98196- BPF_ALU64_REG(BPF_ADD, R4, R5),
98197- BPF_ALU64_REG(BPF_ADD, R4, R6),
98198- BPF_ALU64_REG(BPF_ADD, R4, R7),
98199- BPF_ALU64_REG(BPF_ADD, R4, R8),
98200- BPF_ALU64_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
98201- BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
98202- BPF_EXIT_INSN(),
98203- BPF_ALU64_REG(BPF_ADD, R5, R0),
98204- BPF_ALU64_REG(BPF_ADD, R5, R1),
98205- BPF_ALU64_REG(BPF_ADD, R5, R2),
98206- BPF_ALU64_REG(BPF_ADD, R5, R3),
98207- BPF_ALU64_REG(BPF_ADD, R5, R4),
98208- BPF_ALU64_REG(BPF_ADD, R5, R5),
98209- BPF_ALU64_REG(BPF_ADD, R5, R6),
98210- BPF_ALU64_REG(BPF_ADD, R5, R7),
98211- BPF_ALU64_REG(BPF_ADD, R5, R8),
98212- BPF_ALU64_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
98213- BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
98214- BPF_EXIT_INSN(),
98215- BPF_ALU64_REG(BPF_ADD, R6, R0),
98216- BPF_ALU64_REG(BPF_ADD, R6, R1),
98217- BPF_ALU64_REG(BPF_ADD, R6, R2),
98218- BPF_ALU64_REG(BPF_ADD, R6, R3),
98219- BPF_ALU64_REG(BPF_ADD, R6, R4),
98220- BPF_ALU64_REG(BPF_ADD, R6, R5),
98221- BPF_ALU64_REG(BPF_ADD, R6, R6),
98222- BPF_ALU64_REG(BPF_ADD, R6, R7),
98223- BPF_ALU64_REG(BPF_ADD, R6, R8),
98224- BPF_ALU64_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
98225- BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
98226- BPF_EXIT_INSN(),
98227- BPF_ALU64_REG(BPF_ADD, R7, R0),
98228- BPF_ALU64_REG(BPF_ADD, R7, R1),
98229- BPF_ALU64_REG(BPF_ADD, R7, R2),
98230- BPF_ALU64_REG(BPF_ADD, R7, R3),
98231- BPF_ALU64_REG(BPF_ADD, R7, R4),
98232- BPF_ALU64_REG(BPF_ADD, R7, R5),
98233- BPF_ALU64_REG(BPF_ADD, R7, R6),
98234- BPF_ALU64_REG(BPF_ADD, R7, R7),
98235- BPF_ALU64_REG(BPF_ADD, R7, R8),
98236- BPF_ALU64_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
98237- BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
98238- BPF_EXIT_INSN(),
98239- BPF_ALU64_REG(BPF_ADD, R8, R0),
98240- BPF_ALU64_REG(BPF_ADD, R8, R1),
98241- BPF_ALU64_REG(BPF_ADD, R8, R2),
98242- BPF_ALU64_REG(BPF_ADD, R8, R3),
98243- BPF_ALU64_REG(BPF_ADD, R8, R4),
98244- BPF_ALU64_REG(BPF_ADD, R8, R5),
98245- BPF_ALU64_REG(BPF_ADD, R8, R6),
98246- BPF_ALU64_REG(BPF_ADD, R8, R7),
98247- BPF_ALU64_REG(BPF_ADD, R8, R8),
98248- BPF_ALU64_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
98249- BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
98250- BPF_EXIT_INSN(),
98251- BPF_ALU64_REG(BPF_ADD, R9, R0),
98252- BPF_ALU64_REG(BPF_ADD, R9, R1),
98253- BPF_ALU64_REG(BPF_ADD, R9, R2),
98254- BPF_ALU64_REG(BPF_ADD, R9, R3),
98255- BPF_ALU64_REG(BPF_ADD, R9, R4),
98256- BPF_ALU64_REG(BPF_ADD, R9, R5),
98257- BPF_ALU64_REG(BPF_ADD, R9, R6),
98258- BPF_ALU64_REG(BPF_ADD, R9, R7),
98259- BPF_ALU64_REG(BPF_ADD, R9, R8),
98260- BPF_ALU64_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
98261- BPF_ALU64_REG(BPF_MOV, R0, R9),
98262- BPF_EXIT_INSN(),
98263- },
98264- INTERNAL,
98265- { },
98266- { { 0, 2957380 } }
98267- },
98268- {
98269- "INT: ADD 32-bit",
98270- .u.insns_int = {
98271- BPF_ALU32_IMM(BPF_MOV, R0, 20),
98272- BPF_ALU32_IMM(BPF_MOV, R1, 1),
98273- BPF_ALU32_IMM(BPF_MOV, R2, 2),
98274- BPF_ALU32_IMM(BPF_MOV, R3, 3),
98275- BPF_ALU32_IMM(BPF_MOV, R4, 4),
98276- BPF_ALU32_IMM(BPF_MOV, R5, 5),
98277- BPF_ALU32_IMM(BPF_MOV, R6, 6),
98278- BPF_ALU32_IMM(BPF_MOV, R7, 7),
98279- BPF_ALU32_IMM(BPF_MOV, R8, 8),
98280- BPF_ALU32_IMM(BPF_MOV, R9, 9),
98281- BPF_ALU64_IMM(BPF_ADD, R1, 10),
98282- BPF_ALU64_IMM(BPF_ADD, R2, 10),
98283- BPF_ALU64_IMM(BPF_ADD, R3, 10),
98284- BPF_ALU64_IMM(BPF_ADD, R4, 10),
98285- BPF_ALU64_IMM(BPF_ADD, R5, 10),
98286- BPF_ALU64_IMM(BPF_ADD, R6, 10),
98287- BPF_ALU64_IMM(BPF_ADD, R7, 10),
98288- BPF_ALU64_IMM(BPF_ADD, R8, 10),
98289- BPF_ALU64_IMM(BPF_ADD, R9, 10),
98290- BPF_ALU32_REG(BPF_ADD, R0, R1),
98291- BPF_ALU32_REG(BPF_ADD, R0, R2),
98292- BPF_ALU32_REG(BPF_ADD, R0, R3),
98293- BPF_ALU32_REG(BPF_ADD, R0, R4),
98294- BPF_ALU32_REG(BPF_ADD, R0, R5),
98295- BPF_ALU32_REG(BPF_ADD, R0, R6),
98296- BPF_ALU32_REG(BPF_ADD, R0, R7),
98297- BPF_ALU32_REG(BPF_ADD, R0, R8),
98298- BPF_ALU32_REG(BPF_ADD, R0, R9), /* R0 == 155 */
98299- BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
98300- BPF_EXIT_INSN(),
98301- BPF_ALU32_REG(BPF_ADD, R1, R0),
98302- BPF_ALU32_REG(BPF_ADD, R1, R1),
98303- BPF_ALU32_REG(BPF_ADD, R1, R2),
98304- BPF_ALU32_REG(BPF_ADD, R1, R3),
98305- BPF_ALU32_REG(BPF_ADD, R1, R4),
98306- BPF_ALU32_REG(BPF_ADD, R1, R5),
98307- BPF_ALU32_REG(BPF_ADD, R1, R6),
98308- BPF_ALU32_REG(BPF_ADD, R1, R7),
98309- BPF_ALU32_REG(BPF_ADD, R1, R8),
98310- BPF_ALU32_REG(BPF_ADD, R1, R9), /* R1 == 456 */
98311- BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
98312- BPF_EXIT_INSN(),
98313- BPF_ALU32_REG(BPF_ADD, R2, R0),
98314- BPF_ALU32_REG(BPF_ADD, R2, R1),
98315- BPF_ALU32_REG(BPF_ADD, R2, R2),
98316- BPF_ALU32_REG(BPF_ADD, R2, R3),
98317- BPF_ALU32_REG(BPF_ADD, R2, R4),
98318- BPF_ALU32_REG(BPF_ADD, R2, R5),
98319- BPF_ALU32_REG(BPF_ADD, R2, R6),
98320- BPF_ALU32_REG(BPF_ADD, R2, R7),
98321- BPF_ALU32_REG(BPF_ADD, R2, R8),
98322- BPF_ALU32_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
98323- BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
98324- BPF_EXIT_INSN(),
98325- BPF_ALU32_REG(BPF_ADD, R3, R0),
98326- BPF_ALU32_REG(BPF_ADD, R3, R1),
98327- BPF_ALU32_REG(BPF_ADD, R3, R2),
98328- BPF_ALU32_REG(BPF_ADD, R3, R3),
98329- BPF_ALU32_REG(BPF_ADD, R3, R4),
98330- BPF_ALU32_REG(BPF_ADD, R3, R5),
98331- BPF_ALU32_REG(BPF_ADD, R3, R6),
98332- BPF_ALU32_REG(BPF_ADD, R3, R7),
98333- BPF_ALU32_REG(BPF_ADD, R3, R8),
98334- BPF_ALU32_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
98335- BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
98336- BPF_EXIT_INSN(),
98337- BPF_ALU32_REG(BPF_ADD, R4, R0),
98338- BPF_ALU32_REG(BPF_ADD, R4, R1),
98339- BPF_ALU32_REG(BPF_ADD, R4, R2),
98340- BPF_ALU32_REG(BPF_ADD, R4, R3),
98341- BPF_ALU32_REG(BPF_ADD, R4, R4),
98342- BPF_ALU32_REG(BPF_ADD, R4, R5),
98343- BPF_ALU32_REG(BPF_ADD, R4, R6),
98344- BPF_ALU32_REG(BPF_ADD, R4, R7),
98345- BPF_ALU32_REG(BPF_ADD, R4, R8),
98346- BPF_ALU32_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
98347- BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
98348- BPF_EXIT_INSN(),
98349- BPF_ALU32_REG(BPF_ADD, R5, R0),
98350- BPF_ALU32_REG(BPF_ADD, R5, R1),
98351- BPF_ALU32_REG(BPF_ADD, R5, R2),
98352- BPF_ALU32_REG(BPF_ADD, R5, R3),
98353- BPF_ALU32_REG(BPF_ADD, R5, R4),
98354- BPF_ALU32_REG(BPF_ADD, R5, R5),
98355- BPF_ALU32_REG(BPF_ADD, R5, R6),
98356- BPF_ALU32_REG(BPF_ADD, R5, R7),
98357- BPF_ALU32_REG(BPF_ADD, R5, R8),
98358- BPF_ALU32_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
98359- BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
98360- BPF_EXIT_INSN(),
98361- BPF_ALU32_REG(BPF_ADD, R6, R0),
98362- BPF_ALU32_REG(BPF_ADD, R6, R1),
98363- BPF_ALU32_REG(BPF_ADD, R6, R2),
98364- BPF_ALU32_REG(BPF_ADD, R6, R3),
98365- BPF_ALU32_REG(BPF_ADD, R6, R4),
98366- BPF_ALU32_REG(BPF_ADD, R6, R5),
98367- BPF_ALU32_REG(BPF_ADD, R6, R6),
98368- BPF_ALU32_REG(BPF_ADD, R6, R7),
98369- BPF_ALU32_REG(BPF_ADD, R6, R8),
98370- BPF_ALU32_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
98371- BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
98372- BPF_EXIT_INSN(),
98373- BPF_ALU32_REG(BPF_ADD, R7, R0),
98374- BPF_ALU32_REG(BPF_ADD, R7, R1),
98375- BPF_ALU32_REG(BPF_ADD, R7, R2),
98376- BPF_ALU32_REG(BPF_ADD, R7, R3),
98377- BPF_ALU32_REG(BPF_ADD, R7, R4),
98378- BPF_ALU32_REG(BPF_ADD, R7, R5),
98379- BPF_ALU32_REG(BPF_ADD, R7, R6),
98380- BPF_ALU32_REG(BPF_ADD, R7, R7),
98381- BPF_ALU32_REG(BPF_ADD, R7, R8),
98382- BPF_ALU32_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
98383- BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
98384- BPF_EXIT_INSN(),
98385- BPF_ALU32_REG(BPF_ADD, R8, R0),
98386- BPF_ALU32_REG(BPF_ADD, R8, R1),
98387- BPF_ALU32_REG(BPF_ADD, R8, R2),
98388- BPF_ALU32_REG(BPF_ADD, R8, R3),
98389- BPF_ALU32_REG(BPF_ADD, R8, R4),
98390- BPF_ALU32_REG(BPF_ADD, R8, R5),
98391- BPF_ALU32_REG(BPF_ADD, R8, R6),
98392- BPF_ALU32_REG(BPF_ADD, R8, R7),
98393- BPF_ALU32_REG(BPF_ADD, R8, R8),
98394- BPF_ALU32_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
98395- BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
98396- BPF_EXIT_INSN(),
98397- BPF_ALU32_REG(BPF_ADD, R9, R0),
98398- BPF_ALU32_REG(BPF_ADD, R9, R1),
98399- BPF_ALU32_REG(BPF_ADD, R9, R2),
98400- BPF_ALU32_REG(BPF_ADD, R9, R3),
98401- BPF_ALU32_REG(BPF_ADD, R9, R4),
98402- BPF_ALU32_REG(BPF_ADD, R9, R5),
98403- BPF_ALU32_REG(BPF_ADD, R9, R6),
98404- BPF_ALU32_REG(BPF_ADD, R9, R7),
98405- BPF_ALU32_REG(BPF_ADD, R9, R8),
98406- BPF_ALU32_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
98407- BPF_ALU32_REG(BPF_MOV, R0, R9),
98408- BPF_EXIT_INSN(),
98409- },
98410- INTERNAL,
98411- { },
98412- { { 0, 2957380 } }
98413- },
98414- { /* Mainly checking JIT here. */
98415- "INT: SUB",
98416- .u.insns_int = {
98417- BPF_ALU64_IMM(BPF_MOV, R0, 0),
98418- BPF_ALU64_IMM(BPF_MOV, R1, 1),
98419- BPF_ALU64_IMM(BPF_MOV, R2, 2),
98420- BPF_ALU64_IMM(BPF_MOV, R3, 3),
98421- BPF_ALU64_IMM(BPF_MOV, R4, 4),
98422- BPF_ALU64_IMM(BPF_MOV, R5, 5),
98423- BPF_ALU64_IMM(BPF_MOV, R6, 6),
98424- BPF_ALU64_IMM(BPF_MOV, R7, 7),
98425- BPF_ALU64_IMM(BPF_MOV, R8, 8),
98426- BPF_ALU64_IMM(BPF_MOV, R9, 9),
98427- BPF_ALU64_REG(BPF_SUB, R0, R0),
98428- BPF_ALU64_REG(BPF_SUB, R0, R1),
98429- BPF_ALU64_REG(BPF_SUB, R0, R2),
98430- BPF_ALU64_REG(BPF_SUB, R0, R3),
98431- BPF_ALU64_REG(BPF_SUB, R0, R4),
98432- BPF_ALU64_REG(BPF_SUB, R0, R5),
98433- BPF_ALU64_REG(BPF_SUB, R0, R6),
98434- BPF_ALU64_REG(BPF_SUB, R0, R7),
98435- BPF_ALU64_REG(BPF_SUB, R0, R8),
98436- BPF_ALU64_REG(BPF_SUB, R0, R9),
98437- BPF_ALU64_IMM(BPF_SUB, R0, 10),
98438- BPF_JMP_IMM(BPF_JEQ, R0, -55, 1),
98439- BPF_EXIT_INSN(),
98440- BPF_ALU64_REG(BPF_SUB, R1, R0),
98441- BPF_ALU64_REG(BPF_SUB, R1, R2),
98442- BPF_ALU64_REG(BPF_SUB, R1, R3),
98443- BPF_ALU64_REG(BPF_SUB, R1, R4),
98444- BPF_ALU64_REG(BPF_SUB, R1, R5),
98445- BPF_ALU64_REG(BPF_SUB, R1, R6),
98446- BPF_ALU64_REG(BPF_SUB, R1, R7),
98447- BPF_ALU64_REG(BPF_SUB, R1, R8),
98448- BPF_ALU64_REG(BPF_SUB, R1, R9),
98449- BPF_ALU64_IMM(BPF_SUB, R1, 10),
98450- BPF_ALU64_REG(BPF_SUB, R2, R0),
98451- BPF_ALU64_REG(BPF_SUB, R2, R1),
98452- BPF_ALU64_REG(BPF_SUB, R2, R3),
98453- BPF_ALU64_REG(BPF_SUB, R2, R4),
98454- BPF_ALU64_REG(BPF_SUB, R2, R5),
98455- BPF_ALU64_REG(BPF_SUB, R2, R6),
98456- BPF_ALU64_REG(BPF_SUB, R2, R7),
98457- BPF_ALU64_REG(BPF_SUB, R2, R8),
98458- BPF_ALU64_REG(BPF_SUB, R2, R9),
98459- BPF_ALU64_IMM(BPF_SUB, R2, 10),
98460- BPF_ALU64_REG(BPF_SUB, R3, R0),
98461- BPF_ALU64_REG(BPF_SUB, R3, R1),
98462- BPF_ALU64_REG(BPF_SUB, R3, R2),
98463- BPF_ALU64_REG(BPF_SUB, R3, R4),
98464- BPF_ALU64_REG(BPF_SUB, R3, R5),
98465- BPF_ALU64_REG(BPF_SUB, R3, R6),
98466- BPF_ALU64_REG(BPF_SUB, R3, R7),
98467- BPF_ALU64_REG(BPF_SUB, R3, R8),
98468- BPF_ALU64_REG(BPF_SUB, R3, R9),
98469- BPF_ALU64_IMM(BPF_SUB, R3, 10),
98470- BPF_ALU64_REG(BPF_SUB, R4, R0),
98471- BPF_ALU64_REG(BPF_SUB, R4, R1),
98472- BPF_ALU64_REG(BPF_SUB, R4, R2),
98473- BPF_ALU64_REG(BPF_SUB, R4, R3),
98474- BPF_ALU64_REG(BPF_SUB, R4, R5),
98475- BPF_ALU64_REG(BPF_SUB, R4, R6),
98476- BPF_ALU64_REG(BPF_SUB, R4, R7),
98477- BPF_ALU64_REG(BPF_SUB, R4, R8),
98478- BPF_ALU64_REG(BPF_SUB, R4, R9),
98479- BPF_ALU64_IMM(BPF_SUB, R4, 10),
98480- BPF_ALU64_REG(BPF_SUB, R5, R0),
98481- BPF_ALU64_REG(BPF_SUB, R5, R1),
98482- BPF_ALU64_REG(BPF_SUB, R5, R2),
98483- BPF_ALU64_REG(BPF_SUB, R5, R3),
98484- BPF_ALU64_REG(BPF_SUB, R5, R4),
98485- BPF_ALU64_REG(BPF_SUB, R5, R6),
98486- BPF_ALU64_REG(BPF_SUB, R5, R7),
98487- BPF_ALU64_REG(BPF_SUB, R5, R8),
98488- BPF_ALU64_REG(BPF_SUB, R5, R9),
98489- BPF_ALU64_IMM(BPF_SUB, R5, 10),
98490- BPF_ALU64_REG(BPF_SUB, R6, R0),
98491- BPF_ALU64_REG(BPF_SUB, R6, R1),
98492- BPF_ALU64_REG(BPF_SUB, R6, R2),
98493- BPF_ALU64_REG(BPF_SUB, R6, R3),
98494- BPF_ALU64_REG(BPF_SUB, R6, R4),
98495- BPF_ALU64_REG(BPF_SUB, R6, R5),
98496- BPF_ALU64_REG(BPF_SUB, R6, R7),
98497- BPF_ALU64_REG(BPF_SUB, R6, R8),
98498- BPF_ALU64_REG(BPF_SUB, R6, R9),
98499- BPF_ALU64_IMM(BPF_SUB, R6, 10),
98500- BPF_ALU64_REG(BPF_SUB, R7, R0),
98501- BPF_ALU64_REG(BPF_SUB, R7, R1),
98502- BPF_ALU64_REG(BPF_SUB, R7, R2),
98503- BPF_ALU64_REG(BPF_SUB, R7, R3),
98504- BPF_ALU64_REG(BPF_SUB, R7, R4),
98505- BPF_ALU64_REG(BPF_SUB, R7, R5),
98506- BPF_ALU64_REG(BPF_SUB, R7, R6),
98507- BPF_ALU64_REG(BPF_SUB, R7, R8),
98508- BPF_ALU64_REG(BPF_SUB, R7, R9),
98509- BPF_ALU64_IMM(BPF_SUB, R7, 10),
98510- BPF_ALU64_REG(BPF_SUB, R8, R0),
98511- BPF_ALU64_REG(BPF_SUB, R8, R1),
98512- BPF_ALU64_REG(BPF_SUB, R8, R2),
98513- BPF_ALU64_REG(BPF_SUB, R8, R3),
98514- BPF_ALU64_REG(BPF_SUB, R8, R4),
98515- BPF_ALU64_REG(BPF_SUB, R8, R5),
98516- BPF_ALU64_REG(BPF_SUB, R8, R6),
98517- BPF_ALU64_REG(BPF_SUB, R8, R7),
98518- BPF_ALU64_REG(BPF_SUB, R8, R9),
98519- BPF_ALU64_IMM(BPF_SUB, R8, 10),
98520- BPF_ALU64_REG(BPF_SUB, R9, R0),
98521- BPF_ALU64_REG(BPF_SUB, R9, R1),
98522- BPF_ALU64_REG(BPF_SUB, R9, R2),
98523- BPF_ALU64_REG(BPF_SUB, R9, R3),
98524- BPF_ALU64_REG(BPF_SUB, R9, R4),
98525- BPF_ALU64_REG(BPF_SUB, R9, R5),
98526- BPF_ALU64_REG(BPF_SUB, R9, R6),
98527- BPF_ALU64_REG(BPF_SUB, R9, R7),
98528- BPF_ALU64_REG(BPF_SUB, R9, R8),
98529- BPF_ALU64_IMM(BPF_SUB, R9, 10),
98530- BPF_ALU64_IMM(BPF_SUB, R0, 10),
98531- BPF_ALU64_IMM(BPF_NEG, R0, 0),
98532- BPF_ALU64_REG(BPF_SUB, R0, R1),
98533- BPF_ALU64_REG(BPF_SUB, R0, R2),
98534- BPF_ALU64_REG(BPF_SUB, R0, R3),
98535- BPF_ALU64_REG(BPF_SUB, R0, R4),
98536- BPF_ALU64_REG(BPF_SUB, R0, R5),
98537- BPF_ALU64_REG(BPF_SUB, R0, R6),
98538- BPF_ALU64_REG(BPF_SUB, R0, R7),
98539- BPF_ALU64_REG(BPF_SUB, R0, R8),
98540- BPF_ALU64_REG(BPF_SUB, R0, R9),
98541- BPF_EXIT_INSN(),
98542- },
98543- INTERNAL,
98544- { },
98545- { { 0, 11 } }
98546- },
98547- { /* Mainly checking JIT here. */
98548- "INT: XOR",
98549- .u.insns_int = {
98550- BPF_ALU64_REG(BPF_SUB, R0, R0),
98551- BPF_ALU64_REG(BPF_XOR, R1, R1),
98552- BPF_JMP_REG(BPF_JEQ, R0, R1, 1),
98553- BPF_EXIT_INSN(),
98554- BPF_ALU64_IMM(BPF_MOV, R0, 10),
98555- BPF_ALU64_IMM(BPF_MOV, R1, -1),
98556- BPF_ALU64_REG(BPF_SUB, R1, R1),
98557- BPF_ALU64_REG(BPF_XOR, R2, R2),
98558- BPF_JMP_REG(BPF_JEQ, R1, R2, 1),
98559- BPF_EXIT_INSN(),
98560- BPF_ALU64_REG(BPF_SUB, R2, R2),
98561- BPF_ALU64_REG(BPF_XOR, R3, R3),
98562- BPF_ALU64_IMM(BPF_MOV, R0, 10),
98563- BPF_ALU64_IMM(BPF_MOV, R1, -1),
98564- BPF_JMP_REG(BPF_JEQ, R2, R3, 1),
98565- BPF_EXIT_INSN(),
98566- BPF_ALU64_REG(BPF_SUB, R3, R3),
98567- BPF_ALU64_REG(BPF_XOR, R4, R4),
98568- BPF_ALU64_IMM(BPF_MOV, R2, 1),
98569- BPF_ALU64_IMM(BPF_MOV, R5, -1),
98570- BPF_JMP_REG(BPF_JEQ, R3, R4, 1),
98571- BPF_EXIT_INSN(),
98572- BPF_ALU64_REG(BPF_SUB, R4, R4),
98573- BPF_ALU64_REG(BPF_XOR, R5, R5),
98574- BPF_ALU64_IMM(BPF_MOV, R3, 1),
98575- BPF_ALU64_IMM(BPF_MOV, R7, -1),
98576- BPF_JMP_REG(BPF_JEQ, R5, R4, 1),
98577- BPF_EXIT_INSN(),
98578- BPF_ALU64_IMM(BPF_MOV, R5, 1),
98579- BPF_ALU64_REG(BPF_SUB, R5, R5),
98580- BPF_ALU64_REG(BPF_XOR, R6, R6),
98581- BPF_ALU64_IMM(BPF_MOV, R1, 1),
98582- BPF_ALU64_IMM(BPF_MOV, R8, -1),
98583- BPF_JMP_REG(BPF_JEQ, R5, R6, 1),
98584- BPF_EXIT_INSN(),
98585- BPF_ALU64_REG(BPF_SUB, R6, R6),
98586- BPF_ALU64_REG(BPF_XOR, R7, R7),
98587- BPF_JMP_REG(BPF_JEQ, R7, R6, 1),
98588- BPF_EXIT_INSN(),
98589- BPF_ALU64_REG(BPF_SUB, R7, R7),
98590- BPF_ALU64_REG(BPF_XOR, R8, R8),
98591- BPF_JMP_REG(BPF_JEQ, R7, R8, 1),
98592- BPF_EXIT_INSN(),
98593- BPF_ALU64_REG(BPF_SUB, R8, R8),
98594- BPF_ALU64_REG(BPF_XOR, R9, R9),
98595- BPF_JMP_REG(BPF_JEQ, R9, R8, 1),
98596- BPF_EXIT_INSN(),
98597- BPF_ALU64_REG(BPF_SUB, R9, R9),
98598- BPF_ALU64_REG(BPF_XOR, R0, R0),
98599- BPF_JMP_REG(BPF_JEQ, R9, R0, 1),
98600- BPF_EXIT_INSN(),
98601- BPF_ALU64_REG(BPF_SUB, R1, R1),
98602- BPF_ALU64_REG(BPF_XOR, R0, R0),
98603- BPF_JMP_REG(BPF_JEQ, R9, R0, 2),
98604- BPF_ALU64_IMM(BPF_MOV, R0, 0),
98605- BPF_EXIT_INSN(),
98606- BPF_ALU64_IMM(BPF_MOV, R0, 1),
98607- BPF_EXIT_INSN(),
98608- },
98609- INTERNAL,
98610- { },
98611- { { 0, 1 } }
98612- },
98613- { /* Mainly checking JIT here. */
98614- "INT: MUL",
98615- .u.insns_int = {
98616- BPF_ALU64_IMM(BPF_MOV, R0, 11),
98617- BPF_ALU64_IMM(BPF_MOV, R1, 1),
98618- BPF_ALU64_IMM(BPF_MOV, R2, 2),
98619- BPF_ALU64_IMM(BPF_MOV, R3, 3),
98620- BPF_ALU64_IMM(BPF_MOV, R4, 4),
98621- BPF_ALU64_IMM(BPF_MOV, R5, 5),
98622- BPF_ALU64_IMM(BPF_MOV, R6, 6),
98623- BPF_ALU64_IMM(BPF_MOV, R7, 7),
98624- BPF_ALU64_IMM(BPF_MOV, R8, 8),
98625- BPF_ALU64_IMM(BPF_MOV, R9, 9),
98626- BPF_ALU64_REG(BPF_MUL, R0, R0),
98627- BPF_ALU64_REG(BPF_MUL, R0, R1),
98628- BPF_ALU64_REG(BPF_MUL, R0, R2),
98629- BPF_ALU64_REG(BPF_MUL, R0, R3),
98630- BPF_ALU64_REG(BPF_MUL, R0, R4),
98631- BPF_ALU64_REG(BPF_MUL, R0, R5),
98632- BPF_ALU64_REG(BPF_MUL, R0, R6),
98633- BPF_ALU64_REG(BPF_MUL, R0, R7),
98634- BPF_ALU64_REG(BPF_MUL, R0, R8),
98635- BPF_ALU64_REG(BPF_MUL, R0, R9),
98636- BPF_ALU64_IMM(BPF_MUL, R0, 10),
98637- BPF_JMP_IMM(BPF_JEQ, R0, 439084800, 1),
98638- BPF_EXIT_INSN(),
98639- BPF_ALU64_REG(BPF_MUL, R1, R0),
98640- BPF_ALU64_REG(BPF_MUL, R1, R2),
98641- BPF_ALU64_REG(BPF_MUL, R1, R3),
98642- BPF_ALU64_REG(BPF_MUL, R1, R4),
98643- BPF_ALU64_REG(BPF_MUL, R1, R5),
98644- BPF_ALU64_REG(BPF_MUL, R1, R6),
98645- BPF_ALU64_REG(BPF_MUL, R1, R7),
98646- BPF_ALU64_REG(BPF_MUL, R1, R8),
98647- BPF_ALU64_REG(BPF_MUL, R1, R9),
98648- BPF_ALU64_IMM(BPF_MUL, R1, 10),
98649- BPF_ALU64_REG(BPF_MOV, R2, R1),
98650- BPF_ALU64_IMM(BPF_RSH, R2, 32),
98651- BPF_JMP_IMM(BPF_JEQ, R2, 0x5a924, 1),
98652- BPF_EXIT_INSN(),
98653- BPF_ALU64_IMM(BPF_LSH, R1, 32),
98654- BPF_ALU64_IMM(BPF_ARSH, R1, 32),
98655- BPF_JMP_IMM(BPF_JEQ, R1, 0xebb90000, 1),
98656- BPF_EXIT_INSN(),
98657- BPF_ALU64_REG(BPF_MUL, R2, R0),
98658- BPF_ALU64_REG(BPF_MUL, R2, R1),
98659- BPF_ALU64_REG(BPF_MUL, R2, R3),
98660- BPF_ALU64_REG(BPF_MUL, R2, R4),
98661- BPF_ALU64_REG(BPF_MUL, R2, R5),
98662- BPF_ALU64_REG(BPF_MUL, R2, R6),
98663- BPF_ALU64_REG(BPF_MUL, R2, R7),
98664- BPF_ALU64_REG(BPF_MUL, R2, R8),
98665- BPF_ALU64_REG(BPF_MUL, R2, R9),
98666- BPF_ALU64_IMM(BPF_MUL, R2, 10),
98667- BPF_ALU64_IMM(BPF_RSH, R2, 32),
98668- BPF_ALU64_REG(BPF_MOV, R0, R2),
98669- BPF_EXIT_INSN(),
98670- },
98671- INTERNAL,
98672- { },
98673- { { 0, 0x35d97ef2 } }
98674- },
98675- {
98676- "INT: ALU MIX",
98677- .u.insns_int = {
98678- BPF_ALU64_IMM(BPF_MOV, R0, 11),
98679- BPF_ALU64_IMM(BPF_ADD, R0, -1),
98680- BPF_ALU64_IMM(BPF_MOV, R2, 2),
98681- BPF_ALU64_IMM(BPF_XOR, R2, 3),
98682- BPF_ALU64_REG(BPF_DIV, R0, R2),
98683- BPF_JMP_IMM(BPF_JEQ, R0, 10, 1),
98684- BPF_EXIT_INSN(),
98685- BPF_ALU64_IMM(BPF_MOD, R0, 3),
98686- BPF_JMP_IMM(BPF_JEQ, R0, 1, 1),
98687- BPF_EXIT_INSN(),
98688- BPF_ALU64_IMM(BPF_MOV, R0, -1),
98689- BPF_EXIT_INSN(),
98690- },
98691- INTERNAL,
98692- { },
98693- { { 0, -1 } }
98694- },
98695- {
98696- "INT: DIV + ABS",
98697- .u.insns_int = {
98698- BPF_ALU64_REG(BPF_MOV, R6, R1),
98699- BPF_LD_ABS(BPF_B, 3),
98700- BPF_ALU64_IMM(BPF_MOV, R2, 2),
98701- BPF_ALU32_REG(BPF_DIV, R0, R2),
98702- BPF_ALU64_REG(BPF_MOV, R8, R0),
98703- BPF_LD_ABS(BPF_B, 4),
98704- BPF_ALU64_REG(BPF_ADD, R8, R0),
98705- BPF_LD_IND(BPF_B, R8, -70),
98706- BPF_EXIT_INSN(),
98707- },
98708- INTERNAL,
98709- { 10, 20, 30, 40, 50 },
98710- { { 4, 0 }, { 5, 10 } }
98711- },
98712- {
98713- "INT: DIV by zero",
98714- .u.insns_int = {
98715- BPF_ALU64_REG(BPF_MOV, R6, R1),
98716- BPF_ALU64_IMM(BPF_MOV, R7, 0),
98717- BPF_LD_ABS(BPF_B, 3),
98718- BPF_ALU32_REG(BPF_DIV, R0, R7),
98719- BPF_EXIT_INSN(),
98720- },
98721- INTERNAL,
98722- { 10, 20, 30, 40, 50 },
98723- { { 3, 0 }, { 4, 0 } }
98724- },
98725- {
98726- "check: missing ret",
98727- .u.insns = {
98728- BPF_STMT(BPF_LD | BPF_IMM, 1),
98729- },
98730- CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
98731- { },
98732- { }
98733- },
98734- {
98735- "check: div_k_0",
98736- .u.insns = {
98737- BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0),
98738- BPF_STMT(BPF_RET | BPF_K, 0)
98739- },
98740- CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
98741- { },
98742- { }
98743- },
98744- {
98745- "check: unknown insn",
98746- .u.insns = {
98747- /* seccomp insn, rejected in socket filter */
98748- BPF_STMT(BPF_LDX | BPF_W | BPF_ABS, 0),
98749- BPF_STMT(BPF_RET | BPF_K, 0)
98750- },
98751- CLASSIC | FLAG_EXPECTED_FAIL,
98752- { },
98753- { }
98754- },
98755- {
98756- "check: out of range spill/fill",
98757- .u.insns = {
98758- BPF_STMT(BPF_STX, 16),
98759- BPF_STMT(BPF_RET | BPF_K, 0)
98760- },
98761- CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
98762- { },
98763- { }
98764- },
98765- {
98766- "JUMPS + HOLES",
98767- .u.insns = {
98768- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98769- BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 15),
98770- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98771- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98772- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98773- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98774- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98775- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98776- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98777- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98778- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98779- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98780- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98781- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98782- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98783- BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 3, 4),
98784- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98785- BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 1, 2),
98786- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98787- BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
98788- BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
98789- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98790- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98791- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98792- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98793- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98794- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98795- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98796- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98797- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98798- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98799- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98800- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98801- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98802- BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 2, 3),
98803- BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 1, 2),
98804- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98805- BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
98806- BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
98807- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98808- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98809- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98810- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98811- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98812- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98813- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98814- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98815- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98816- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98817- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98818- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98819- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98820- BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 2, 3),
98821- BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 1, 2),
98822- BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
98823- BPF_STMT(BPF_RET | BPF_A, 0),
98824- BPF_STMT(BPF_RET | BPF_A, 0),
98825- },
98826- CLASSIC,
98827- { 0x00, 0x1b, 0x21, 0x3c, 0x9d, 0xf8,
98828- 0x90, 0xe2, 0xba, 0x0a, 0x56, 0xb4,
98829- 0x08, 0x00,
98830- 0x45, 0x00, 0x00, 0x28, 0x00, 0x00,
98831- 0x20, 0x00, 0x40, 0x11, 0x00, 0x00, /* IP header */
98832- 0xc0, 0xa8, 0x33, 0x01,
98833- 0xc0, 0xa8, 0x33, 0x02,
98834- 0xbb, 0xb6,
98835- 0xa9, 0xfa,
98836- 0x00, 0x14, 0x00, 0x00,
98837- 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
98838- 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
98839- 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
98840- 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
98841- 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
98842- 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
98843- 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
98844- 0xcc, 0xcc, 0xcc, 0xcc },
98845- { { 88, 0x001b } }
98846- },
98847- {
98848- "check: RET X",
98849- .u.insns = {
98850- BPF_STMT(BPF_RET | BPF_X, 0),
98851- },
98852- CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
98853- { },
98854- { },
98855- },
98856- {
98857- "check: LDX + RET X",
98858- .u.insns = {
98859- BPF_STMT(BPF_LDX | BPF_IMM, 42),
98860- BPF_STMT(BPF_RET | BPF_X, 0),
98861- },
98862- CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
98863- { },
98864- { },
98865- },
98866- { /* Mainly checking JIT here. */
98867- "M[]: alt STX + LDX",
98868- .u.insns = {
98869- BPF_STMT(BPF_LDX | BPF_IMM, 100),
98870- BPF_STMT(BPF_STX, 0),
98871- BPF_STMT(BPF_LDX | BPF_MEM, 0),
98872- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98873- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98874- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98875- BPF_STMT(BPF_STX, 1),
98876- BPF_STMT(BPF_LDX | BPF_MEM, 1),
98877- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98878- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98879- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98880- BPF_STMT(BPF_STX, 2),
98881- BPF_STMT(BPF_LDX | BPF_MEM, 2),
98882- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98883- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98884- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98885- BPF_STMT(BPF_STX, 3),
98886- BPF_STMT(BPF_LDX | BPF_MEM, 3),
98887- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98888- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98889- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98890- BPF_STMT(BPF_STX, 4),
98891- BPF_STMT(BPF_LDX | BPF_MEM, 4),
98892- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98893- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98894- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98895- BPF_STMT(BPF_STX, 5),
98896- BPF_STMT(BPF_LDX | BPF_MEM, 5),
98897- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98898- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98899- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98900- BPF_STMT(BPF_STX, 6),
98901- BPF_STMT(BPF_LDX | BPF_MEM, 6),
98902- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98903- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98904- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98905- BPF_STMT(BPF_STX, 7),
98906- BPF_STMT(BPF_LDX | BPF_MEM, 7),
98907- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98908- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98909- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98910- BPF_STMT(BPF_STX, 8),
98911- BPF_STMT(BPF_LDX | BPF_MEM, 8),
98912- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98913- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98914- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98915- BPF_STMT(BPF_STX, 9),
98916- BPF_STMT(BPF_LDX | BPF_MEM, 9),
98917- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98918- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98919- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98920- BPF_STMT(BPF_STX, 10),
98921- BPF_STMT(BPF_LDX | BPF_MEM, 10),
98922- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98923- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98924- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98925- BPF_STMT(BPF_STX, 11),
98926- BPF_STMT(BPF_LDX | BPF_MEM, 11),
98927- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98928- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98929- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98930- BPF_STMT(BPF_STX, 12),
98931- BPF_STMT(BPF_LDX | BPF_MEM, 12),
98932- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98933- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98934- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98935- BPF_STMT(BPF_STX, 13),
98936- BPF_STMT(BPF_LDX | BPF_MEM, 13),
98937- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98938- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98939- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98940- BPF_STMT(BPF_STX, 14),
98941- BPF_STMT(BPF_LDX | BPF_MEM, 14),
98942- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98943- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98944- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98945- BPF_STMT(BPF_STX, 15),
98946- BPF_STMT(BPF_LDX | BPF_MEM, 15),
98947- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98948- BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
98949- BPF_STMT(BPF_MISC | BPF_TAX, 0),
98950- BPF_STMT(BPF_RET | BPF_A, 0),
98951- },
98952- CLASSIC | FLAG_NO_DATA,
98953- { },
98954- { { 0, 116 } },
98955- },
98956- { /* Mainly checking JIT here. */
98957- "M[]: full STX + full LDX",
98958- .u.insns = {
98959- BPF_STMT(BPF_LDX | BPF_IMM, 0xbadfeedb),
98960- BPF_STMT(BPF_STX, 0),
98961- BPF_STMT(BPF_LDX | BPF_IMM, 0xecabedae),
98962- BPF_STMT(BPF_STX, 1),
98963- BPF_STMT(BPF_LDX | BPF_IMM, 0xafccfeaf),
98964- BPF_STMT(BPF_STX, 2),
98965- BPF_STMT(BPF_LDX | BPF_IMM, 0xbffdcedc),
98966- BPF_STMT(BPF_STX, 3),
98967- BPF_STMT(BPF_LDX | BPF_IMM, 0xfbbbdccb),
98968- BPF_STMT(BPF_STX, 4),
98969- BPF_STMT(BPF_LDX | BPF_IMM, 0xfbabcbda),
98970- BPF_STMT(BPF_STX, 5),
98971- BPF_STMT(BPF_LDX | BPF_IMM, 0xaedecbdb),
98972- BPF_STMT(BPF_STX, 6),
98973- BPF_STMT(BPF_LDX | BPF_IMM, 0xadebbade),
98974- BPF_STMT(BPF_STX, 7),
98975- BPF_STMT(BPF_LDX | BPF_IMM, 0xfcfcfaec),
98976- BPF_STMT(BPF_STX, 8),
98977- BPF_STMT(BPF_LDX | BPF_IMM, 0xbcdddbdc),
98978- BPF_STMT(BPF_STX, 9),
98979- BPF_STMT(BPF_LDX | BPF_IMM, 0xfeefdfac),
98980- BPF_STMT(BPF_STX, 10),
98981- BPF_STMT(BPF_LDX | BPF_IMM, 0xcddcdeea),
98982- BPF_STMT(BPF_STX, 11),
98983- BPF_STMT(BPF_LDX | BPF_IMM, 0xaccfaebb),
98984- BPF_STMT(BPF_STX, 12),
98985- BPF_STMT(BPF_LDX | BPF_IMM, 0xbdcccdcf),
98986- BPF_STMT(BPF_STX, 13),
98987- BPF_STMT(BPF_LDX | BPF_IMM, 0xaaedecde),
98988- BPF_STMT(BPF_STX, 14),
98989- BPF_STMT(BPF_LDX | BPF_IMM, 0xfaeacdad),
98990- BPF_STMT(BPF_STX, 15),
98991- BPF_STMT(BPF_LDX | BPF_MEM, 0),
98992- BPF_STMT(BPF_MISC | BPF_TXA, 0),
98993- BPF_STMT(BPF_LDX | BPF_MEM, 1),
98994- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98995- BPF_STMT(BPF_LDX | BPF_MEM, 2),
98996- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98997- BPF_STMT(BPF_LDX | BPF_MEM, 3),
98998- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
98999- BPF_STMT(BPF_LDX | BPF_MEM, 4),
99000- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
99001- BPF_STMT(BPF_LDX | BPF_MEM, 5),
99002- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
99003- BPF_STMT(BPF_LDX | BPF_MEM, 6),
99004- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
99005- BPF_STMT(BPF_LDX | BPF_MEM, 7),
99006- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
99007- BPF_STMT(BPF_LDX | BPF_MEM, 8),
99008- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
99009- BPF_STMT(BPF_LDX | BPF_MEM, 9),
99010- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
99011- BPF_STMT(BPF_LDX | BPF_MEM, 10),
99012- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
99013- BPF_STMT(BPF_LDX | BPF_MEM, 11),
99014- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
99015- BPF_STMT(BPF_LDX | BPF_MEM, 12),
99016- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
99017- BPF_STMT(BPF_LDX | BPF_MEM, 13),
99018- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
99019- BPF_STMT(BPF_LDX | BPF_MEM, 14),
99020- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
99021- BPF_STMT(BPF_LDX | BPF_MEM, 15),
99022- BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
99023- BPF_STMT(BPF_RET | BPF_A, 0),
99024- },
99025- CLASSIC | FLAG_NO_DATA,
99026- { },
99027- { { 0, 0x2a5a5e5 } },
99028- },
99029- {
99030- "check: SKF_AD_MAX",
99031- .u.insns = {
99032- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
99033- SKF_AD_OFF + SKF_AD_MAX),
99034- BPF_STMT(BPF_RET | BPF_A, 0),
99035- },
99036- CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
99037- { },
99038- { },
99039- },
99040- { /* Passes checker but fails during runtime. */
99041- "LD [SKF_AD_OFF-1]",
99042- .u.insns = {
99043- BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
99044- SKF_AD_OFF - 1),
99045- BPF_STMT(BPF_RET | BPF_K, 1),
99046- },
99047- CLASSIC,
99048- { },
99049- { { 1, 0 } },
99050- },
99051-};
99052-
99053-static struct net_device dev;
99054-
99055-static struct sk_buff *populate_skb(char *buf, int size)
99056-{
99057- struct sk_buff *skb;
99058-
99059- if (size >= MAX_DATA)
99060- return NULL;
99061-
99062- skb = alloc_skb(MAX_DATA, GFP_KERNEL);
99063- if (!skb)
99064- return NULL;
99065-
99066- memcpy(__skb_put(skb, size), buf, size);
99067-
99068- /* Initialize a fake skb with test pattern. */
99069- skb_reset_mac_header(skb);
99070- skb->protocol = htons(ETH_P_IP);
99071- skb->pkt_type = SKB_TYPE;
99072- skb->mark = SKB_MARK;
99073- skb->hash = SKB_HASH;
99074- skb->queue_mapping = SKB_QUEUE_MAP;
99075- skb->vlan_tci = SKB_VLAN_TCI;
99076- skb->dev = &dev;
99077- skb->dev->ifindex = SKB_DEV_IFINDEX;
99078- skb->dev->type = SKB_DEV_TYPE;
99079- skb_set_network_header(skb, min(size, ETH_HLEN));
99080-
99081- return skb;
99082-}
99083-
99084-static void *generate_test_data(struct bpf_test *test, int sub)
99085-{
99086- if (test->aux & FLAG_NO_DATA)
99087- return NULL;
99088-
99089- /* Test case expects an skb, so populate one. Various
99090- * subtests generate skbs of different sizes based on
99091- * the same data.
99092- */
99093- return populate_skb(test->data, test->test[sub].data_size);
99094-}
99095-
99096-static void release_test_data(const struct bpf_test *test, void *data)
99097-{
99098- if (test->aux & FLAG_NO_DATA)
99099- return;
99100-
99101- kfree_skb(data);
99102-}
99103-
99104-static int probe_filter_length(struct sock_filter *fp)
99105-{
99106- int len = 0;
99107-
99108- for (len = MAX_INSNS - 1; len > 0; --len)
99109- if (fp[len].code != 0 || fp[len].k != 0)
99110- break;
99111-
99112- return len + 1;
99113-}
99114-
99115-static struct sk_filter *generate_filter(int which, int *err)
99116-{
99117- struct sk_filter *fp;
99118- struct sock_fprog_kern fprog;
99119- unsigned int flen = probe_filter_length(tests[which].u.insns);
99120- __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
99121-
99122- switch (test_type) {
99123- case CLASSIC:
99124- fprog.filter = tests[which].u.insns;
99125- fprog.len = flen;
99126-
99127- *err = sk_unattached_filter_create(&fp, &fprog);
99128- if (tests[which].aux & FLAG_EXPECTED_FAIL) {
99129- if (*err == -EINVAL) {
99130- pr_cont("PASS\n");
99131- /* Verifier rejected filter as expected. */
99132- *err = 0;
99133- return NULL;
99134- } else {
99135- pr_cont("UNEXPECTED_PASS\n");
99136-			/* Verifier failed to reject a filter that is
99137-			 * known bad; just return.
99138-			 */
99139- *err = -EINVAL;
99140- return NULL;
99141- }
99142- }
99143- /* We don't expect to fail. */
99144- if (*err) {
99145- pr_cont("FAIL to attach err=%d len=%d\n",
99146- *err, fprog.len);
99147- return NULL;
99148- }
99149- break;
99150-
99151- case INTERNAL:
99152- fp = kzalloc(sk_filter_size(flen), GFP_KERNEL);
99153- if (fp == NULL) {
99154- pr_cont("UNEXPECTED_FAIL no memory left\n");
99155- *err = -ENOMEM;
99156- return NULL;
99157- }
99158-
99159- fp->len = flen;
99160- memcpy(fp->insnsi, tests[which].u.insns_int,
99161- fp->len * sizeof(struct sock_filter_int));
99162-
99163- sk_filter_select_runtime(fp);
99164- break;
99165- }
99166-
99167- *err = 0;
99168- return fp;
99169-}
99170-
99171-static void release_filter(struct sk_filter *fp, int which)
99172-{
99173- __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
99174-
99175- switch (test_type) {
99176- case CLASSIC:
99177- sk_unattached_filter_destroy(fp);
99178- break;
99179- case INTERNAL:
99180- sk_filter_free(fp);
99181- break;
99182- }
99183-}
99184-
99185-static int __run_one(const struct sk_filter *fp, const void *data,
99186- int runs, u64 *duration)
99187-{
99188- u64 start, finish;
99189- int ret, i;
99190-
99191- start = ktime_to_us(ktime_get());
99192-
99193- for (i = 0; i < runs; i++)
99194- ret = SK_RUN_FILTER(fp, data);
99195-
99196- finish = ktime_to_us(ktime_get());
99197-
99198- *duration = (finish - start) * 1000ULL;
99199- do_div(*duration, runs);
99200-
99201- return ret;
99202-}
99203-
99204-static int run_one(const struct sk_filter *fp, struct bpf_test *test)
99205-{
99206- int err_cnt = 0, i, runs = MAX_TESTRUNS;
99207-
99208- for (i = 0; i < MAX_SUBTESTS; i++) {
99209- void *data;
99210- u64 duration;
99211- u32 ret;
99212-
99213- if (test->test[i].data_size == 0 &&
99214- test->test[i].result == 0)
99215- break;
99216-
99217- data = generate_test_data(test, i);
99218- ret = __run_one(fp, data, runs, &duration);
99219- release_test_data(test, data);
99220-
99221- if (ret == test->test[i].result) {
99222- pr_cont("%lld ", duration);
99223- } else {
99224- pr_cont("ret %d != %d ", ret,
99225- test->test[i].result);
99226- err_cnt++;
99227- }
99228- }
99229-
99230- return err_cnt;
99231-}
99232-
99233-static __init int test_bpf(void)
99234-{
99235- int i, err_cnt = 0, pass_cnt = 0;
99236-
99237- for (i = 0; i < ARRAY_SIZE(tests); i++) {
99238- struct sk_filter *fp;
99239- int err;
99240-
99241- pr_info("#%d %s ", i, tests[i].descr);
99242-
99243- fp = generate_filter(i, &err);
99244- if (fp == NULL) {
99245- if (err == 0) {
99246- pass_cnt++;
99247- continue;
99248- }
99249-
99250- return err;
99251- }
99252- err = run_one(fp, &tests[i]);
99253- release_filter(fp, i);
99254-
99255- if (err) {
99256- pr_cont("FAIL (%d times)\n", err);
99257- err_cnt++;
99258- } else {
99259- pr_cont("PASS\n");
99260- pass_cnt++;
99261- }
99262- }
99263-
99264- pr_info("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt);
99265- return err_cnt ? -EINVAL : 0;
99266-}
99267-
99268-static int __init test_bpf_init(void)
99269-{
99270- return test_bpf();
99271-}
99272-
99273-static void __exit test_bpf_exit(void)
99274-{
99275-}
99276-
99277-module_init(test_bpf_init);
99278-module_exit(test_bpf_exit);
99279-
99280-MODULE_LICENSE("GPL");
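
Editor's note: the hunk above removes the whole of the lib/test_bpf.c self-test table and its harness. Each table entry bundles a filter program with the data and results it must produce: a description string, the instructions (.u.insns for classic BPF, .u.insns_int for the internal insn set), an aux word selecting CLASSIC or INTERNAL plus flags such as FLAG_NO_DATA and FLAG_EXPECTED_FAIL, a raw packet buffer, and { data_size, expected-result } subtest pairs. A minimal sketch of one classic entry, using only fields visible in the removed code:

	{
		"example: return a constant",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_IMM, 42),	/* A = 42 */
			BPF_STMT(BPF_RET | BPF_A, 0),	/* return A */
		},
		CLASSIC | FLAG_NO_DATA,
		{ },			/* no packet data */
		{ { 0, 42 } },		/* expect 42 on a 0-byte skb */
	},

The harness (run_one/__run_one above) executes each subtest MAX_TESTRUNS times and reports the average cost per run: the elapsed microseconds are scaled by 1000 to nanoseconds, then divided by the run count.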
99281diff --git a/lib/usercopy.c b/lib/usercopy.c
99282index 4f5b1dd..7cab418 100644
99283--- a/lib/usercopy.c
99284+++ b/lib/usercopy.c
99285@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
99286 WARN(1, "Buffer overflow detected!\n");
99287 }
99288 EXPORT_SYMBOL(copy_from_user_overflow);
99289+
99290+void copy_to_user_overflow(void)
99291+{
99292+ WARN(1, "Buffer overflow detected!\n");
99293+}
99294+EXPORT_SYMBOL(copy_to_user_overflow);
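
Editor's note: this hunk adds a write-direction twin of copy_from_user_overflow(). Both functions are only warning sinks; instrumented copy helpers branch to them when a compile-time-known object size is smaller than the requested length. A hedged sketch of such a caller, modeled on the upstream copy_from_user checks (copy_to_user_checked is a hypothetical name, not from this patch):

	static inline unsigned long
	copy_to_user_checked(void __user *to, const void *from, unsigned long n)
	{
		size_t sz = __builtin_object_size(from, 0);

		if (unlikely(sz != (size_t)-1 && sz < n)) {
			copy_to_user_overflow();	/* WARN, as defined above */
			return n;			/* report nothing copied */
		}
		return copy_to_user(to, from, n);
	}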
99295diff --git a/lib/vsprintf.c b/lib/vsprintf.c
99296index 6fe2c84..2fe5ec6 100644
99297--- a/lib/vsprintf.c
99298+++ b/lib/vsprintf.c
99299@@ -16,6 +16,9 @@
99300 * - scnprintf and vscnprintf
99301 */
99302
99303+#ifdef CONFIG_GRKERNSEC_HIDESYM
99304+#define __INCLUDED_BY_HIDESYM 1
99305+#endif
99306 #include <stdarg.h>
99307 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
99308 #include <linux/types.h>
99309@@ -624,7 +627,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
99310 #ifdef CONFIG_KALLSYMS
99311 if (*fmt == 'B')
99312 sprint_backtrace(sym, value);
99313- else if (*fmt != 'f' && *fmt != 's')
99314+ else if (*fmt != 'f' && *fmt != 's' && *fmt != 'X')
99315 sprint_symbol(sym, value);
99316 else
99317 sprint_symbol_no_offset(sym, value);
99318@@ -1183,7 +1186,11 @@ char *address_val(char *buf, char *end, const void *addr,
99319 return number(buf, end, num, spec);
99320 }
99321
99322+#ifdef CONFIG_GRKERNSEC_HIDESYM
99323+int kptr_restrict __read_mostly = 2;
99324+#else
99325 int kptr_restrict __read_mostly;
99326+#endif
99327
99328 /*
99329 * Show a '%p' thing. A kernel extension is that the '%p' is followed
99330@@ -1194,8 +1201,10 @@ int kptr_restrict __read_mostly;
99331 *
99332 * - 'F' For symbolic function descriptor pointers with offset
99333 * - 'f' For simple symbolic function names without offset
99334+ * - 'X' For simple symbolic function names without offset approved for use with GRKERNSEC_HIDESYM
99335 * - 'S' For symbolic direct pointers with offset
99336 * - 's' For symbolic direct pointers without offset
99337+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
99338 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
99339 * - 'B' For backtraced symbolic direct pointers with offset
99340 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
99341@@ -1263,12 +1272,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
99342
99343 if (!ptr && *fmt != 'K') {
99344 /*
99345- * Print (null) with the same width as a pointer so it makes
99346+ * Print (nil) with the same width as a pointer so it makes
99347 * tabular output look nice.
99348 */
99349 if (spec.field_width == -1)
99350 spec.field_width = default_width;
99351- return string(buf, end, "(null)", spec);
99352+ return string(buf, end, "(nil)", spec);
99353 }
99354
99355 switch (*fmt) {
99356@@ -1278,6 +1287,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
99357 /* Fallthrough */
99358 case 'S':
99359 case 's':
99360+#ifdef CONFIG_GRKERNSEC_HIDESYM
99361+ break;
99362+#else
99363+ return symbol_string(buf, end, ptr, spec, fmt);
99364+#endif
99365+ case 'X':
99366+ ptr = dereference_function_descriptor(ptr);
99367+ case 'A':
99368 case 'B':
99369 return symbol_string(buf, end, ptr, spec, fmt);
99370 case 'R':
99371@@ -1333,6 +1350,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
99372 va_end(va);
99373 return buf;
99374 }
99375+ case 'P':
99376+ break;
99377 case 'K':
99378 /*
99379 * %pK cannot be used in IRQ context because its test
99380@@ -1390,6 +1409,22 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
99381 ((const struct file *)ptr)->f_path.dentry,
99382 spec, fmt);
99383 }
99384+
99385+#ifdef CONFIG_GRKERNSEC_HIDESYM
99386+	/* 'P' = approved pointers to copy to userland,
99387+	   e.g. the /proc/kallsyms case: we display nothing to
99388+	   non-root users and the real contents to root users.
99389+	   'X' = approved simple symbols.
99390+	   Also ignore 'K' pointers, since their NULLing for non-root
99391+	   users is already forced above.
99392+	*/
99393+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'X' && *fmt != 'K' && is_usercopy_object(buf)) {
99394+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
99395+ dump_stack();
99396+ ptr = NULL;
99397+ }
99398+#endif
99399+
99400 spec.flags |= SMALL;
99401 if (spec.field_width == -1) {
99402 spec.field_width = default_width;
99403@@ -2089,11 +2124,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
99404 typeof(type) value; \
99405 if (sizeof(type) == 8) { \
99406 args = PTR_ALIGN(args, sizeof(u32)); \
99407- *(u32 *)&value = *(u32 *)args; \
99408- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
99409+ *(u32 *)&value = *(const u32 *)args; \
99410+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
99411 } else { \
99412 args = PTR_ALIGN(args, sizeof(type)); \
99413- value = *(typeof(type) *)args; \
99414+ value = *(const typeof(type) *)args; \
99415 } \
99416 args += sizeof(type); \
99417 value; \
99418@@ -2156,7 +2191,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
99419 case FORMAT_TYPE_STR: {
99420 const char *str_arg = args;
99421 args += strlen(str_arg) + 1;
99422- str = string(str, end, (char *)str_arg, spec);
99423+ str = string(str, end, str_arg, spec);
99424 break;
99425 }
99426
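
Editor's note: the vsprintf.c changes add two HIDESYM-aware specifiers, 'X' (symbolic function name) and 'A' (symbolic pointer with offset), let 'P' mark raw pointers approved for userland, default kptr_restrict to 2, and NULL any other kernel pointer about to be formatted into a usercopy buffer. Usage exactly as in the mm/kmemleak.c hunk later in this patch:

	seq_printf(seq, "  [<%pP>] %pA\n", ptr, ptr);

%pP keeps the raw value (the approved /proc-style case) and %pA stays symbolized even when GRKERNSEC_HIDESYM stops plain %pS from resolving symbols.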
99427diff --git a/localversion-grsec b/localversion-grsec
99428new file mode 100644
99429index 0000000..7cd6065
99430--- /dev/null
99431+++ b/localversion-grsec
99432@@ -0,0 +1 @@
99433+-grsec
99434diff --git a/mm/Kconfig b/mm/Kconfig
99435index 3e9977a..205074f 100644
99436--- a/mm/Kconfig
99437+++ b/mm/Kconfig
99438@@ -333,10 +333,11 @@ config KSM
99439 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
99440
99441 config DEFAULT_MMAP_MIN_ADDR
99442- int "Low address space to protect from user allocation"
99443+ int "Low address space to protect from user allocation"
99444 depends on MMU
99445- default 4096
99446- help
99447+ default 32768 if ALPHA || ARM || PARISC || SPARC32
99448+ default 65536
99449+ help
99450 This is the portion of low virtual memory which should be protected
99451 from userspace allocation. Keeping a user from writing to low pages
99452 can help reduce the impact of kernel NULL pointer bugs.
99453@@ -367,7 +368,7 @@ config MEMORY_FAILURE
99454
99455 config HWPOISON_INJECT
99456 tristate "HWPoison pages injector"
99457- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
99458+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
99459 select PROC_PAGE_MONITOR
99460
99461 config NOMMU_INITIAL_TRIM_EXCESS
99462diff --git a/mm/backing-dev.c b/mm/backing-dev.c
99463index 1706cbb..f89dbca 100644
99464--- a/mm/backing-dev.c
99465+++ b/mm/backing-dev.c
99466@@ -12,7 +12,7 @@
99467 #include <linux/device.h>
99468 #include <trace/events/writeback.h>
99469
99470-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
99471+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
99472
99473 struct backing_dev_info default_backing_dev_info = {
99474 .name = "default",
99475@@ -533,7 +533,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
99476 return err;
99477
99478 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
99479- atomic_long_inc_return(&bdi_seq));
99480+ atomic_long_inc_return_unchecked(&bdi_seq));
99481 if (err) {
99482 bdi_destroy(bdi);
99483 return err;
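
Editor's note: bdi_seq only generates unique device names, so its wraparound is harmless. Under PaX REFCOUNT, plain atomic overflows trap to stop reference-count exploits; counters like this one therefore opt out via the *_unchecked types. The conversion pattern, as a sketch:

	/* benign-overflow counter: the unchecked variant keeps a wrap
	 * from triggering the PAX_REFCOUNT overflow handler */
	static atomic_long_unchecked_t seq = ATOMIC_LONG_INIT(0);

	static long next_id(void)
	{
		return atomic_long_inc_return_unchecked(&seq);
	}

The same treatment is applied to num_poisoned_pages in the mm/memory-failure.c hunk below.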
99484diff --git a/mm/filemap.c b/mm/filemap.c
99485index 900edfa..ff056b1 100644
99486--- a/mm/filemap.c
99487+++ b/mm/filemap.c
99488@@ -2074,7 +2074,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
99489 struct address_space *mapping = file->f_mapping;
99490
99491 if (!mapping->a_ops->readpage)
99492- return -ENOEXEC;
99493+ return -ENODEV;
99494 file_accessed(file);
99495 vma->vm_ops = &generic_file_vm_ops;
99496 return 0;
99497@@ -2252,6 +2252,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
99498 *pos = i_size_read(inode);
99499
99500 if (limit != RLIM_INFINITY) {
99501+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
99502 if (*pos >= limit) {
99503 send_sig(SIGXFSZ, current, 0);
99504 return -EFBIG;
99505diff --git a/mm/fremap.c b/mm/fremap.c
99506index 72b8fa3..c5b39f1 100644
99507--- a/mm/fremap.c
99508+++ b/mm/fremap.c
99509@@ -180,6 +180,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
99510 retry:
99511 vma = find_vma(mm, start);
99512
99513+#ifdef CONFIG_PAX_SEGMEXEC
99514+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
99515+ goto out;
99516+#endif
99517+
99518 /*
99519 * Make sure the vma is shared, that it supports prefaulting,
99520 * and that the remapped range is valid and fully within
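
Editor's note: under PAX_SEGMEXEC the user address space is split in half and every executable mapping gets a mirror vma at vm_start + SEGMEXEC_TASK_SIZE, so an operation on the primary vma must be replayed on the mirror. The remap_file_pages hunk above simply refuses such vmas, while the madvise and hugetlb hunks below apply the recurring idiom, sketched here:

	#ifdef CONFIG_PAX_SEGMEXEC
		struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);

		if (vma_m) {
			/* repeat the flag update / zap / fault on the
			 * mirror so both halves stay consistent */
		}
	#endif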
99521diff --git a/mm/gup.c b/mm/gup.c
99522index cc5a9e7..d496acf 100644
99523--- a/mm/gup.c
99524+++ b/mm/gup.c
99525@@ -265,11 +265,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
99526 unsigned int fault_flags = 0;
99527 int ret;
99528
99529- /* For mlock, just skip the stack guard page. */
99530- if ((*flags & FOLL_MLOCK) &&
99531- (stack_guard_page_start(vma, address) ||
99532- stack_guard_page_end(vma, address + PAGE_SIZE)))
99533- return -ENOENT;
99534 if (*flags & FOLL_WRITE)
99535 fault_flags |= FAULT_FLAG_WRITE;
99536 if (nonblocking)
99537@@ -424,14 +419,14 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
99538 if (!(gup_flags & FOLL_FORCE))
99539 gup_flags |= FOLL_NUMA;
99540
99541- do {
99542+ while (nr_pages) {
99543 struct page *page;
99544 unsigned int foll_flags = gup_flags;
99545 unsigned int page_increm;
99546
99547 /* first iteration or cross vma bound */
99548 if (!vma || start >= vma->vm_end) {
99549- vma = find_extend_vma(mm, start);
99550+ vma = find_vma(mm, start);
99551 if (!vma && in_gate_area(mm, start)) {
99552 int ret;
99553 ret = get_gate_page(mm, start & PAGE_MASK,
99554@@ -443,7 +438,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
99555 goto next_page;
99556 }
99557
99558- if (!vma || check_vma_flags(vma, gup_flags))
99559+ if (!vma || start < vma->vm_start || check_vma_flags(vma, gup_flags))
99560 return i ? : -EFAULT;
99561 if (is_vm_hugetlb_page(vma)) {
99562 i = follow_hugetlb_page(mm, vma, pages, vmas,
99563@@ -498,7 +493,7 @@ next_page:
99564 i += page_increm;
99565 start += page_increm * PAGE_SIZE;
99566 nr_pages -= page_increm;
99567- } while (nr_pages);
99568+ }
99569 return i;
99570 }
99571 EXPORT_SYMBOL(__get_user_pages);
99572diff --git a/mm/highmem.c b/mm/highmem.c
99573index b32b70c..e512eb0 100644
99574--- a/mm/highmem.c
99575+++ b/mm/highmem.c
99576@@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
99577 * So no dangers, even with speculative execution.
99578 */
99579 page = pte_page(pkmap_page_table[i]);
99580+ pax_open_kernel();
99581 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
99582-
99583+ pax_close_kernel();
99584 set_page_address(page, NULL);
99585 need_flush = 1;
99586 }
99587@@ -198,9 +199,11 @@ start:
99588 }
99589 }
99590 vaddr = PKMAP_ADDR(last_pkmap_nr);
99591+
99592+ pax_open_kernel();
99593 set_pte_at(&init_mm, vaddr,
99594 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
99595-
99596+ pax_close_kernel();
99597 pkmap_count[last_pkmap_nr] = 1;
99598 set_page_address(page, (void *)vaddr);
99599
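
Editor's note: PAX_KERNEXEC write-protects kernel page tables, so the two pkmap hunks above must briefly lift that protection around their PTE writes (on x86 this toggles CR0.WP). The bracketing idiom, as used there:

	pax_open_kernel();		/* temporarily allow kernel writes */
	set_pte_at(&init_mm, vaddr, ptep, mk_pte(page, kmap_prot));
	pax_close_kernel();		/* restore the protection at once */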
99600diff --git a/mm/hugetlb.c b/mm/hugetlb.c
99601index 7a0a73d..d583cca 100644
99602--- a/mm/hugetlb.c
99603+++ b/mm/hugetlb.c
99604@@ -2250,6 +2250,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
99605 struct hstate *h = &default_hstate;
99606 unsigned long tmp;
99607 int ret;
99608+ ctl_table_no_const hugetlb_table;
99609
99610 if (!hugepages_supported())
99611 return -ENOTSUPP;
99612@@ -2259,9 +2260,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
99613 if (write && hstate_is_gigantic(h) && !gigantic_page_supported())
99614 return -EINVAL;
99615
99616- table->data = &tmp;
99617- table->maxlen = sizeof(unsigned long);
99618- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
99619+ hugetlb_table = *table;
99620+ hugetlb_table.data = &tmp;
99621+ hugetlb_table.maxlen = sizeof(unsigned long);
99622+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
99623 if (ret)
99624 goto out;
99625
99626@@ -2306,6 +2308,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
99627 struct hstate *h = &default_hstate;
99628 unsigned long tmp;
99629 int ret;
99630+ ctl_table_no_const hugetlb_table;
99631
99632 if (!hugepages_supported())
99633 return -ENOTSUPP;
99634@@ -2315,9 +2318,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
99635 if (write && hstate_is_gigantic(h))
99636 return -EINVAL;
99637
99638- table->data = &tmp;
99639- table->maxlen = sizeof(unsigned long);
99640- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
99641+ hugetlb_table = *table;
99642+ hugetlb_table.data = &tmp;
99643+ hugetlb_table.maxlen = sizeof(unsigned long);
99644+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
99645 if (ret)
99646 goto out;
99647
99648@@ -2798,6 +2802,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
99649 return 1;
99650 }
99651
99652+#ifdef CONFIG_PAX_SEGMEXEC
99653+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
99654+{
99655+ struct mm_struct *mm = vma->vm_mm;
99656+ struct vm_area_struct *vma_m;
99657+ unsigned long address_m;
99658+ pte_t *ptep_m;
99659+
99660+ vma_m = pax_find_mirror_vma(vma);
99661+ if (!vma_m)
99662+ return;
99663+
99664+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
99665+ address_m = address + SEGMEXEC_TASK_SIZE;
99666+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
99667+ get_page(page_m);
99668+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
99669+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
99670+}
99671+#endif
99672+
99673 /*
99674 * Hugetlb_cow() should be called with page lock of the original hugepage held.
99675 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
99676@@ -2915,6 +2940,11 @@ retry_avoidcopy:
99677 make_huge_pte(vma, new_page, 1));
99678 page_remove_rmap(old_page);
99679 hugepage_add_new_anon_rmap(new_page, vma, address);
99680+
99681+#ifdef CONFIG_PAX_SEGMEXEC
99682+ pax_mirror_huge_pte(vma, address, new_page);
99683+#endif
99684+
99685 /* Make the old page be freed below */
99686 new_page = old_page;
99687 }
99688@@ -3074,6 +3104,10 @@ retry:
99689 && (vma->vm_flags & VM_SHARED)));
99690 set_huge_pte_at(mm, address, ptep, new_pte);
99691
99692+#ifdef CONFIG_PAX_SEGMEXEC
99693+ pax_mirror_huge_pte(vma, address, page);
99694+#endif
99695+
99696 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
99697 /* Optimization, do the COW without a second fault */
99698 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
99699@@ -3140,6 +3174,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
99700 struct hstate *h = hstate_vma(vma);
99701 struct address_space *mapping;
99702
99703+#ifdef CONFIG_PAX_SEGMEXEC
99704+ struct vm_area_struct *vma_m;
99705+#endif
99706+
99707 address &= huge_page_mask(h);
99708
99709 ptep = huge_pte_offset(mm, address);
99710@@ -3153,6 +3191,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
99711 VM_FAULT_SET_HINDEX(hstate_index(h));
99712 }
99713
99714+#ifdef CONFIG_PAX_SEGMEXEC
99715+ vma_m = pax_find_mirror_vma(vma);
99716+ if (vma_m) {
99717+ unsigned long address_m;
99718+
99719+ if (vma->vm_start > vma_m->vm_start) {
99720+ address_m = address;
99721+ address -= SEGMEXEC_TASK_SIZE;
99722+ vma = vma_m;
99723+ h = hstate_vma(vma);
99724+ } else
99725+ address_m = address + SEGMEXEC_TASK_SIZE;
99726+
99727+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
99728+ return VM_FAULT_OOM;
99729+ address_m &= HPAGE_MASK;
99730+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
99731+ }
99732+#endif
99733+
99734 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
99735 if (!ptep)
99736 return VM_FAULT_OOM;
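
Editor's note: the two sysctl hunks near the top of the hugetlb.c diff follow the PaX constify pattern: registered ctl_tables become const, so a handler can no longer point table->data at a stack temporary and instead mutates a writable stack copy. The shape of the pattern, extracted as a sketch (write-back of tmp and error handling omitted):

	static int handler(struct ctl_table *table, int write,
			   void __user *buffer, size_t *length, loff_t *ppos)
	{
		unsigned long tmp;
		ctl_table_no_const t = *table;	/* writable copy of the const table */

		t.data = &tmp;
		t.maxlen = sizeof(tmp);
		return proc_doulongvec_minmax(&t, write, buffer, length, ppos);
	}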
99737diff --git a/mm/internal.h b/mm/internal.h
99738index 7f22a11f..f3c207f 100644
99739--- a/mm/internal.h
99740+++ b/mm/internal.h
99741@@ -109,6 +109,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
99742 * in mm/page_alloc.c
99743 */
99744 extern void __free_pages_bootmem(struct page *page, unsigned int order);
99745+extern void free_compound_page(struct page *page);
99746 extern void prep_compound_page(struct page *page, unsigned long order);
99747 #ifdef CONFIG_MEMORY_FAILURE
99748 extern bool is_free_buddy_page(struct page *page);
99749@@ -351,7 +352,7 @@ extern u32 hwpoison_filter_enable;
99750
99751 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
99752 unsigned long, unsigned long,
99753- unsigned long, unsigned long);
99754+ unsigned long, unsigned long) __intentional_overflow(-1);
99755
99756 extern void set_pageblock_order(void);
99757 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
99758diff --git a/mm/iov_iter.c b/mm/iov_iter.c
99759index 7b5dbd1..af0e329 100644
99760--- a/mm/iov_iter.c
99761+++ b/mm/iov_iter.c
99762@@ -173,7 +173,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
99763
99764 while (bytes) {
99765 char __user *buf = iov->iov_base + base;
99766- int copy = min(bytes, iov->iov_len - base);
99767+ size_t copy = min(bytes, iov->iov_len - base);
99768
99769 base = 0;
99770 left = __copy_from_user_inatomic(vaddr, buf, copy);
99771@@ -201,7 +201,7 @@ static size_t copy_from_user_atomic_iovec(struct page *page,
99772
99773 kaddr = kmap_atomic(page);
99774 if (likely(i->nr_segs == 1)) {
99775- int left;
99776+ size_t left;
99777 char __user *buf = i->iov->iov_base + i->iov_offset;
99778 left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
99779 copied = bytes - left;
99780@@ -231,7 +231,7 @@ static void advance_iovec(struct iov_iter *i, size_t bytes)
99781 * zero-length segments (without overrunning the iovec).
99782 */
99783 while (bytes || unlikely(i->count && !iov->iov_len)) {
99784- int copy;
99785+ size_t copy;
99786
99787 copy = min(bytes, iov->iov_len - base);
99788 BUG_ON(!i->count || i->count < copy);
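
Editor's note: the iov_iter.c hunk widens the local copy counters from int to size_t. Both min() arguments are size_t here, so assigning the result to an int silently truncates once a segment length exceeds INT_MAX; the widened type keeps later comparisons and the __copy_from_user_inatomic() argument unsigned. A standalone illustration with hypothetical values:

	#include <limits.h>
	#include <stddef.h>

	size_t bytes = (size_t)INT_MAX + 2;
	int narrow = bytes;	/* truncated: typically ends up negative */
	size_t wide = bytes;	/* keeps the full value, as after this hunk */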
99789diff --git a/mm/kmemleak.c b/mm/kmemleak.c
99790index 3cda50c..032ba634 100644
99791--- a/mm/kmemleak.c
99792+++ b/mm/kmemleak.c
99793@@ -364,7 +364,7 @@ static void print_unreferenced(struct seq_file *seq,
99794
99795 for (i = 0; i < object->trace_len; i++) {
99796 void *ptr = (void *)object->trace[i];
99797- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
99798+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
99799 }
99800 }
99801
99802@@ -1905,7 +1905,7 @@ static int __init kmemleak_late_init(void)
99803 return -ENOMEM;
99804 }
99805
99806- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
99807+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
99808 &kmemleak_fops);
99809 if (!dentry)
99810 pr_warning("Failed to create the debugfs kmemleak file\n");
99811diff --git a/mm/maccess.c b/mm/maccess.c
99812index d53adf9..03a24bf 100644
99813--- a/mm/maccess.c
99814+++ b/mm/maccess.c
99815@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
99816 set_fs(KERNEL_DS);
99817 pagefault_disable();
99818 ret = __copy_from_user_inatomic(dst,
99819- (__force const void __user *)src, size);
99820+ (const void __force_user *)src, size);
99821 pagefault_enable();
99822 set_fs(old_fs);
99823
99824@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
99825
99826 set_fs(KERNEL_DS);
99827 pagefault_disable();
99828- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
99829+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
99830 pagefault_enable();
99831 set_fs(old_fs);
99832
99833diff --git a/mm/madvise.c b/mm/madvise.c
99834index a402f8f..f5e5daa 100644
99835--- a/mm/madvise.c
99836+++ b/mm/madvise.c
99837@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
99838 pgoff_t pgoff;
99839 unsigned long new_flags = vma->vm_flags;
99840
99841+#ifdef CONFIG_PAX_SEGMEXEC
99842+ struct vm_area_struct *vma_m;
99843+#endif
99844+
99845 switch (behavior) {
99846 case MADV_NORMAL:
99847 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
99848@@ -126,6 +130,13 @@ success:
99849 /*
99850 * vm_flags is protected by the mmap_sem held in write mode.
99851 */
99852+
99853+#ifdef CONFIG_PAX_SEGMEXEC
99854+ vma_m = pax_find_mirror_vma(vma);
99855+ if (vma_m)
99856+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
99857+#endif
99858+
99859 vma->vm_flags = new_flags;
99860
99861 out:
99862@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct *vma,
99863 struct vm_area_struct **prev,
99864 unsigned long start, unsigned long end)
99865 {
99866+
99867+#ifdef CONFIG_PAX_SEGMEXEC
99868+ struct vm_area_struct *vma_m;
99869+#endif
99870+
99871 *prev = vma;
99872 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
99873 return -EINVAL;
99874@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct *vma,
99875 zap_page_range(vma, start, end - start, &details);
99876 } else
99877 zap_page_range(vma, start, end - start, NULL);
99878+
99879+#ifdef CONFIG_PAX_SEGMEXEC
99880+ vma_m = pax_find_mirror_vma(vma);
99881+ if (vma_m) {
99882+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
99883+ struct zap_details details = {
99884+ .nonlinear_vma = vma_m,
99885+ .last_index = ULONG_MAX,
99886+ };
99887+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
99888+ } else
99889+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
99890+ }
99891+#endif
99892+
99893 return 0;
99894 }
99895
99896@@ -491,6 +522,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
99897 if (end < start)
99898 return error;
99899
99900+#ifdef CONFIG_PAX_SEGMEXEC
99901+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
99902+ if (end > SEGMEXEC_TASK_SIZE)
99903+ return error;
99904+ } else
99905+#endif
99906+
99907+ if (end > TASK_SIZE)
99908+ return error;
99909+
99910 error = 0;
99911 if (end == start)
99912 return error;
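
The mm/madvise.c changes all serve one PaX mechanism: under SEGMEXEC the user address space is split in half and every executable mapping has a mirror at the same offset in the upper half, so flag updates and zap_page_range() calls must be replayed on the mirror at addr + SEGMEXEC_TASK_SIZE, and user-supplied ranges are capped at SEGMEXEC_TASK_SIZE instead of TASK_SIZE. The address arithmetic as a stand-alone demo (the 1.5 GB split is an assumption for 32-bit x86, where TASK_SIZE is 3 GB):

#include <stdio.h>

#define TASK_SIZE          (3UL << 30)     /* 3 GB, 32-bit x86 assumption */
#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2) /* lower/upper half split */

int main(void)
{
    unsigned long addr   = 0x08048000UL;   /* typical i386 ELF load address */
    unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;

    printf("vma:    %#010lx\n", addr);
    printf("mirror: %#010lx\n", addr_m);
    printf("ranges reaching above %#lx are rejected under SEGMEXEC\n",
           SEGMEXEC_TASK_SIZE);
    return 0;
}
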
99913diff --git a/mm/memory-failure.c b/mm/memory-failure.c
99914index a013bc9..a897a14 100644
99915--- a/mm/memory-failure.c
99916+++ b/mm/memory-failure.c
99917@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
99918
99919 int sysctl_memory_failure_recovery __read_mostly = 1;
99920
99921-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
99922+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
99923
99924 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
99925
99926@@ -198,7 +198,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
99927 pfn, t->comm, t->pid);
99928 si.si_signo = SIGBUS;
99929 si.si_errno = 0;
99930- si.si_addr = (void *)addr;
99931+ si.si_addr = (void __user *)addr;
99932 #ifdef __ARCH_SI_TRAPNO
99933 si.si_trapno = trapno;
99934 #endif
99935@@ -791,7 +791,7 @@ static struct page_state {
99936 unsigned long res;
99937 char *msg;
99938 int (*action)(struct page *p, unsigned long pfn);
99939-} error_states[] = {
99940+} __do_const error_states[] = {
99941 { reserved, reserved, "reserved kernel", me_kernel },
99942 /*
99943 * free pages are specially detected outside this table:
99944@@ -1099,7 +1099,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
99945 nr_pages = 1 << compound_order(hpage);
99946 else /* normal page or thp */
99947 nr_pages = 1;
99948- atomic_long_add(nr_pages, &num_poisoned_pages);
99949+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
99950
99951 /*
99952 * We need/can do nothing about count=0 pages.
99953@@ -1128,7 +1128,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
99954 if (PageHWPoison(hpage)) {
99955 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
99956 || (p != hpage && TestSetPageHWPoison(hpage))) {
99957- atomic_long_sub(nr_pages, &num_poisoned_pages);
99958+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
99959 unlock_page(hpage);
99960 return 0;
99961 }
99962@@ -1186,14 +1186,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
99963 */
99964 if (!PageHWPoison(p)) {
99965 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
99966- atomic_long_sub(nr_pages, &num_poisoned_pages);
99967+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
99968 put_page(hpage);
99969 res = 0;
99970 goto out;
99971 }
99972 if (hwpoison_filter(p)) {
99973 if (TestClearPageHWPoison(p))
99974- atomic_long_sub(nr_pages, &num_poisoned_pages);
99975+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
99976 unlock_page(hpage);
99977 put_page(hpage);
99978 return 0;
99979@@ -1423,7 +1423,7 @@ int unpoison_memory(unsigned long pfn)
99980 return 0;
99981 }
99982 if (TestClearPageHWPoison(p))
99983- atomic_long_dec(&num_poisoned_pages);
99984+ atomic_long_dec_unchecked(&num_poisoned_pages);
99985 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
99986 return 0;
99987 }
99988@@ -1437,7 +1437,7 @@ int unpoison_memory(unsigned long pfn)
99989 */
99990 if (TestClearPageHWPoison(page)) {
99991 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
99992- atomic_long_sub(nr_pages, &num_poisoned_pages);
99993+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
99994 freeit = 1;
99995 if (PageHuge(page))
99996 clear_page_hwpoison_huge_page(page);
99997@@ -1562,11 +1562,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
99998 if (PageHuge(page)) {
99999 set_page_hwpoison_huge_page(hpage);
100000 dequeue_hwpoisoned_huge_page(hpage);
100001- atomic_long_add(1 << compound_order(hpage),
100002+ atomic_long_add_unchecked(1 << compound_order(hpage),
100003 &num_poisoned_pages);
100004 } else {
100005 SetPageHWPoison(page);
100006- atomic_long_inc(&num_poisoned_pages);
100007+ atomic_long_inc_unchecked(&num_poisoned_pages);
100008 }
100009 }
100010 return ret;
100011@@ -1605,7 +1605,7 @@ static int __soft_offline_page(struct page *page, int flags)
100012 put_page(page);
100013 pr_info("soft_offline: %#lx: invalidated\n", pfn);
100014 SetPageHWPoison(page);
100015- atomic_long_inc(&num_poisoned_pages);
100016+ atomic_long_inc_unchecked(&num_poisoned_pages);
100017 return 0;
100018 }
100019
100020@@ -1656,7 +1656,7 @@ static int __soft_offline_page(struct page *page, int flags)
100021 if (!is_free_buddy_page(page))
100022 pr_info("soft offline: %#lx: page leaked\n",
100023 pfn);
100024- atomic_long_inc(&num_poisoned_pages);
100025+ atomic_long_inc_unchecked(&num_poisoned_pages);
100026 }
100027 } else {
100028 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
100029@@ -1726,11 +1726,11 @@ int soft_offline_page(struct page *page, int flags)
100030 if (PageHuge(page)) {
100031 set_page_hwpoison_huge_page(hpage);
100032 dequeue_hwpoisoned_huge_page(hpage);
100033- atomic_long_add(1 << compound_order(hpage),
100034+ atomic_long_add_unchecked(1 << compound_order(hpage),
100035 &num_poisoned_pages);
100036 } else {
100037 SetPageHWPoison(page);
100038- atomic_long_inc(&num_poisoned_pages);
100039+ atomic_long_inc_unchecked(&num_poisoned_pages);
100040 }
100041 }
100042 unset_migratetype_isolate(page, MIGRATE_MOVABLE);
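
The num_poisoned_pages conversions in mm/memory-failure.c follow the PAX_REFCOUNT pattern: with reference-count overflow protection enabled, the normal atomic_long_* operations trap on overflow, so purely statistical counters that may legitimately grow without bound are switched to *_unchecked variants with plain wrapping semantics. A conceptual userspace sketch of that split (this is the idea, not the kernel's implementation):

#include <stdatomic.h>
#include <stdio.h>

/* checked counters would trap on overflow under PAX_REFCOUNT;
 * the _unchecked type opts a statistics counter out of that detection */
typedef struct { atomic_long counter; } atomic_long_unchecked_t;

static void atomic_long_inc_unchecked(atomic_long_unchecked_t *v)
{
    atomic_fetch_add_explicit(&v->counter, 1, memory_order_relaxed);
}

int main(void)
{
    atomic_long_unchecked_t num_poisoned_pages = { 0 };
    atomic_long_inc_unchecked(&num_poisoned_pages);
    printf("%ld\n", atomic_load(&num_poisoned_pages.counter));
    return 0;
}
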
100043diff --git a/mm/memory.c b/mm/memory.c
100044index 0a21f3d..babeaec 100644
100045--- a/mm/memory.c
100046+++ b/mm/memory.c
100047@@ -413,6 +413,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
100048 free_pte_range(tlb, pmd, addr);
100049 } while (pmd++, addr = next, addr != end);
100050
100051+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
100052 start &= PUD_MASK;
100053 if (start < floor)
100054 return;
100055@@ -427,6 +428,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
100056 pmd = pmd_offset(pud, start);
100057 pud_clear(pud);
100058 pmd_free_tlb(tlb, pmd, start);
100059+#endif
100060+
100061 }
100062
100063 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
100064@@ -446,6 +449,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
100065 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
100066 } while (pud++, addr = next, addr != end);
100067
100068+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
100069 start &= PGDIR_MASK;
100070 if (start < floor)
100071 return;
100072@@ -460,6 +464,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
100073 pud = pud_offset(pgd, start);
100074 pgd_clear(pgd);
100075 pud_free_tlb(tlb, pud, start);
100076+#endif
100077+
100078 }
100079
100080 /*
100081@@ -1500,6 +1506,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
100082 page_add_file_rmap(page);
100083 set_pte_at(mm, addr, pte, mk_pte(page, prot));
100084
100085+#ifdef CONFIG_PAX_SEGMEXEC
100086+ pax_mirror_file_pte(vma, addr, page, ptl);
100087+#endif
100088+
100089 retval = 0;
100090 pte_unmap_unlock(pte, ptl);
100091 return retval;
100092@@ -1544,9 +1554,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
100093 if (!page_count(page))
100094 return -EINVAL;
100095 if (!(vma->vm_flags & VM_MIXEDMAP)) {
100096+
100097+#ifdef CONFIG_PAX_SEGMEXEC
100098+ struct vm_area_struct *vma_m;
100099+#endif
100100+
100101 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
100102 BUG_ON(vma->vm_flags & VM_PFNMAP);
100103 vma->vm_flags |= VM_MIXEDMAP;
100104+
100105+#ifdef CONFIG_PAX_SEGMEXEC
100106+ vma_m = pax_find_mirror_vma(vma);
100107+ if (vma_m)
100108+ vma_m->vm_flags |= VM_MIXEDMAP;
100109+#endif
100110+
100111 }
100112 return insert_page(vma, addr, page, vma->vm_page_prot);
100113 }
100114@@ -1629,6 +1651,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
100115 unsigned long pfn)
100116 {
100117 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
100118+ BUG_ON(vma->vm_mirror);
100119
100120 if (addr < vma->vm_start || addr >= vma->vm_end)
100121 return -EFAULT;
100122@@ -1876,7 +1899,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
100123
100124 BUG_ON(pud_huge(*pud));
100125
100126- pmd = pmd_alloc(mm, pud, addr);
100127+ pmd = (mm == &init_mm) ?
100128+ pmd_alloc_kernel(mm, pud, addr) :
100129+ pmd_alloc(mm, pud, addr);
100130 if (!pmd)
100131 return -ENOMEM;
100132 do {
100133@@ -1896,7 +1921,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
100134 unsigned long next;
100135 int err;
100136
100137- pud = pud_alloc(mm, pgd, addr);
100138+ pud = (mm == &init_mm) ?
100139+ pud_alloc_kernel(mm, pgd, addr) :
100140+ pud_alloc(mm, pgd, addr);
100141 if (!pud)
100142 return -ENOMEM;
100143 do {
100144@@ -2018,6 +2045,186 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
100145 return ret;
100146 }
100147
100148+#ifdef CONFIG_PAX_SEGMEXEC
100149+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
100150+{
100151+ struct mm_struct *mm = vma->vm_mm;
100152+ spinlock_t *ptl;
100153+ pte_t *pte, entry;
100154+
100155+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
100156+ entry = *pte;
100157+ if (!pte_present(entry)) {
100158+ if (!pte_none(entry)) {
100159+ BUG_ON(pte_file(entry));
100160+ free_swap_and_cache(pte_to_swp_entry(entry));
100161+ pte_clear_not_present_full(mm, address, pte, 0);
100162+ }
100163+ } else {
100164+ struct page *page;
100165+
100166+ flush_cache_page(vma, address, pte_pfn(entry));
100167+ entry = ptep_clear_flush(vma, address, pte);
100168+ BUG_ON(pte_dirty(entry));
100169+ page = vm_normal_page(vma, address, entry);
100170+ if (page) {
100171+ update_hiwater_rss(mm);
100172+ if (PageAnon(page))
100173+ dec_mm_counter_fast(mm, MM_ANONPAGES);
100174+ else
100175+ dec_mm_counter_fast(mm, MM_FILEPAGES);
100176+ page_remove_rmap(page);
100177+ page_cache_release(page);
100178+ }
100179+ }
100180+ pte_unmap_unlock(pte, ptl);
100181+}
100182+
100183+/* PaX: if vma is mirrored, synchronize the mirror's PTE
100184+ *
100185+ * the ptl of the lower mapped page is held on entry and is not released on exit
100186+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
100187+ */
100188+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
100189+{
100190+ struct mm_struct *mm = vma->vm_mm;
100191+ unsigned long address_m;
100192+ spinlock_t *ptl_m;
100193+ struct vm_area_struct *vma_m;
100194+ pmd_t *pmd_m;
100195+ pte_t *pte_m, entry_m;
100196+
100197+ BUG_ON(!page_m || !PageAnon(page_m));
100198+
100199+ vma_m = pax_find_mirror_vma(vma);
100200+ if (!vma_m)
100201+ return;
100202+
100203+ BUG_ON(!PageLocked(page_m));
100204+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
100205+ address_m = address + SEGMEXEC_TASK_SIZE;
100206+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
100207+ pte_m = pte_offset_map(pmd_m, address_m);
100208+ ptl_m = pte_lockptr(mm, pmd_m);
100209+ if (ptl != ptl_m) {
100210+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
100211+ if (!pte_none(*pte_m))
100212+ goto out;
100213+ }
100214+
100215+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
100216+ page_cache_get(page_m);
100217+ page_add_anon_rmap(page_m, vma_m, address_m);
100218+ inc_mm_counter_fast(mm, MM_ANONPAGES);
100219+ set_pte_at(mm, address_m, pte_m, entry_m);
100220+ update_mmu_cache(vma_m, address_m, pte_m);
100221+out:
100222+ if (ptl != ptl_m)
100223+ spin_unlock(ptl_m);
100224+ pte_unmap(pte_m);
100225+ unlock_page(page_m);
100226+}
100227+
100228+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
100229+{
100230+ struct mm_struct *mm = vma->vm_mm;
100231+ unsigned long address_m;
100232+ spinlock_t *ptl_m;
100233+ struct vm_area_struct *vma_m;
100234+ pmd_t *pmd_m;
100235+ pte_t *pte_m, entry_m;
100236+
100237+ BUG_ON(!page_m || PageAnon(page_m));
100238+
100239+ vma_m = pax_find_mirror_vma(vma);
100240+ if (!vma_m)
100241+ return;
100242+
100243+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
100244+ address_m = address + SEGMEXEC_TASK_SIZE;
100245+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
100246+ pte_m = pte_offset_map(pmd_m, address_m);
100247+ ptl_m = pte_lockptr(mm, pmd_m);
100248+ if (ptl != ptl_m) {
100249+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
100250+ if (!pte_none(*pte_m))
100251+ goto out;
100252+ }
100253+
100254+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
100255+ page_cache_get(page_m);
100256+ page_add_file_rmap(page_m);
100257+ inc_mm_counter_fast(mm, MM_FILEPAGES);
100258+ set_pte_at(mm, address_m, pte_m, entry_m);
100259+ update_mmu_cache(vma_m, address_m, pte_m);
100260+out:
100261+ if (ptl != ptl_m)
100262+ spin_unlock(ptl_m);
100263+ pte_unmap(pte_m);
100264+}
100265+
100266+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
100267+{
100268+ struct mm_struct *mm = vma->vm_mm;
100269+ unsigned long address_m;
100270+ spinlock_t *ptl_m;
100271+ struct vm_area_struct *vma_m;
100272+ pmd_t *pmd_m;
100273+ pte_t *pte_m, entry_m;
100274+
100275+ vma_m = pax_find_mirror_vma(vma);
100276+ if (!vma_m)
100277+ return;
100278+
100279+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
100280+ address_m = address + SEGMEXEC_TASK_SIZE;
100281+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
100282+ pte_m = pte_offset_map(pmd_m, address_m);
100283+ ptl_m = pte_lockptr(mm, pmd_m);
100284+ if (ptl != ptl_m) {
100285+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
100286+ if (!pte_none(*pte_m))
100287+ goto out;
100288+ }
100289+
100290+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
100291+ set_pte_at(mm, address_m, pte_m, entry_m);
100292+out:
100293+ if (ptl != ptl_m)
100294+ spin_unlock(ptl_m);
100295+ pte_unmap(pte_m);
100296+}
100297+
100298+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
100299+{
100300+ struct page *page_m;
100301+ pte_t entry;
100302+
100303+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
100304+ goto out;
100305+
100306+ entry = *pte;
100307+ page_m = vm_normal_page(vma, address, entry);
100308+ if (!page_m)
100309+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
100310+ else if (PageAnon(page_m)) {
100311+ if (pax_find_mirror_vma(vma)) {
100312+ pte_unmap_unlock(pte, ptl);
100313+ lock_page(page_m);
100314+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
100315+ if (pte_same(entry, *pte))
100316+ pax_mirror_anon_pte(vma, address, page_m, ptl);
100317+ else
100318+ unlock_page(page_m);
100319+ }
100320+ } else
100321+ pax_mirror_file_pte(vma, address, page_m, ptl);
100322+
100323+out:
100324+ pte_unmap_unlock(pte, ptl);
100325+}
100326+#endif
100327+
100328 /*
100329 * This routine handles present pages, when users try to write
100330 * to a shared page. It is done by copying the page to a new address
100331@@ -2215,6 +2422,12 @@ gotten:
100332 */
100333 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
100334 if (likely(pte_same(*page_table, orig_pte))) {
100335+
100336+#ifdef CONFIG_PAX_SEGMEXEC
100337+ if (pax_find_mirror_vma(vma))
100338+ BUG_ON(!trylock_page(new_page));
100339+#endif
100340+
100341 if (old_page) {
100342 if (!PageAnon(old_page)) {
100343 dec_mm_counter_fast(mm, MM_FILEPAGES);
100344@@ -2266,6 +2479,10 @@ gotten:
100345 page_remove_rmap(old_page);
100346 }
100347
100348+#ifdef CONFIG_PAX_SEGMEXEC
100349+ pax_mirror_anon_pte(vma, address, new_page, ptl);
100350+#endif
100351+
100352 /* Free the old page.. */
100353 new_page = old_page;
100354 ret |= VM_FAULT_WRITE;
100355@@ -2539,6 +2756,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
100356 swap_free(entry);
100357 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
100358 try_to_free_swap(page);
100359+
100360+#ifdef CONFIG_PAX_SEGMEXEC
100361+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
100362+#endif
100363+
100364 unlock_page(page);
100365 if (page != swapcache) {
100366 /*
100367@@ -2562,6 +2784,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
100368
100369 /* No need to invalidate - it was non-present before */
100370 update_mmu_cache(vma, address, page_table);
100371+
100372+#ifdef CONFIG_PAX_SEGMEXEC
100373+ pax_mirror_anon_pte(vma, address, page, ptl);
100374+#endif
100375+
100376 unlock:
100377 pte_unmap_unlock(page_table, ptl);
100378 out:
100379@@ -2581,40 +2808,6 @@ out_release:
100380 }
100381
100382 /*
100383- * This is like a special single-page "expand_{down|up}wards()",
100384- * except we must first make sure that 'address{-|+}PAGE_SIZE'
100385- * doesn't hit another vma.
100386- */
100387-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
100388-{
100389- address &= PAGE_MASK;
100390- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
100391- struct vm_area_struct *prev = vma->vm_prev;
100392-
100393- /*
100394- * Is there a mapping abutting this one below?
100395- *
100396- * That's only ok if it's the same stack mapping
100397- * that has gotten split..
100398- */
100399- if (prev && prev->vm_end == address)
100400- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
100401-
100402- expand_downwards(vma, address - PAGE_SIZE);
100403- }
100404- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
100405- struct vm_area_struct *next = vma->vm_next;
100406-
100407- /* As VM_GROWSDOWN but s/below/above/ */
100408- if (next && next->vm_start == address + PAGE_SIZE)
100409- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
100410-
100411- expand_upwards(vma, address + PAGE_SIZE);
100412- }
100413- return 0;
100414-}
100415-
100416-/*
100417 * We enter with non-exclusive mmap_sem (to exclude vma changes,
100418 * but allow concurrent faults), and pte mapped but not yet locked.
100419 * We return with mmap_sem still held, but pte unmapped and unlocked.
100420@@ -2623,27 +2816,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
100421 unsigned long address, pte_t *page_table, pmd_t *pmd,
100422 unsigned int flags)
100423 {
100424- struct page *page;
100425+ struct page *page = NULL;
100426 spinlock_t *ptl;
100427 pte_t entry;
100428
100429- pte_unmap(page_table);
100430-
100431- /* Check if we need to add a guard page to the stack */
100432- if (check_stack_guard_page(vma, address) < 0)
100433- return VM_FAULT_SIGBUS;
100434-
100435- /* Use the zero-page for reads */
100436 if (!(flags & FAULT_FLAG_WRITE)) {
100437 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
100438 vma->vm_page_prot));
100439- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
100440+ ptl = pte_lockptr(mm, pmd);
100441+ spin_lock(ptl);
100442 if (!pte_none(*page_table))
100443 goto unlock;
100444 goto setpte;
100445 }
100446
100447 /* Allocate our own private page. */
100448+ pte_unmap(page_table);
100449+
100450 if (unlikely(anon_vma_prepare(vma)))
100451 goto oom;
100452 page = alloc_zeroed_user_highpage_movable(vma, address);
100453@@ -2667,6 +2856,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
100454 if (!pte_none(*page_table))
100455 goto release;
100456
100457+#ifdef CONFIG_PAX_SEGMEXEC
100458+ if (pax_find_mirror_vma(vma))
100459+ BUG_ON(!trylock_page(page));
100460+#endif
100461+
100462 inc_mm_counter_fast(mm, MM_ANONPAGES);
100463 page_add_new_anon_rmap(page, vma, address);
100464 setpte:
100465@@ -2674,6 +2868,12 @@ setpte:
100466
100467 /* No need to invalidate - it was non-present before */
100468 update_mmu_cache(vma, address, page_table);
100469+
100470+#ifdef CONFIG_PAX_SEGMEXEC
100471+ if (page)
100472+ pax_mirror_anon_pte(vma, address, page, ptl);
100473+#endif
100474+
100475 unlock:
100476 pte_unmap_unlock(page_table, ptl);
100477 return 0;
100478@@ -2905,6 +3105,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
100479 return ret;
100480 }
100481 do_set_pte(vma, address, fault_page, pte, false, false);
100482+
100483+#ifdef CONFIG_PAX_SEGMEXEC
100484+ pax_mirror_file_pte(vma, address, fault_page, ptl);
100485+#endif
100486+
100487 unlock_page(fault_page);
100488 unlock_out:
100489 pte_unmap_unlock(pte, ptl);
100490@@ -2946,7 +3151,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
100491 page_cache_release(fault_page);
100492 goto uncharge_out;
100493 }
100494+
100495+#ifdef CONFIG_PAX_SEGMEXEC
100496+ if (pax_find_mirror_vma(vma))
100497+ BUG_ON(!trylock_page(new_page));
100498+#endif
100499+
100500 do_set_pte(vma, address, new_page, pte, true, true);
100501+
100502+#ifdef CONFIG_PAX_SEGMEXEC
100503+ pax_mirror_anon_pte(vma, address, new_page, ptl);
100504+#endif
100505+
100506 pte_unmap_unlock(pte, ptl);
100507 unlock_page(fault_page);
100508 page_cache_release(fault_page);
100509@@ -2994,6 +3210,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
100510 return ret;
100511 }
100512 do_set_pte(vma, address, fault_page, pte, true, false);
100513+
100514+#ifdef CONFIG_PAX_SEGMEXEC
100515+ pax_mirror_file_pte(vma, address, fault_page, ptl);
100516+#endif
100517+
100518 pte_unmap_unlock(pte, ptl);
100519
100520 if (set_page_dirty(fault_page))
100521@@ -3224,6 +3445,12 @@ static int handle_pte_fault(struct mm_struct *mm,
100522 if (flags & FAULT_FLAG_WRITE)
100523 flush_tlb_fix_spurious_fault(vma, address);
100524 }
100525+
100526+#ifdef CONFIG_PAX_SEGMEXEC
100527+ pax_mirror_pte(vma, address, pte, pmd, ptl);
100528+ return 0;
100529+#endif
100530+
100531 unlock:
100532 pte_unmap_unlock(pte, ptl);
100533 return 0;
100534@@ -3240,9 +3467,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
100535 pmd_t *pmd;
100536 pte_t *pte;
100537
100538+#ifdef CONFIG_PAX_SEGMEXEC
100539+ struct vm_area_struct *vma_m;
100540+#endif
100541+
100542 if (unlikely(is_vm_hugetlb_page(vma)))
100543 return hugetlb_fault(mm, vma, address, flags);
100544
100545+#ifdef CONFIG_PAX_SEGMEXEC
100546+ vma_m = pax_find_mirror_vma(vma);
100547+ if (vma_m) {
100548+ unsigned long address_m;
100549+ pgd_t *pgd_m;
100550+ pud_t *pud_m;
100551+ pmd_t *pmd_m;
100552+
100553+ if (vma->vm_start > vma_m->vm_start) {
100554+ address_m = address;
100555+ address -= SEGMEXEC_TASK_SIZE;
100556+ vma = vma_m;
100557+ } else
100558+ address_m = address + SEGMEXEC_TASK_SIZE;
100559+
100560+ pgd_m = pgd_offset(mm, address_m);
100561+ pud_m = pud_alloc(mm, pgd_m, address_m);
100562+ if (!pud_m)
100563+ return VM_FAULT_OOM;
100564+ pmd_m = pmd_alloc(mm, pud_m, address_m);
100565+ if (!pmd_m)
100566+ return VM_FAULT_OOM;
100567+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
100568+ return VM_FAULT_OOM;
100569+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
100570+ }
100571+#endif
100572+
100573 pgd = pgd_offset(mm, address);
100574 pud = pud_alloc(mm, pgd, address);
100575 if (!pud)
100576@@ -3370,6 +3629,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
100577 spin_unlock(&mm->page_table_lock);
100578 return 0;
100579 }
100580+
100581+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
100582+{
100583+ pud_t *new = pud_alloc_one(mm, address);
100584+ if (!new)
100585+ return -ENOMEM;
100586+
100587+ smp_wmb(); /* See comment in __pte_alloc */
100588+
100589+ spin_lock(&mm->page_table_lock);
100590+ if (pgd_present(*pgd)) /* Another has populated it */
100591+ pud_free(mm, new);
100592+ else
100593+ pgd_populate_kernel(mm, pgd, new);
100594+ spin_unlock(&mm->page_table_lock);
100595+ return 0;
100596+}
100597 #endif /* __PAGETABLE_PUD_FOLDED */
100598
100599 #ifndef __PAGETABLE_PMD_FOLDED
100600@@ -3400,6 +3676,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
100601 spin_unlock(&mm->page_table_lock);
100602 return 0;
100603 }
100604+
100605+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
100606+{
100607+ pmd_t *new = pmd_alloc_one(mm, address);
100608+ if (!new)
100609+ return -ENOMEM;
100610+
100611+ smp_wmb(); /* See comment in __pte_alloc */
100612+
100613+ spin_lock(&mm->page_table_lock);
100614+#ifndef __ARCH_HAS_4LEVEL_HACK
100615+ if (pud_present(*pud)) /* Another has populated it */
100616+ pmd_free(mm, new);
100617+ else
100618+ pud_populate_kernel(mm, pud, new);
100619+#else
100620+ if (pgd_present(*pud)) /* Another has populated it */
100621+ pmd_free(mm, new);
100622+ else
100623+ pgd_populate_kernel(mm, pud, new);
100624+#endif /* __ARCH_HAS_4LEVEL_HACK */
100625+ spin_unlock(&mm->page_table_lock);
100626+ return 0;
100627+}
100628 #endif /* __PAGETABLE_PMD_FOLDED */
100629
100630 #if !defined(__HAVE_ARCH_GATE_AREA)
100631@@ -3413,7 +3713,7 @@ static int __init gate_vma_init(void)
100632 gate_vma.vm_start = FIXADDR_USER_START;
100633 gate_vma.vm_end = FIXADDR_USER_END;
100634 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
100635- gate_vma.vm_page_prot = __P101;
100636+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
100637
100638 return 0;
100639 }
100640@@ -3547,8 +3847,8 @@ out:
100641 return ret;
100642 }
100643
100644-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
100645- void *buf, int len, int write)
100646+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
100647+ void *buf, size_t len, int write)
100648 {
100649 resource_size_t phys_addr;
100650 unsigned long prot = 0;
100651@@ -3574,8 +3874,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
100652 * Access another process' address space as given in mm. If non-NULL, use the
100653 * given task for page fault accounting.
100654 */
100655-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
100656- unsigned long addr, void *buf, int len, int write)
100657+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
100658+ unsigned long addr, void *buf, size_t len, int write)
100659 {
100660 struct vm_area_struct *vma;
100661 void *old_buf = buf;
100662@@ -3583,7 +3883,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
100663 down_read(&mm->mmap_sem);
100664 /* ignore errors, just check how much was successfully transferred */
100665 while (len) {
100666- int bytes, ret, offset;
100667+ ssize_t bytes, ret, offset;
100668 void *maddr;
100669 struct page *page = NULL;
100670
100671@@ -3642,8 +3942,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
100672 *
100673 * The caller must hold a reference on @mm.
100674 */
100675-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
100676- void *buf, int len, int write)
100677+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
100678+ void *buf, size_t len, int write)
100679 {
100680 return __access_remote_vm(NULL, mm, addr, buf, len, write);
100681 }
100682@@ -3653,11 +3953,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
100683 * Source/target buffer must be kernel space,
100684 * Do not walk the page table directly, use get_user_pages
100685 */
100686-int access_process_vm(struct task_struct *tsk, unsigned long addr,
100687- void *buf, int len, int write)
100688+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
100689+ void *buf, size_t len, int write)
100690 {
100691 struct mm_struct *mm;
100692- int ret;
100693+ ssize_t ret;
100694
100695 mm = get_task_mm(tsk);
100696 if (!mm)
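
The mm/memory.c section carries the core of the SEGMEXEC machinery: pax_mirror_anon_pte(), pax_mirror_file_pte() and pax_mirror_pfn_pte() keep the upper-half PTE in sync on every fault in the lower half, check_stack_guard_page() is removed (its guard duty moves to the heap_stack_gap checks added in mm/mmap.c later in this patch), the access_*_vm() length parameters widen from int to size_t/ssize_t, and __pud_alloc_kernel()/__pmd_alloc_kernel() let apply_to_page_range() populate init_mm page tables with the *_populate_kernel flavors. Those allocators use the classic allocate-outside-the-lock, publish-under-the-lock, free-on-a-lost-race pattern; a userspace rendering with pthreads (the names are analogies, not kernel API):

/* build: cc -pthread demo.c */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t page_table_lock = PTHREAD_MUTEX_INITIALIZER;
static void *slot;                       /* stands in for *pgd */

static int alloc_slot(void)
{
    void *new = malloc(64);              /* pud_alloc_one() analogue */
    if (!new)
        return -1;                       /* -ENOMEM */

    pthread_mutex_lock(&page_table_lock);
    if (slot)                            /* another thread populated it */
        free(new);                       /* drop our copy, keep theirs */
    else
        slot = new;                      /* pgd_populate_kernel() analogue */
    pthread_mutex_unlock(&page_table_lock);
    return 0;
}

int main(void)
{
    alloc_slot();
    alloc_slot();                        /* the second call loses the "race" */
    printf("slot=%p\n", slot);
    return 0;
}

Allocating before taking the lock keeps the sleeping allocation out of the critical section; the smp_wmb() in the real code orders the new table's initialization before it becomes visible.
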
100697diff --git a/mm/mempolicy.c b/mm/mempolicy.c
100698index 8f5330d..b41914b 100644
100699--- a/mm/mempolicy.c
100700+++ b/mm/mempolicy.c
100701@@ -750,6 +750,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
100702 unsigned long vmstart;
100703 unsigned long vmend;
100704
100705+#ifdef CONFIG_PAX_SEGMEXEC
100706+ struct vm_area_struct *vma_m;
100707+#endif
100708+
100709 vma = find_vma(mm, start);
100710 if (!vma || vma->vm_start > start)
100711 return -EFAULT;
100712@@ -793,6 +797,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
100713 err = vma_replace_policy(vma, new_pol);
100714 if (err)
100715 goto out;
100716+
100717+#ifdef CONFIG_PAX_SEGMEXEC
100718+ vma_m = pax_find_mirror_vma(vma);
100719+ if (vma_m) {
100720+ err = vma_replace_policy(vma_m, new_pol);
100721+ if (err)
100722+ goto out;
100723+ }
100724+#endif
100725+
100726 }
100727
100728 out:
100729@@ -1225,6 +1239,17 @@ static long do_mbind(unsigned long start, unsigned long len,
100730
100731 if (end < start)
100732 return -EINVAL;
100733+
100734+#ifdef CONFIG_PAX_SEGMEXEC
100735+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
100736+ if (end > SEGMEXEC_TASK_SIZE)
100737+ return -EINVAL;
100738+ } else
100739+#endif
100740+
100741+ if (end > TASK_SIZE)
100742+ return -EINVAL;
100743+
100744 if (end == start)
100745 return 0;
100746
100747@@ -1450,8 +1475,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
100748 */
100749 tcred = __task_cred(task);
100750 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
100751- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
100752- !capable(CAP_SYS_NICE)) {
100753+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
100754 rcu_read_unlock();
100755 err = -EPERM;
100756 goto out_put;
100757@@ -1482,6 +1506,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
100758 goto out;
100759 }
100760
100761+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
100762+ if (mm != current->mm &&
100763+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
100764+ mmput(mm);
100765+ err = -EPERM;
100766+ goto out;
100767+ }
100768+#endif
100769+
100770 err = do_migrate_pages(mm, old, new,
100771 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
100772
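
Besides replaying vma_replace_policy() on SEGMEXEC mirrors and capping mbind() ranges, mm/mempolicy.c tightens the migrate_pages() credential check: the clause that let a caller through on a bare real-UID match (uid_eq(cred->uid, tcred->uid)) is dropped, so the caller's effective UID must match the target's real or saved UID, or the caller's real UID must match the target's saved UID, or CAP_SYS_NICE is required. The identical change is applied to move_pages() in mm/migrate.c just below. Reduced to plain booleans (illustrative only):

#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>

static bool may_act_on(uid_t euid, uid_t uid,       /* caller */
                       uid_t t_uid, uid_t t_suid,   /* target */
                       bool cap_sys_nice)
{
    /* patched rule: a bare real-uid == real-uid match no longer counts */
    return euid == t_suid || euid == t_uid || uid == t_suid || cap_sys_nice;
}

int main(void)
{
    /* caller euid 2000/uid 1000, target uid 1000/suid 3000:
     * the old rule allowed this (real uids match); the new rule refuses */
    printf("%d\n", may_act_on(2000, 1000, 1000, 3000, false));
    return 0;
}
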
100773diff --git a/mm/migrate.c b/mm/migrate.c
100774index be6dbf9..febb8ec 100644
100775--- a/mm/migrate.c
100776+++ b/mm/migrate.c
100777@@ -1506,8 +1506,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
100778 */
100779 tcred = __task_cred(task);
100780 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
100781- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
100782- !capable(CAP_SYS_NICE)) {
100783+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
100784 rcu_read_unlock();
100785 err = -EPERM;
100786 goto out;
100787diff --git a/mm/mlock.c b/mm/mlock.c
100788index b1eb536..091d154 100644
100789--- a/mm/mlock.c
100790+++ b/mm/mlock.c
100791@@ -14,6 +14,7 @@
100792 #include <linux/pagevec.h>
100793 #include <linux/mempolicy.h>
100794 #include <linux/syscalls.h>
100795+#include <linux/security.h>
100796 #include <linux/sched.h>
100797 #include <linux/export.h>
100798 #include <linux/rmap.h>
100799@@ -606,7 +607,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
100800 {
100801 unsigned long nstart, end, tmp;
100802 struct vm_area_struct * vma, * prev;
100803- int error;
100804+ int error = 0;
100805
100806 VM_BUG_ON(start & ~PAGE_MASK);
100807 VM_BUG_ON(len != PAGE_ALIGN(len));
100808@@ -615,6 +616,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
100809 return -EINVAL;
100810 if (end == start)
100811 return 0;
100812+ if (end > TASK_SIZE)
100813+ return -EINVAL;
100814+
100815 vma = find_vma(current->mm, start);
100816 if (!vma || vma->vm_start > start)
100817 return -ENOMEM;
100818@@ -626,6 +630,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
100819 for (nstart = start ; ; ) {
100820 vm_flags_t newflags;
100821
100822+#ifdef CONFIG_PAX_SEGMEXEC
100823+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
100824+ break;
100825+#endif
100826+
100827 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
100828
100829 newflags = vma->vm_flags & ~VM_LOCKED;
100830@@ -739,6 +748,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
100831 locked += current->mm->locked_vm;
100832
100833 /* check against resource limits */
100834+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
100835 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
100836 error = do_mlock(start, len, 1);
100837
100838@@ -776,6 +786,11 @@ static int do_mlockall(int flags)
100839 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
100840 vm_flags_t newflags;
100841
100842+#ifdef CONFIG_PAX_SEGMEXEC
100843+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
100844+ break;
100845+#endif
100846+
100847 newflags = vma->vm_flags & ~VM_LOCKED;
100848 if (flags & MCL_CURRENT)
100849 newflags |= VM_LOCKED;
100850@@ -807,8 +822,10 @@ SYSCALL_DEFINE1(mlockall, int, flags)
100851 lock_limit >>= PAGE_SHIFT;
100852
100853 ret = -ENOMEM;
100854+
100855+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
100856+
100857 down_write(&current->mm->mmap_sem);
100858-
100859 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
100860 capable(CAP_IPC_LOCK))
100861 ret = do_mlockall(flags);
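
The mm/mlock.c hunks add an explicit end > TASK_SIZE bound, stop iterating once a VMA lies in the SEGMEXEC mirror half, and call gr_learn_resource() so grsecurity's RLIMIT learning mode can record how much locked memory a program actually needs before the limit is enforced. The quantity being learned is the same one the syscall checks; a runnable look at it from userspace:

#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
    struct rlimit rl;
    long page = sysconf(_SC_PAGESIZE);

    if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0)
        return 1;
    printf("RLIMIT_MEMLOCK: %llu bytes (%llu pages of %ld bytes)\n",
           (unsigned long long)rl.rlim_cur,
           (unsigned long long)(rl.rlim_cur / page),
           page);
    return 0;
}
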
100862diff --git a/mm/mmap.c b/mm/mmap.c
100863index 129b847..fbed804 100644
100864--- a/mm/mmap.c
100865+++ b/mm/mmap.c
100866@@ -40,6 +40,7 @@
100867 #include <linux/notifier.h>
100868 #include <linux/memory.h>
100869 #include <linux/printk.h>
100870+#include <linux/random.h>
100871
100872 #include <asm/uaccess.h>
100873 #include <asm/cacheflush.h>
100874@@ -56,6 +57,16 @@
100875 #define arch_rebalance_pgtables(addr, len) (addr)
100876 #endif
100877
100878+static inline void verify_mm_writelocked(struct mm_struct *mm)
100879+{
100880+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
100881+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
100882+ up_read(&mm->mmap_sem);
100883+ BUG();
100884+ }
100885+#endif
100886+}
100887+
100888 static void unmap_region(struct mm_struct *mm,
100889 struct vm_area_struct *vma, struct vm_area_struct *prev,
100890 unsigned long start, unsigned long end);
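
verify_mm_writelocked(), added above, asserts a locking precondition cheaply: if down_read_trylock() on mmap_sem succeeds, no writer can be holding the semaphore, so the caller has broken the contract and BUG() fires. The same trick with a POSIX rwlock (a sketch of the idea, not kernel code):

/* build: cc -pthread demo.c */
#include <assert.h>
#include <pthread.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

static void verify_writelocked(void)
{
    if (pthread_rwlock_tryrdlock(&mmap_sem) == 0) { /* no writer present */
        pthread_rwlock_unlock(&mmap_sem);
        assert(0 && "caller must hold the write lock");
    }
}

int main(void)
{
    pthread_rwlock_wrlock(&mmap_sem);
    verify_writelocked();           /* passes: we do hold the write lock */
    pthread_rwlock_unlock(&mmap_sem);
    return 0;
}
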
100891@@ -75,16 +86,25 @@ static void unmap_region(struct mm_struct *mm,
100892 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
100893 *
100894 */
100895-pgprot_t protection_map[16] = {
100896+pgprot_t protection_map[16] __read_only = {
100897 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
100898 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
100899 };
100900
100901-pgprot_t vm_get_page_prot(unsigned long vm_flags)
100902+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
100903 {
100904- return __pgprot(pgprot_val(protection_map[vm_flags &
100905+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
100906 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
100907 pgprot_val(arch_vm_get_page_prot(vm_flags)));
100908+
100909+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
100910+ if (!(__supported_pte_mask & _PAGE_NX) &&
100911+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
100912+ (vm_flags & (VM_READ | VM_WRITE)))
100913+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
100914+#endif
100915+
100916+ return prot;
100917 }
100918 EXPORT_SYMBOL(vm_get_page_prot);
100919
100920@@ -94,6 +114,7 @@ unsigned long sysctl_overcommit_kbytes __read_mostly;
100921 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
100922 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
100923 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
100924+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
100925 /*
100926 * Make sure vm_committed_as in one cacheline and not cacheline shared with
100927 * other variables. It can be updated by several CPUs frequently.
100928@@ -250,6 +271,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
100929 struct vm_area_struct *next = vma->vm_next;
100930
100931 might_sleep();
100932+ BUG_ON(vma->vm_mirror);
100933 if (vma->vm_ops && vma->vm_ops->close)
100934 vma->vm_ops->close(vma);
100935 if (vma->vm_file)
100936@@ -294,6 +316,12 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
100937 * not page aligned -Ram Gupta
100938 */
100939 rlim = rlimit(RLIMIT_DATA);
100940+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
100941+ /* force a minimum 16MB brk heap on setuid/setgid binaries */
100942+ if (rlim < PAGE_SIZE && (get_dumpable(mm) != SUID_DUMP_USER) && gr_is_global_nonroot(current_uid()))
100943+ rlim = 4096 * PAGE_SIZE;
100944+#endif
100945+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
100946 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
100947 (mm->end_data - mm->start_data) > rlim)
100948 goto out;
100949@@ -944,6 +972,12 @@ static int
100950 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
100951 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
100952 {
100953+
100954+#ifdef CONFIG_PAX_SEGMEXEC
100955+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
100956+ return 0;
100957+#endif
100958+
100959 if (is_mergeable_vma(vma, file, vm_flags) &&
100960 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
100961 if (vma->vm_pgoff == vm_pgoff)
100962@@ -963,6 +997,12 @@ static int
100963 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
100964 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
100965 {
100966+
100967+#ifdef CONFIG_PAX_SEGMEXEC
100968+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
100969+ return 0;
100970+#endif
100971+
100972 if (is_mergeable_vma(vma, file, vm_flags) &&
100973 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
100974 pgoff_t vm_pglen;
100975@@ -1005,13 +1045,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
100976 struct vm_area_struct *vma_merge(struct mm_struct *mm,
100977 struct vm_area_struct *prev, unsigned long addr,
100978 unsigned long end, unsigned long vm_flags,
100979- struct anon_vma *anon_vma, struct file *file,
100980+ struct anon_vma *anon_vma, struct file *file,
100981 pgoff_t pgoff, struct mempolicy *policy)
100982 {
100983 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
100984 struct vm_area_struct *area, *next;
100985 int err;
100986
100987+#ifdef CONFIG_PAX_SEGMEXEC
100988+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
100989+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
100990+
100991+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
100992+#endif
100993+
100994 /*
100995 * We later require that vma->vm_flags == vm_flags,
100996 * so this tests vma->vm_flags & VM_SPECIAL, too.
100997@@ -1027,6 +1074,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
100998 if (next && next->vm_end == end) /* cases 6, 7, 8 */
100999 next = next->vm_next;
101000
101001+#ifdef CONFIG_PAX_SEGMEXEC
101002+ if (prev)
101003+ prev_m = pax_find_mirror_vma(prev);
101004+ if (area)
101005+ area_m = pax_find_mirror_vma(area);
101006+ if (next)
101007+ next_m = pax_find_mirror_vma(next);
101008+#endif
101009+
101010 /*
101011 * Can it merge with the predecessor?
101012 */
101013@@ -1046,9 +1102,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
101014 /* cases 1, 6 */
101015 err = vma_adjust(prev, prev->vm_start,
101016 next->vm_end, prev->vm_pgoff, NULL);
101017- } else /* cases 2, 5, 7 */
101018+
101019+#ifdef CONFIG_PAX_SEGMEXEC
101020+ if (!err && prev_m)
101021+ err = vma_adjust(prev_m, prev_m->vm_start,
101022+ next_m->vm_end, prev_m->vm_pgoff, NULL);
101023+#endif
101024+
101025+ } else { /* cases 2, 5, 7 */
101026 err = vma_adjust(prev, prev->vm_start,
101027 end, prev->vm_pgoff, NULL);
101028+
101029+#ifdef CONFIG_PAX_SEGMEXEC
101030+ if (!err && prev_m)
101031+ err = vma_adjust(prev_m, prev_m->vm_start,
101032+ end_m, prev_m->vm_pgoff, NULL);
101033+#endif
101034+
101035+ }
101036 if (err)
101037 return NULL;
101038 khugepaged_enter_vma_merge(prev);
101039@@ -1062,12 +1133,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
101040 mpol_equal(policy, vma_policy(next)) &&
101041 can_vma_merge_before(next, vm_flags,
101042 anon_vma, file, pgoff+pglen)) {
101043- if (prev && addr < prev->vm_end) /* case 4 */
101044+ if (prev && addr < prev->vm_end) { /* case 4 */
101045 err = vma_adjust(prev, prev->vm_start,
101046 addr, prev->vm_pgoff, NULL);
101047- else /* cases 3, 8 */
101048+
101049+#ifdef CONFIG_PAX_SEGMEXEC
101050+ if (!err && prev_m)
101051+ err = vma_adjust(prev_m, prev_m->vm_start,
101052+ addr_m, prev_m->vm_pgoff, NULL);
101053+#endif
101054+
101055+ } else { /* cases 3, 8 */
101056 err = vma_adjust(area, addr, next->vm_end,
101057 next->vm_pgoff - pglen, NULL);
101058+
101059+#ifdef CONFIG_PAX_SEGMEXEC
101060+ if (!err && area_m)
101061+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
101062+ next_m->vm_pgoff - pglen, NULL);
101063+#endif
101064+
101065+ }
101066 if (err)
101067 return NULL;
101068 khugepaged_enter_vma_merge(area);
101069@@ -1176,8 +1262,10 @@ none:
101070 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
101071 struct file *file, long pages)
101072 {
101073- const unsigned long stack_flags
101074- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
101075+
101076+#ifdef CONFIG_PAX_RANDMMAP
101077+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
101078+#endif
101079
101080 mm->total_vm += pages;
101081
101082@@ -1185,7 +1273,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
101083 mm->shared_vm += pages;
101084 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
101085 mm->exec_vm += pages;
101086- } else if (flags & stack_flags)
101087+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
101088 mm->stack_vm += pages;
101089 }
101090 #endif /* CONFIG_PROC_FS */
101091@@ -1215,6 +1303,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
101092 locked += mm->locked_vm;
101093 lock_limit = rlimit(RLIMIT_MEMLOCK);
101094 lock_limit >>= PAGE_SHIFT;
101095+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
101096 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
101097 return -EAGAIN;
101098 }
101099@@ -1241,7 +1330,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
101100 * (the exception is when the underlying filesystem is noexec
101101 * mounted, in which case we dont add PROT_EXEC.)
101102 */
101103- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
101104+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
101105 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
101106 prot |= PROT_EXEC;
101107
101108@@ -1267,7 +1356,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
101109 /* Obtain the address to map to. we verify (or select) it and ensure
101110 * that it represents a valid section of the address space.
101111 */
101112- addr = get_unmapped_area(file, addr, len, pgoff, flags);
101113+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
101114 if (addr & ~PAGE_MASK)
101115 return addr;
101116
101117@@ -1278,6 +1367,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
101118 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
101119 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
101120
101121+#ifdef CONFIG_PAX_MPROTECT
101122+ if (mm->pax_flags & MF_PAX_MPROTECT) {
101123+
101124+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
101125+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
101126+ mm->binfmt->handle_mmap)
101127+ mm->binfmt->handle_mmap(file);
101128+#endif
101129+
101130+#ifndef CONFIG_PAX_MPROTECT_COMPAT
101131+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
101132+ gr_log_rwxmmap(file);
101133+
101134+#ifdef CONFIG_PAX_EMUPLT
101135+ vm_flags &= ~VM_EXEC;
101136+#else
101137+ return -EPERM;
101138+#endif
101139+
101140+ }
101141+
101142+ if (!(vm_flags & VM_EXEC))
101143+ vm_flags &= ~VM_MAYEXEC;
101144+#else
101145+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
101146+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
101147+#endif
101148+ else
101149+ vm_flags &= ~VM_MAYWRITE;
101150+ }
101151+#endif
101152+
101153+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
101154+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
101155+ vm_flags &= ~VM_PAGEEXEC;
101156+#endif
101157+
101158 if (flags & MAP_LOCKED)
101159 if (!can_do_mlock())
101160 return -EPERM;
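
The MF_PAX_MPROTECT block above is the W^X policy at map time: a request for simultaneously writable and executable memory is either logged and stripped of VM_EXEC (under CONFIG_PAX_EMUPLT) or refused with -EPERM, and a mapping created non-executable also loses VM_MAYEXEC so it can never be mprotect()ed executable later. A userspace probe for the first rule (this mmap() succeeds on a stock kernel and fails with EPERM where MPROTECT is enforced):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        printf("W+X mapping refused: %s\n", strerror(errno));
    else
        printf("W+X mapping allowed at %p\n", p);
    return 0;
}
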
101161@@ -1365,6 +1491,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
101162 vm_flags |= VM_NORESERVE;
101163 }
101164
101165+ if (!gr_acl_handle_mmap(file, prot))
101166+ return -EACCES;
101167+
101168 addr = mmap_region(file, addr, len, vm_flags, pgoff);
101169 if (!IS_ERR_VALUE(addr) &&
101170 ((vm_flags & VM_LOCKED) ||
101171@@ -1458,7 +1587,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
101172 vm_flags_t vm_flags = vma->vm_flags;
101173
101174 /* If it was private or non-writable, the write bit is already clear */
101175- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
101176+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
101177 return 0;
101178
101179 /* The backer wishes to know when pages are first written to? */
101180@@ -1504,7 +1633,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
101181 struct rb_node **rb_link, *rb_parent;
101182 unsigned long charged = 0;
101183
101184+#ifdef CONFIG_PAX_SEGMEXEC
101185+ struct vm_area_struct *vma_m = NULL;
101186+#endif
101187+
101188+ /*
101189+ * mm->mmap_sem is required to protect against another thread
101190+ * changing the mappings in case we sleep.
101191+ */
101192+ verify_mm_writelocked(mm);
101193+
101194 /* Check against address space limit. */
101195+
101196+#ifdef CONFIG_PAX_RANDMMAP
101197+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
101198+#endif
101199+
101200 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
101201 unsigned long nr_pages;
101202
101203@@ -1523,11 +1667,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
101204
101205 /* Clear old maps */
101206 error = -ENOMEM;
101207-munmap_back:
101208 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
101209 if (do_munmap(mm, addr, len))
101210 return -ENOMEM;
101211- goto munmap_back;
101212+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
101213 }
101214
101215 /*
101216@@ -1558,6 +1701,16 @@ munmap_back:
101217 goto unacct_error;
101218 }
101219
101220+#ifdef CONFIG_PAX_SEGMEXEC
101221+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
101222+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
101223+ if (!vma_m) {
101224+ error = -ENOMEM;
101225+ goto free_vma;
101226+ }
101227+ }
101228+#endif
101229+
101230 vma->vm_mm = mm;
101231 vma->vm_start = addr;
101232 vma->vm_end = addr + len;
101233@@ -1577,6 +1730,13 @@ munmap_back:
101234 if (error)
101235 goto unmap_and_free_vma;
101236
101237+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
101238+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
101239+ vma->vm_flags |= VM_PAGEEXEC;
101240+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
101241+ }
101242+#endif
101243+
101244 /* Can addr have changed??
101245 *
101246 * Answer: Yes, several device drivers can do it in their
101247@@ -1610,6 +1770,12 @@ munmap_back:
101248 }
101249
101250 vma_link(mm, vma, prev, rb_link, rb_parent);
101251+
101252+#ifdef CONFIG_PAX_SEGMEXEC
101253+ if (vma_m)
101254+ BUG_ON(pax_mirror_vma(vma_m, vma));
101255+#endif
101256+
101257 /* Once vma denies write, undo our temporary denial count */
101258 if (vm_flags & VM_DENYWRITE)
101259 allow_write_access(file);
101260@@ -1618,6 +1784,7 @@ out:
101261 perf_event_mmap(vma);
101262
101263 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
101264+ track_exec_limit(mm, addr, addr + len, vm_flags);
101265 if (vm_flags & VM_LOCKED) {
101266 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
101267 vma == get_gate_vma(current->mm)))
101268@@ -1650,6 +1817,12 @@ unmap_and_free_vma:
101269 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
101270 charged = 0;
101271 free_vma:
101272+
101273+#ifdef CONFIG_PAX_SEGMEXEC
101274+ if (vma_m)
101275+ kmem_cache_free(vm_area_cachep, vma_m);
101276+#endif
101277+
101278 kmem_cache_free(vm_area_cachep, vma);
101279 unacct_error:
101280 if (charged)
101281@@ -1657,7 +1830,63 @@ unacct_error:
101282 return error;
101283 }
101284
101285-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
101286+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
101287+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
101288+{
101289+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
101290+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
101291+
101292+ return 0;
101293+}
101294+#endif
101295+
101296+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
101297+{
101298+ if (!vma) {
101299+#ifdef CONFIG_STACK_GROWSUP
101300+ if (addr > sysctl_heap_stack_gap)
101301+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
101302+ else
101303+ vma = find_vma(current->mm, 0);
101304+ if (vma && (vma->vm_flags & VM_GROWSUP))
101305+ return false;
101306+#endif
101307+ return true;
101308+ }
101309+
101310+ if (addr + len > vma->vm_start)
101311+ return false;
101312+
101313+ if (vma->vm_flags & VM_GROWSDOWN)
101314+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
101315+#ifdef CONFIG_STACK_GROWSUP
101316+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
101317+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
101318+#endif
101319+ else if (offset)
101320+ return offset <= vma->vm_start - addr - len;
101321+
101322+ return true;
101323+}
101324+
101325+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
101326+{
101327+ if (vma->vm_start < len)
101328+ return -ENOMEM;
101329+
101330+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
101331+ if (offset <= vma->vm_start - len)
101332+ return vma->vm_start - len - offset;
101333+ else
101334+ return -ENOMEM;
101335+ }
101336+
101337+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
101338+ return vma->vm_start - len - sysctl_heap_stack_gap;
101339+ return -ENOMEM;
101340+}
101341+
101342+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
101343 {
101344 /*
101345 * We implement the search by looking for an rbtree node that
101346@@ -1705,11 +1934,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
101347 }
101348 }
101349
101350- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
101351+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
101352 check_current:
101353 /* Check if current node has a suitable gap */
101354 if (gap_start > high_limit)
101355 return -ENOMEM;
101356+
101357+ if (gap_end - gap_start > info->threadstack_offset)
101358+ gap_start += info->threadstack_offset;
101359+ else
101360+ gap_start = gap_end;
101361+
101362+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
101363+ if (gap_end - gap_start > sysctl_heap_stack_gap)
101364+ gap_start += sysctl_heap_stack_gap;
101365+ else
101366+ gap_start = gap_end;
101367+ }
101368+ if (vma->vm_flags & VM_GROWSDOWN) {
101369+ if (gap_end - gap_start > sysctl_heap_stack_gap)
101370+ gap_end -= sysctl_heap_stack_gap;
101371+ else
101372+ gap_end = gap_start;
101373+ }
101374 if (gap_end >= low_limit && gap_end - gap_start >= length)
101375 goto found;
101376
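
These unmapped_area() additions (mirrored in the topdown variant below) shrink every candidate gap by threadstack_offset and by sysctl_heap_stack_gap whenever the gap borders a stack VMA, so a new mapping can never be placed flush against a growing stack; check_heap_stack_gap() earlier in this file applies the same rule to caller-supplied hint addresses. The core test, reduced to a self-contained demo (the simplified vma struct and addresses are illustrative; 64 KB is the sysctl default set above):

#include <stdbool.h>
#include <stdio.h>

static unsigned long heap_stack_gap = 64 * 1024;  /* sysctl_heap_stack_gap */

struct vma { unsigned long vm_start; bool grows_down; };

static bool gap_ok(const struct vma *next, unsigned long addr,
                   unsigned long len)
{
    if (!next)
        return true;                      /* nothing above the request */
    if (addr + len > next->vm_start)
        return false;                     /* overlaps the next mapping */
    if (next->grows_down)                 /* stack: keep the guard gap */
        return heap_stack_gap <= next->vm_start - addr - len;
    return true;
}

int main(void)
{
    struct vma stack = { 0xbf000000UL, true };
    printf("%d\n", gap_ok(&stack, 0xbefe8000UL, 0x10000)); /* 0: in the gap */
    printf("%d\n", gap_ok(&stack, 0xb0000000UL, 0x10000)); /* 1: well below */
    return 0;
}

Enforcing the gap at placement time is what lets this patch drop the fault-time check_stack_guard_page() logic removed from mm/memory.c earlier.
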
101377@@ -1759,7 +2006,7 @@ found:
101378 return gap_start;
101379 }
101380
101381-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
101382+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
101383 {
101384 struct mm_struct *mm = current->mm;
101385 struct vm_area_struct *vma;
101386@@ -1813,6 +2060,24 @@ check_current:
101387 gap_end = vma->vm_start;
101388 if (gap_end < low_limit)
101389 return -ENOMEM;
101390+
101391+ if (gap_end - gap_start > info->threadstack_offset)
101392+ gap_end -= info->threadstack_offset;
101393+ else
101394+ gap_end = gap_start;
101395+
101396+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
101397+ if (gap_end - gap_start > sysctl_heap_stack_gap)
101398+ gap_start += sysctl_heap_stack_gap;
101399+ else
101400+ gap_start = gap_end;
101401+ }
101402+ if (vma->vm_flags & VM_GROWSDOWN) {
101403+ if (gap_end - gap_start > sysctl_heap_stack_gap)
101404+ gap_end -= sysctl_heap_stack_gap;
101405+ else
101406+ gap_end = gap_start;
101407+ }
101408 if (gap_start <= high_limit && gap_end - gap_start >= length)
101409 goto found;
101410
101411@@ -1876,6 +2141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
101412 struct mm_struct *mm = current->mm;
101413 struct vm_area_struct *vma;
101414 struct vm_unmapped_area_info info;
101415+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
101416
101417 if (len > TASK_SIZE - mmap_min_addr)
101418 return -ENOMEM;
101419@@ -1883,11 +2149,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
101420 if (flags & MAP_FIXED)
101421 return addr;
101422
101423+#ifdef CONFIG_PAX_RANDMMAP
101424+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
101425+#endif
101426+
101427 if (addr) {
101428 addr = PAGE_ALIGN(addr);
101429 vma = find_vma(mm, addr);
101430 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
101431- (!vma || addr + len <= vma->vm_start))
101432+ check_heap_stack_gap(vma, addr, len, offset))
101433 return addr;
101434 }
101435
101436@@ -1896,6 +2166,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
101437 info.low_limit = mm->mmap_base;
101438 info.high_limit = TASK_SIZE;
101439 info.align_mask = 0;
101440+ info.threadstack_offset = offset;
101441 return vm_unmapped_area(&info);
101442 }
101443 #endif
101444@@ -1914,6 +2185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
101445 struct mm_struct *mm = current->mm;
101446 unsigned long addr = addr0;
101447 struct vm_unmapped_area_info info;
101448+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
101449
101450 /* requested length too big for entire address space */
101451 if (len > TASK_SIZE - mmap_min_addr)
101452@@ -1922,12 +2194,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
101453 if (flags & MAP_FIXED)
101454 return addr;
101455
101456+#ifdef CONFIG_PAX_RANDMMAP
101457+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
101458+#endif
101459+
101460 /* requesting a specific address */
101461 if (addr) {
101462 addr = PAGE_ALIGN(addr);
101463 vma = find_vma(mm, addr);
101464 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
101465- (!vma || addr + len <= vma->vm_start))
101466+ check_heap_stack_gap(vma, addr, len, offset))
101467 return addr;
101468 }
101469
101470@@ -1936,6 +2212,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
101471 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
101472 info.high_limit = mm->mmap_base;
101473 info.align_mask = 0;
101474+ info.threadstack_offset = offset;
101475 addr = vm_unmapped_area(&info);
101476
101477 /*
101478@@ -1948,6 +2225,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
101479 VM_BUG_ON(addr != -ENOMEM);
101480 info.flags = 0;
101481 info.low_limit = TASK_UNMAPPED_BASE;
101482+
101483+#ifdef CONFIG_PAX_RANDMMAP
101484+ if (mm->pax_flags & MF_PAX_RANDMMAP)
101485+ info.low_limit += mm->delta_mmap;
101486+#endif
101487+
101488 info.high_limit = TASK_SIZE;
101489 addr = vm_unmapped_area(&info);
101490 }
101491@@ -2048,6 +2331,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
101492 return vma;
101493 }
101494
101495+#ifdef CONFIG_PAX_SEGMEXEC
101496+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
101497+{
101498+ struct vm_area_struct *vma_m;
101499+
101500+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
101501+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
101502+ BUG_ON(vma->vm_mirror);
101503+ return NULL;
101504+ }
101505+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
101506+ vma_m = vma->vm_mirror;
101507+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
101508+ BUG_ON(vma->vm_file != vma_m->vm_file);
101509+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
101510+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
101511+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
101512+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
101513+ return vma_m;
101514+}
101515+#endif
101516+
101517 /*
101518 * Verify that the stack growth is acceptable and
101519 * update accounting. This is shared with both the
101520@@ -2064,6 +2369,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
101521 return -ENOMEM;
101522
101523 /* Stack limit test */
101524+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
101525 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
101526 return -ENOMEM;
101527
101528@@ -2074,6 +2380,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
101529 locked = mm->locked_vm + grow;
101530 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
101531 limit >>= PAGE_SHIFT;
101532+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
101533 if (locked > limit && !capable(CAP_IPC_LOCK))
101534 return -ENOMEM;
101535 }
101536@@ -2103,37 +2410,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
101537 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
101538 * vma is the last one with address > vma->vm_end. Have to extend vma.
101539 */
101540+#ifndef CONFIG_IA64
101541+static
101542+#endif
101543 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
101544 {
101545 int error;
101546+ bool locknext;
101547
101548 if (!(vma->vm_flags & VM_GROWSUP))
101549 return -EFAULT;
101550
101551+ /* Also guard against wrapping around to address 0. */
101552+ if (address < PAGE_ALIGN(address+1))
101553+ address = PAGE_ALIGN(address+1);
101554+ else
101555+ return -ENOMEM;
101556+
101557 /*
101558 * We must make sure the anon_vma is allocated
101559 * so that the anon_vma locking is not a noop.
101560 */
101561 if (unlikely(anon_vma_prepare(vma)))
101562 return -ENOMEM;
101563+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
101564+ if (locknext && anon_vma_prepare(vma->vm_next))
101565+ return -ENOMEM;
101566 vma_lock_anon_vma(vma);
101567+ if (locknext)
101568+ vma_lock_anon_vma(vma->vm_next);
101569
101570 /*
101571 * vma->vm_start/vm_end cannot change under us because the caller
101572 * is required to hold the mmap_sem in read mode. We need the
101573- * anon_vma lock to serialize against concurrent expand_stacks.
101574- * Also guard against wrapping around to address 0.
101575+ * anon_vma locks to serialize against concurrent expand_stacks
101576+ * and expand_upwards.
101577 */
101578- if (address < PAGE_ALIGN(address+4))
101579- address = PAGE_ALIGN(address+4);
101580- else {
101581- vma_unlock_anon_vma(vma);
101582- return -ENOMEM;
101583- }
101584 error = 0;
101585
101586 /* Somebody else might have raced and expanded it already */
101587- if (address > vma->vm_end) {
101588+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
101589+ error = -ENOMEM;
101590+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
101591 unsigned long size, grow;
101592
101593 size = address - vma->vm_start;
101594@@ -2168,6 +2486,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
101595 }
101596 }
101597 }
101598+ if (locknext)
101599+ vma_unlock_anon_vma(vma->vm_next);
101600 vma_unlock_anon_vma(vma);
101601 khugepaged_enter_vma_merge(vma);
101602 validate_mm(vma->vm_mm);
101603@@ -2182,6 +2502,8 @@ int expand_downwards(struct vm_area_struct *vma,
101604 unsigned long address)
101605 {
101606 int error;
101607+ bool lockprev = false;
101608+ struct vm_area_struct *prev;
101609
101610 /*
101611 * We must make sure the anon_vma is allocated
101612@@ -2195,6 +2517,15 @@ int expand_downwards(struct vm_area_struct *vma,
101613 if (error)
101614 return error;
101615
101616+ prev = vma->vm_prev;
101617+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
101618+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
101619+#endif
101620+ if (lockprev && anon_vma_prepare(prev))
101621+ return -ENOMEM;
101622+ if (lockprev)
101623+ vma_lock_anon_vma(prev);
101624+
101625 vma_lock_anon_vma(vma);
101626
101627 /*
101628@@ -2204,9 +2535,17 @@ int expand_downwards(struct vm_area_struct *vma,
101629 */
101630
101631 /* Somebody else might have raced and expanded it already */
101632- if (address < vma->vm_start) {
101633+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
101634+ error = -ENOMEM;
101635+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
101636 unsigned long size, grow;
101637
101638+#ifdef CONFIG_PAX_SEGMEXEC
101639+ struct vm_area_struct *vma_m;
101640+
101641+ vma_m = pax_find_mirror_vma(vma);
101642+#endif
101643+
101644 size = vma->vm_end - address;
101645 grow = (vma->vm_start - address) >> PAGE_SHIFT;
101646
101647@@ -2231,13 +2570,27 @@ int expand_downwards(struct vm_area_struct *vma,
101648 vma->vm_pgoff -= grow;
101649 anon_vma_interval_tree_post_update_vma(vma);
101650 vma_gap_update(vma);
101651+
101652+#ifdef CONFIG_PAX_SEGMEXEC
101653+ if (vma_m) {
101654+ anon_vma_interval_tree_pre_update_vma(vma_m);
101655+ vma_m->vm_start -= grow << PAGE_SHIFT;
101656+ vma_m->vm_pgoff -= grow;
101657+ anon_vma_interval_tree_post_update_vma(vma_m);
101658+ vma_gap_update(vma_m);
101659+ }
101660+#endif
101661+
101662 spin_unlock(&vma->vm_mm->page_table_lock);
101663
101664+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
101665 perf_event_mmap(vma);
101666 }
101667 }
101668 }
101669 vma_unlock_anon_vma(vma);
101670+ if (lockprev)
101671+ vma_unlock_anon_vma(prev);
101672 khugepaged_enter_vma_merge(vma);
101673 validate_mm(vma->vm_mm);
101674 return error;
101675@@ -2335,6 +2688,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
101676 do {
101677 long nrpages = vma_pages(vma);
101678
101679+#ifdef CONFIG_PAX_SEGMEXEC
101680+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
101681+ vma = remove_vma(vma);
101682+ continue;
101683+ }
101684+#endif
101685+
101686 if (vma->vm_flags & VM_ACCOUNT)
101687 nr_accounted += nrpages;
101688 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
101689@@ -2379,6 +2739,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
101690 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
101691 vma->vm_prev = NULL;
101692 do {
101693+
101694+#ifdef CONFIG_PAX_SEGMEXEC
101695+ if (vma->vm_mirror) {
101696+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
101697+ vma->vm_mirror->vm_mirror = NULL;
101698+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
101699+ vma->vm_mirror = NULL;
101700+ }
101701+#endif
101702+
101703 vma_rb_erase(vma, &mm->mm_rb);
101704 mm->map_count--;
101705 tail_vma = vma;
101706@@ -2406,14 +2776,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
101707 struct vm_area_struct *new;
101708 int err = -ENOMEM;
101709
101710+#ifdef CONFIG_PAX_SEGMEXEC
101711+ struct vm_area_struct *vma_m, *new_m = NULL;
101712+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
101713+#endif
101714+
101715 if (is_vm_hugetlb_page(vma) && (addr &
101716 ~(huge_page_mask(hstate_vma(vma)))))
101717 return -EINVAL;
101718
101719+#ifdef CONFIG_PAX_SEGMEXEC
101720+ vma_m = pax_find_mirror_vma(vma);
101721+#endif
101722+
101723 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
101724 if (!new)
101725 goto out_err;
101726
101727+#ifdef CONFIG_PAX_SEGMEXEC
101728+ if (vma_m) {
101729+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
101730+ if (!new_m) {
101731+ kmem_cache_free(vm_area_cachep, new);
101732+ goto out_err;
101733+ }
101734+ }
101735+#endif
101736+
101737 /* most fields are the same, copy all, and then fixup */
101738 *new = *vma;
101739
101740@@ -2426,6 +2815,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
101741 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
101742 }
101743
101744+#ifdef CONFIG_PAX_SEGMEXEC
101745+ if (vma_m) {
101746+ *new_m = *vma_m;
101747+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
101748+ new_m->vm_mirror = new;
101749+ new->vm_mirror = new_m;
101750+
101751+ if (new_below)
101752+ new_m->vm_end = addr_m;
101753+ else {
101754+ new_m->vm_start = addr_m;
101755+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
101756+ }
101757+ }
101758+#endif
101759+
101760 err = vma_dup_policy(vma, new);
101761 if (err)
101762 goto out_free_vma;
101763@@ -2445,6 +2850,38 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
101764 else
101765 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
101766
101767+#ifdef CONFIG_PAX_SEGMEXEC
101768+ if (!err && vma_m) {
101769+ struct mempolicy *pol = vma_policy(new);
101770+
101771+ if (anon_vma_clone(new_m, vma_m))
101772+ goto out_free_mpol;
101773+
101774+ mpol_get(pol);
101775+ set_vma_policy(new_m, pol);
101776+
101777+ if (new_m->vm_file)
101778+ get_file(new_m->vm_file);
101779+
101780+ if (new_m->vm_ops && new_m->vm_ops->open)
101781+ new_m->vm_ops->open(new_m);
101782+
101783+ if (new_below)
101784+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
101785+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
101786+ else
101787+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
101788+
101789+ if (err) {
101790+ if (new_m->vm_ops && new_m->vm_ops->close)
101791+ new_m->vm_ops->close(new_m);
101792+ if (new_m->vm_file)
101793+ fput(new_m->vm_file);
101794+ mpol_put(pol);
101795+ }
101796+ }
101797+#endif
101798+
101799 /* Success. */
101800 if (!err)
101801 return 0;
101802@@ -2454,10 +2891,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
101803 new->vm_ops->close(new);
101804 if (new->vm_file)
101805 fput(new->vm_file);
101806- unlink_anon_vmas(new);
101807 out_free_mpol:
101808 mpol_put(vma_policy(new));
101809 out_free_vma:
101810+
101811+#ifdef CONFIG_PAX_SEGMEXEC
101812+ if (new_m) {
101813+ unlink_anon_vmas(new_m);
101814+ kmem_cache_free(vm_area_cachep, new_m);
101815+ }
101816+#endif
101817+
101818+ unlink_anon_vmas(new);
101819 kmem_cache_free(vm_area_cachep, new);
101820 out_err:
101821 return err;
101822@@ -2470,6 +2915,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
101823 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
101824 unsigned long addr, int new_below)
101825 {
101826+
101827+#ifdef CONFIG_PAX_SEGMEXEC
101828+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
101829+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
101830+ if (mm->map_count >= sysctl_max_map_count-1)
101831+ return -ENOMEM;
101832+ } else
101833+#endif
101834+
101835 if (mm->map_count >= sysctl_max_map_count)
101836 return -ENOMEM;
101837
101838@@ -2481,11 +2935,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
101839 * work. This now handles partial unmappings.
101840 * Jeremy Fitzhardinge <jeremy@goop.org>
101841 */
101842+#ifdef CONFIG_PAX_SEGMEXEC
101843 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
101844 {
101845+ int ret = __do_munmap(mm, start, len);
101846+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
101847+ return ret;
101848+
101849+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
101850+}
101851+
101852+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
101853+#else
101854+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
101855+#endif
101856+{
101857 unsigned long end;
101858 struct vm_area_struct *vma, *prev, *last;
101859
101860+ /*
101861+ * mm->mmap_sem is required to protect against another thread
101862+ * changing the mappings in case we sleep.
101863+ */
101864+ verify_mm_writelocked(mm);
101865+
101866 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
101867 return -EINVAL;
101868
101869@@ -2560,6 +3033,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
101870 /* Fix up all other VM information */
101871 remove_vma_list(mm, vma);
101872
101873+ track_exec_limit(mm, start, end, 0UL);
101874+
101875 return 0;
101876 }
101877
101878@@ -2568,6 +3043,13 @@ int vm_munmap(unsigned long start, size_t len)
101879 int ret;
101880 struct mm_struct *mm = current->mm;
101881
101882+
101883+#ifdef CONFIG_PAX_SEGMEXEC
101884+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
101885+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
101886+ return -EINVAL;
101887+#endif
101888+
101889 down_write(&mm->mmap_sem);
101890 ret = do_munmap(mm, start, len);
101891 up_write(&mm->mmap_sem);
101892@@ -2581,16 +3063,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
101893 return vm_munmap(addr, len);
101894 }
101895
101896-static inline void verify_mm_writelocked(struct mm_struct *mm)
101897-{
101898-#ifdef CONFIG_DEBUG_VM
101899- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
101900- WARN_ON(1);
101901- up_read(&mm->mmap_sem);
101902- }
101903-#endif
101904-}
101905-
101906 /*
101907 * this is really a simplified "do_mmap". it only handles
101908 * anonymous maps. eventually we may be able to do some
101909@@ -2604,6 +3076,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
101910 struct rb_node ** rb_link, * rb_parent;
101911 pgoff_t pgoff = addr >> PAGE_SHIFT;
101912 int error;
101913+ unsigned long charged;
101914
101915 len = PAGE_ALIGN(len);
101916 if (!len)
101917@@ -2611,10 +3084,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
101918
101919 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
101920
101921+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
101922+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
101923+ flags &= ~VM_EXEC;
101924+
101925+#ifdef CONFIG_PAX_MPROTECT
101926+ if (mm->pax_flags & MF_PAX_MPROTECT)
101927+ flags &= ~VM_MAYEXEC;
101928+#endif
101929+
101930+ }
101931+#endif
101932+
101933 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
101934 if (error & ~PAGE_MASK)
101935 return error;
101936
101937+ charged = len >> PAGE_SHIFT;
101938+
101939 error = mlock_future_check(mm, mm->def_flags, len);
101940 if (error)
101941 return error;
101942@@ -2628,21 +3115,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
101943 /*
101944 * Clear old maps. this also does some error checking for us
101945 */
101946- munmap_back:
101947 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
101948 if (do_munmap(mm, addr, len))
101949 return -ENOMEM;
101950- goto munmap_back;
101951+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
101952 }
101953
101954 /* Check against address space limits *after* clearing old maps... */
101955- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
101956+ if (!may_expand_vm(mm, charged))
101957 return -ENOMEM;
101958
101959 if (mm->map_count > sysctl_max_map_count)
101960 return -ENOMEM;
101961
101962- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
101963+ if (security_vm_enough_memory_mm(mm, charged))
101964 return -ENOMEM;
101965
101966 /* Can we just expand an old private anonymous mapping? */
101967@@ -2656,7 +3142,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
101968 */
101969 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
101970 if (!vma) {
101971- vm_unacct_memory(len >> PAGE_SHIFT);
101972+ vm_unacct_memory(charged);
101973 return -ENOMEM;
101974 }
101975
101976@@ -2670,10 +3156,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
101977 vma_link(mm, vma, prev, rb_link, rb_parent);
101978 out:
101979 perf_event_mmap(vma);
101980- mm->total_vm += len >> PAGE_SHIFT;
101981+ mm->total_vm += charged;
101982 if (flags & VM_LOCKED)
101983- mm->locked_vm += (len >> PAGE_SHIFT);
101984+ mm->locked_vm += charged;
101985 vma->vm_flags |= VM_SOFTDIRTY;
101986+ track_exec_limit(mm, addr, addr + len, flags);
101987 return addr;
101988 }
101989
101990@@ -2735,6 +3222,7 @@ void exit_mmap(struct mm_struct *mm)
101991 while (vma) {
101992 if (vma->vm_flags & VM_ACCOUNT)
101993 nr_accounted += vma_pages(vma);
101994+ vma->vm_mirror = NULL;
101995 vma = remove_vma(vma);
101996 }
101997 vm_unacct_memory(nr_accounted);
101998@@ -2752,6 +3240,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
101999 struct vm_area_struct *prev;
102000 struct rb_node **rb_link, *rb_parent;
102001
102002+#ifdef CONFIG_PAX_SEGMEXEC
102003+ struct vm_area_struct *vma_m = NULL;
102004+#endif
102005+
102006+ if (security_mmap_addr(vma->vm_start))
102007+ return -EPERM;
102008+
102009 /*
102010 * The vm_pgoff of a purely anonymous vma should be irrelevant
102011 * until its first write fault, when page's anon_vma and index
102012@@ -2775,7 +3270,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
102013 security_vm_enough_memory_mm(mm, vma_pages(vma)))
102014 return -ENOMEM;
102015
102016+#ifdef CONFIG_PAX_SEGMEXEC
102017+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
102018+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
102019+ if (!vma_m)
102020+ return -ENOMEM;
102021+ }
102022+#endif
102023+
102024 vma_link(mm, vma, prev, rb_link, rb_parent);
102025+
102026+#ifdef CONFIG_PAX_SEGMEXEC
102027+ if (vma_m)
102028+ BUG_ON(pax_mirror_vma(vma_m, vma));
102029+#endif
102030+
102031 return 0;
102032 }
102033
102034@@ -2794,6 +3303,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
102035 struct rb_node **rb_link, *rb_parent;
102036 bool faulted_in_anon_vma = true;
102037
102038+ BUG_ON(vma->vm_mirror);
102039+
102040 /*
102041 * If anonymous vma has not yet been faulted, update new pgoff
102042 * to match new location, to increase its chance of merging.
102043@@ -2858,6 +3369,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
102044 return NULL;
102045 }
102046
102047+#ifdef CONFIG_PAX_SEGMEXEC
102048+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
102049+{
102050+ struct vm_area_struct *prev_m;
102051+ struct rb_node **rb_link_m, *rb_parent_m;
102052+ struct mempolicy *pol_m;
102053+
102054+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
102055+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
102056+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
102057+ *vma_m = *vma;
102058+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
102059+ if (anon_vma_clone(vma_m, vma))
102060+ return -ENOMEM;
102061+ pol_m = vma_policy(vma_m);
102062+ mpol_get(pol_m);
102063+ set_vma_policy(vma_m, pol_m);
102064+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
102065+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
102066+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
102067+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
102068+ if (vma_m->vm_file)
102069+ get_file(vma_m->vm_file);
102070+ if (vma_m->vm_ops && vma_m->vm_ops->open)
102071+ vma_m->vm_ops->open(vma_m);
102072+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
102073+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
102074+ vma_m->vm_mirror = vma;
102075+ vma->vm_mirror = vma_m;
102076+ return 0;
102077+}
102078+#endif
102079+
102080 /*
102081 * Return true if the calling process may expand its vm space by the passed
102082 * number of pages
102083@@ -2869,6 +3413,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
102084
102085 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
102086
102087+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
102088 if (cur + npages > lim)
102089 return 0;
102090 return 1;
102091@@ -2951,6 +3496,22 @@ static struct vm_area_struct *__install_special_mapping(
102092 vma->vm_start = addr;
102093 vma->vm_end = addr + len;
102094
102095+#ifdef CONFIG_PAX_MPROTECT
102096+ if (mm->pax_flags & MF_PAX_MPROTECT) {
102097+#ifndef CONFIG_PAX_MPROTECT_COMPAT
102098+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
102099+ return ERR_PTR(-EPERM);
102100+ if (!(vm_flags & VM_EXEC))
102101+ vm_flags &= ~VM_MAYEXEC;
102102+#else
102103+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
102104+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
102105+#endif
102106+ else
102107+ vm_flags &= ~VM_MAYWRITE;
102108+ }
102109+#endif
102110+
102111 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
102112 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
102113
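
The mm/mmap.c hunks above share one pattern: a candidate range [addr, addr + len) is accepted only if it still leaves sysctl_heap_stack_gap bytes, plus the per-thread random threadstack offset, before an adjacent stack VMA. The check_heap_stack_gap() helper substituted into arch_get_unmapped_area() is defined elsewhere in this patch; the standalone sketch below only illustrates the interval arithmetic, with the constant and the function shape assumed.

#include <stdbool.h>

#define HEAP_STACK_GAP (64UL * 1024)	/* stand-in for sysctl_heap_stack_gap */

/*
 * Sketch, not patch code: accept [addr, addr + len) only if it ends at
 * least gap + offset bytes below next_start, the base of the following
 * stack VMA.
 */
static bool gap_ok(unsigned long addr, unsigned long len,
		   unsigned long next_start, unsigned long offset)
{
	if (addr + len < addr)			/* length wraps the address space */
		return false;
	if (addr + len > next_start)		/* overlaps the next VMA */
		return false;
	return next_start - (addr + len) >= HEAP_STACK_GAP + offset;
}
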
102114diff --git a/mm/mprotect.c b/mm/mprotect.c
102115index c43d557..0b7ccd2 100644
102116--- a/mm/mprotect.c
102117+++ b/mm/mprotect.c
102118@@ -24,10 +24,18 @@
102119 #include <linux/migrate.h>
102120 #include <linux/perf_event.h>
102121 #include <linux/ksm.h>
102122+#include <linux/sched/sysctl.h>
102123+
102124+#ifdef CONFIG_PAX_MPROTECT
102125+#include <linux/elf.h>
102126+#include <linux/binfmts.h>
102127+#endif
102128+
102129 #include <asm/uaccess.h>
102130 #include <asm/pgtable.h>
102131 #include <asm/cacheflush.h>
102132 #include <asm/tlbflush.h>
102133+#include <asm/mmu_context.h>
102134
102135 #ifndef pgprot_modify
102136 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
102137@@ -256,6 +264,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
102138 return pages;
102139 }
102140
102141+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
102142 +/* called while holding the mmap semaphore for writing except stack expansion */
102143+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
102144+{
102145+ unsigned long oldlimit, newlimit = 0UL;
102146+
102147+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
102148+ return;
102149+
102150+ spin_lock(&mm->page_table_lock);
102151+ oldlimit = mm->context.user_cs_limit;
102152+ if ((prot & VM_EXEC) && oldlimit < end)
102153+ /* USER_CS limit moved up */
102154+ newlimit = end;
102155+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
102156+ /* USER_CS limit moved down */
102157+ newlimit = start;
102158+
102159+ if (newlimit) {
102160+ mm->context.user_cs_limit = newlimit;
102161+
102162+#ifdef CONFIG_SMP
102163+ wmb();
102164+ cpus_clear(mm->context.cpu_user_cs_mask);
102165+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
102166+#endif
102167+
102168+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
102169+ }
102170+ spin_unlock(&mm->page_table_lock);
102171+ if (newlimit == end) {
102172+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
102173+
102174+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
102175+ if (is_vm_hugetlb_page(vma))
102176+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
102177+ else
102178+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
102179+ }
102180+}
102181+#endif
102182+
102183 int
102184 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
102185 unsigned long start, unsigned long end, unsigned long newflags)
102186@@ -268,11 +318,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
102187 int error;
102188 int dirty_accountable = 0;
102189
102190+#ifdef CONFIG_PAX_SEGMEXEC
102191+ struct vm_area_struct *vma_m = NULL;
102192+ unsigned long start_m, end_m;
102193+
102194+ start_m = start + SEGMEXEC_TASK_SIZE;
102195+ end_m = end + SEGMEXEC_TASK_SIZE;
102196+#endif
102197+
102198 if (newflags == oldflags) {
102199 *pprev = vma;
102200 return 0;
102201 }
102202
102203+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
102204+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
102205+
102206+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
102207+ return -ENOMEM;
102208+
102209+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
102210+ return -ENOMEM;
102211+ }
102212+
102213 /*
102214 * If we make a private mapping writable we increase our commit;
102215 * but (without finer accounting) cannot reduce our commit if we
102216@@ -289,6 +357,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
102217 }
102218 }
102219
102220+#ifdef CONFIG_PAX_SEGMEXEC
102221+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
102222+ if (start != vma->vm_start) {
102223+ error = split_vma(mm, vma, start, 1);
102224+ if (error)
102225+ goto fail;
102226+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
102227+ *pprev = (*pprev)->vm_next;
102228+ }
102229+
102230+ if (end != vma->vm_end) {
102231+ error = split_vma(mm, vma, end, 0);
102232+ if (error)
102233+ goto fail;
102234+ }
102235+
102236+ if (pax_find_mirror_vma(vma)) {
102237+ error = __do_munmap(mm, start_m, end_m - start_m);
102238+ if (error)
102239+ goto fail;
102240+ } else {
102241+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
102242+ if (!vma_m) {
102243+ error = -ENOMEM;
102244+ goto fail;
102245+ }
102246+ vma->vm_flags = newflags;
102247+ error = pax_mirror_vma(vma_m, vma);
102248+ if (error) {
102249+ vma->vm_flags = oldflags;
102250+ goto fail;
102251+ }
102252+ }
102253+ }
102254+#endif
102255+
102256 /*
102257 * First try to merge with previous and/or next vma.
102258 */
102259@@ -319,9 +423,21 @@ success:
102260 * vm_flags and vm_page_prot are protected by the mmap_sem
102261 * held in write mode.
102262 */
102263+
102264+#ifdef CONFIG_PAX_SEGMEXEC
102265+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
102266+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
102267+#endif
102268+
102269 vma->vm_flags = newflags;
102270+
102271+#ifdef CONFIG_PAX_MPROTECT
102272+ if (mm->binfmt && mm->binfmt->handle_mprotect)
102273+ mm->binfmt->handle_mprotect(vma, newflags);
102274+#endif
102275+
102276 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
102277- vm_get_page_prot(newflags));
102278+ vm_get_page_prot(vma->vm_flags));
102279
102280 if (vma_wants_writenotify(vma)) {
102281 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
102282@@ -360,6 +476,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
102283 end = start + len;
102284 if (end <= start)
102285 return -ENOMEM;
102286+
102287+#ifdef CONFIG_PAX_SEGMEXEC
102288+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
102289+ if (end > SEGMEXEC_TASK_SIZE)
102290+ return -EINVAL;
102291+ } else
102292+#endif
102293+
102294+ if (end > TASK_SIZE)
102295+ return -EINVAL;
102296+
102297 if (!arch_validate_prot(prot))
102298 return -EINVAL;
102299
102300@@ -367,7 +494,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
102301 /*
102302 * Does the application expect PROT_READ to imply PROT_EXEC:
102303 */
102304- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
102305+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
102306 prot |= PROT_EXEC;
102307
102308 vm_flags = calc_vm_prot_bits(prot);
102309@@ -399,6 +526,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
102310 if (start > vma->vm_start)
102311 prev = vma;
102312
102313+#ifdef CONFIG_PAX_MPROTECT
102314+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
102315+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
102316+#endif
102317+
102318 for (nstart = start ; ; ) {
102319 unsigned long newflags;
102320
102321@@ -409,6 +541,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
102322
102323 /* newflags >> 4 shift VM_MAY% in place of VM_% */
102324 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
102325+ if (prot & (PROT_WRITE | PROT_EXEC))
102326+ gr_log_rwxmprotect(vma);
102327+
102328+ error = -EACCES;
102329+ goto out;
102330+ }
102331+
102332+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
102333 error = -EACCES;
102334 goto out;
102335 }
102336@@ -423,6 +563,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
102337 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
102338 if (error)
102339 goto out;
102340+
102341+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
102342+
102343 nstart = tmp;
102344
102345 if (nstart < prev->vm_end)
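
The decision at the heart of track_exec_limit() above is where the USER_CS limit moves. Extracted into a plain function (names mine, logic copied from the hunk), the update rule is:

/*
 * Limit-update rule from track_exec_limit() above: returns the new
 * user_cs_limit, or 0 when the limit is unchanged.
 */
static unsigned long new_cs_limit(unsigned long oldlimit,
				  unsigned long start, unsigned long end,
				  int exec)
{
	if (exec && oldlimit < end)
		return end;	/* range became executable above the limit: move up */
	if (!exec && start < oldlimit && oldlimit <= end)
		return start;	/* exec stripped across the limit: move down */
	return 0;
}

Raising the limit exposes formerly non-executable pages below it, which is presumably why the hunk re-applies per-VMA page protections between the old and new limit when newlimit == end.
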
102346diff --git a/mm/mremap.c b/mm/mremap.c
102347index 05f1180..c3cde48 100644
102348--- a/mm/mremap.c
102349+++ b/mm/mremap.c
102350@@ -144,6 +144,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
102351 continue;
102352 pte = ptep_get_and_clear(mm, old_addr, old_pte);
102353 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
102354+
102355+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
102356+ if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
102357+ pte = pte_exprotect(pte);
102358+#endif
102359+
102360 pte = move_soft_dirty_pte(pte);
102361 set_pte_at(mm, new_addr, new_pte, pte);
102362 }
102363@@ -344,6 +350,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
102364 if (is_vm_hugetlb_page(vma))
102365 goto Einval;
102366
102367+#ifdef CONFIG_PAX_SEGMEXEC
102368+ if (pax_find_mirror_vma(vma))
102369+ goto Einval;
102370+#endif
102371+
102372 /* We can't remap across vm area boundaries */
102373 if (old_len > vma->vm_end - addr)
102374 goto Efault;
102375@@ -399,20 +410,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
102376 unsigned long ret = -EINVAL;
102377 unsigned long charged = 0;
102378 unsigned long map_flags;
102379+ unsigned long pax_task_size = TASK_SIZE;
102380
102381 if (new_addr & ~PAGE_MASK)
102382 goto out;
102383
102384- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
102385+#ifdef CONFIG_PAX_SEGMEXEC
102386+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
102387+ pax_task_size = SEGMEXEC_TASK_SIZE;
102388+#endif
102389+
102390+ pax_task_size -= PAGE_SIZE;
102391+
102392+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
102393 goto out;
102394
102395 /* Check if the location we're moving into overlaps the
102396 * old location at all, and fail if it does.
102397 */
102398- if ((new_addr <= addr) && (new_addr+new_len) > addr)
102399- goto out;
102400-
102401- if ((addr <= new_addr) && (addr+old_len) > new_addr)
102402+ if (addr + old_len > new_addr && new_addr + new_len > addr)
102403 goto out;
102404
102405 ret = do_munmap(mm, new_addr, new_len);
102406@@ -481,6 +497,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
102407 unsigned long ret = -EINVAL;
102408 unsigned long charged = 0;
102409 bool locked = false;
102410+ unsigned long pax_task_size = TASK_SIZE;
102411
102412 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
102413 return ret;
102414@@ -502,6 +519,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
102415 if (!new_len)
102416 return ret;
102417
102418+#ifdef CONFIG_PAX_SEGMEXEC
102419+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
102420+ pax_task_size = SEGMEXEC_TASK_SIZE;
102421+#endif
102422+
102423+ pax_task_size -= PAGE_SIZE;
102424+
102425+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
102426+ old_len > pax_task_size || addr > pax_task_size-old_len)
102427+ return ret;
102428+
102429 down_write(&current->mm->mmap_sem);
102430
102431 if (flags & MREMAP_FIXED) {
102432@@ -552,6 +580,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
102433 new_addr = addr;
102434 }
102435 ret = addr;
102436+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
102437 goto out;
102438 }
102439 }
102440@@ -575,7 +604,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
102441 goto out;
102442 }
102443
102444+ map_flags = vma->vm_flags;
102445 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
102446+ if (!(ret & ~PAGE_MASK)) {
102447+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
102448+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
102449+ }
102450 }
102451 out:
102452 if (ret & ~PAGE_MASK)
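
mremap_to() above collapses two directional overlap tests into the single symmetric condition addr + old_len > new_addr && new_addr + new_len > addr. A quick brute-force check (plain user-space C, purely illustrative) confirms the two forms agree for all positive lengths:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	for (unsigned a = 0; a < 16; a++)		/* addr */
	  for (unsigned ol = 1; ol <= 8; ol++)		/* old_len */
	    for (unsigned n = 0; n < 16; n++)		/* new_addr */
	      for (unsigned nl = 1; nl <= 8; nl++) {	/* new_len */
		int old_form = (n <= a && n + nl > a) ||
			       (a <= n && a + ol > n);
		int new_form = a + ol > n && n + nl > a;
		assert(old_form == new_form);
	      }
	puts("overlap tests agree");
	return 0;
}
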
102453diff --git a/mm/nommu.c b/mm/nommu.c
102454index 4a852f6..4371a6b 100644
102455--- a/mm/nommu.c
102456+++ b/mm/nommu.c
102457@@ -70,7 +70,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
102458 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
102459 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
102460 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
102461-int heap_stack_gap = 0;
102462
102463 atomic_long_t mmap_pages_allocated;
102464
102465@@ -857,15 +856,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
102466 EXPORT_SYMBOL(find_vma);
102467
102468 /*
102469- * find a VMA
102470- * - we don't extend stack VMAs under NOMMU conditions
102471- */
102472-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
102473-{
102474- return find_vma(mm, addr);
102475-}
102476-
102477-/*
102478 * expand a stack to a given address
102479 * - not supported under NOMMU conditions
102480 */
102481@@ -1572,6 +1562,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
102482
102483 /* most fields are the same, copy all, and then fixup */
102484 *new = *vma;
102485+ INIT_LIST_HEAD(&new->anon_vma_chain);
102486 *region = *vma->vm_region;
102487 new->vm_region = region;
102488
102489@@ -2007,8 +1998,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
102490 }
102491 EXPORT_SYMBOL(generic_file_remap_pages);
102492
102493-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
102494- unsigned long addr, void *buf, int len, int write)
102495+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
102496+ unsigned long addr, void *buf, size_t len, int write)
102497 {
102498 struct vm_area_struct *vma;
102499
102500@@ -2049,8 +2040,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
102501 *
102502 * The caller must hold a reference on @mm.
102503 */
102504-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
102505- void *buf, int len, int write)
102506+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
102507+ void *buf, size_t len, int write)
102508 {
102509 return __access_remote_vm(NULL, mm, addr, buf, len, write);
102510 }
102511@@ -2059,7 +2050,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
102512 * Access another process' address space.
102513 * - source/target buffer must be kernel space
102514 */
102515-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
102516+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
102517 {
102518 struct mm_struct *mm;
102519
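
The nommu.c prototype changes widen the remote-access helpers' length and return types from int to size_t/ssize_t. The motivation is plain truncation (the illustration below is mine, not patch code): a large size_t squeezed through int can come out negative.

#include <limits.h>
#include <stdio.h>

int main(void)
{
	size_t len = (size_t)INT_MAX + 2;	/* plausible from a 64-bit caller */
	int truncated = (int)len;		/* implementation-defined; typically negative */

	printf("size_t %zu becomes int %d\n", len, truncated);
	return 0;
}
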
102520diff --git a/mm/page-writeback.c b/mm/page-writeback.c
102521index e0c9430..3c6bf79 100644
102522--- a/mm/page-writeback.c
102523+++ b/mm/page-writeback.c
102524@@ -667,7 +667,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
102525 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
102526 * - the bdi dirty thresh drops quickly due to change of JBOD workload
102527 */
102528-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
102529+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
102530 unsigned long thresh,
102531 unsigned long bg_thresh,
102532 unsigned long dirty,
102533diff --git a/mm/page_alloc.c b/mm/page_alloc.c
102534index ef44ad7..1056bc7 100644
102535--- a/mm/page_alloc.c
102536+++ b/mm/page_alloc.c
102537@@ -61,6 +61,7 @@
102538 #include <linux/page-debug-flags.h>
102539 #include <linux/hugetlb.h>
102540 #include <linux/sched/rt.h>
102541+#include <linux/random.h>
102542
102543 #include <asm/sections.h>
102544 #include <asm/tlbflush.h>
102545@@ -357,7 +358,7 @@ out:
102546 * This usage means that zero-order pages may not be compound.
102547 */
102548
102549-static void free_compound_page(struct page *page)
102550+void free_compound_page(struct page *page)
102551 {
102552 __free_pages_ok(page, compound_order(page));
102553 }
102554@@ -745,6 +746,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
102555 int i;
102556 int bad = 0;
102557
102558+#ifdef CONFIG_PAX_MEMORY_SANITIZE
102559+ unsigned long index = 1UL << order;
102560+#endif
102561+
102562 trace_mm_page_free(page, order);
102563 kmemcheck_free_shadow(page, order);
102564
102565@@ -761,6 +766,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
102566 debug_check_no_obj_freed(page_address(page),
102567 PAGE_SIZE << order);
102568 }
102569+
102570+#ifdef CONFIG_PAX_MEMORY_SANITIZE
102571+ for (; index; --index)
102572+ sanitize_highpage(page + index - 1);
102573+#endif
102574+
102575 arch_free_page(page, order);
102576 kernel_map_pages(page, 1 << order, 0);
102577
102578@@ -784,6 +795,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
102579 local_irq_restore(flags);
102580 }
102581
102582+#ifdef CONFIG_PAX_LATENT_ENTROPY
102583+bool __meminitdata extra_latent_entropy;
102584+
102585+static int __init setup_pax_extra_latent_entropy(char *str)
102586+{
102587+ extra_latent_entropy = true;
102588+ return 0;
102589+}
102590+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
102591+
102592+volatile u64 latent_entropy __latent_entropy;
102593+EXPORT_SYMBOL(latent_entropy);
102594+#endif
102595+
102596 void __init __free_pages_bootmem(struct page *page, unsigned int order)
102597 {
102598 unsigned int nr_pages = 1 << order;
102599@@ -799,6 +824,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
102600 __ClearPageReserved(p);
102601 set_page_count(p, 0);
102602
102603+#ifdef CONFIG_PAX_LATENT_ENTROPY
102604+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
102605+ u64 hash = 0;
102606+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
102607+ const u64 *data = lowmem_page_address(page);
102608+
102609+ for (index = 0; index < end; index++)
102610+ hash ^= hash + data[index];
102611+ latent_entropy ^= hash;
102612+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
102613+ }
102614+#endif
102615+
102616 page_zone(page)->managed_pages += nr_pages;
102617 set_page_refcounted(page);
102618 __free_pages(page, order);
102619@@ -927,8 +965,10 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
102620 arch_alloc_page(page, order);
102621 kernel_map_pages(page, 1 << order, 1);
102622
102623+#ifndef CONFIG_PAX_MEMORY_SANITIZE
102624 if (gfp_flags & __GFP_ZERO)
102625 prep_zero_page(page, order, gfp_flags);
102626+#endif
102627
102628 if (order && (gfp_flags & __GFP_COMP))
102629 prep_compound_page(page, order);
102630@@ -2427,7 +2467,7 @@ static void reset_alloc_batches(struct zonelist *zonelist,
102631 continue;
102632 mod_zone_page_state(zone, NR_ALLOC_BATCH,
102633 high_wmark_pages(zone) - low_wmark_pages(zone) -
102634- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
102635+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
102636 }
102637 }
102638
102639diff --git a/mm/percpu.c b/mm/percpu.c
102640index 2ddf9a9..f8fc075 100644
102641--- a/mm/percpu.c
102642+++ b/mm/percpu.c
102643@@ -123,7 +123,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
102644 static unsigned int pcpu_high_unit_cpu __read_mostly;
102645
102646 /* the address of the first chunk which starts with the kernel static area */
102647-void *pcpu_base_addr __read_mostly;
102648+void *pcpu_base_addr __read_only;
102649 EXPORT_SYMBOL_GPL(pcpu_base_addr);
102650
102651 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
102652diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
102653index a8b9199..dfb79e0 100644
102654--- a/mm/pgtable-generic.c
102655+++ b/mm/pgtable-generic.c
102656@@ -195,7 +195,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
102657 pmd_t entry = *pmdp;
102658 if (pmd_numa(entry))
102659 entry = pmd_mknonnuma(entry);
102660- set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp));
102661+ set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
102662 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
102663 }
102664 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
102665diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
102666index 5077afc..846c9ef 100644
102667--- a/mm/process_vm_access.c
102668+++ b/mm/process_vm_access.c
102669@@ -13,6 +13,7 @@
102670 #include <linux/uio.h>
102671 #include <linux/sched.h>
102672 #include <linux/highmem.h>
102673+#include <linux/security.h>
102674 #include <linux/ptrace.h>
102675 #include <linux/slab.h>
102676 #include <linux/syscalls.h>
102677@@ -157,19 +158,19 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
102678 ssize_t iov_len;
102679 size_t total_len = iov_iter_count(iter);
102680
102681+ return -ENOSYS; // PaX: until properly audited
102682+
102683 /*
102684 * Work out how many pages of struct pages we're going to need
102685 * when eventually calling get_user_pages
102686 */
102687 for (i = 0; i < riovcnt; i++) {
102688 iov_len = rvec[i].iov_len;
102689- if (iov_len > 0) {
102690- nr_pages_iov = ((unsigned long)rvec[i].iov_base
102691- + iov_len)
102692- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
102693- / PAGE_SIZE + 1;
102694- nr_pages = max(nr_pages, nr_pages_iov);
102695- }
102696+ if (iov_len <= 0)
102697+ continue;
102698+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
102699+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
102700+ nr_pages = max(nr_pages, nr_pages_iov);
102701 }
102702
102703 if (nr_pages == 0)
102704@@ -197,6 +198,11 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
102705 goto free_proc_pages;
102706 }
102707
102708+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
102709+ rc = -EPERM;
102710+ goto put_task_struct;
102711+ }
102712+
102713 mm = mm_access(task, PTRACE_MODE_ATTACH);
102714 if (!mm || IS_ERR(mm)) {
102715 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
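
The refactored iovec loop computes an upper bound on how many pages one vector can touch: the page index one past the end, minus the page index of the base, plus one; the rewrite from a nested if to an early continue is behavior-preserving. A small illustration:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Same formula as the hunk above: conservative page count for a range. */
static unsigned long pages_spanned(unsigned long base, unsigned long len)
{
	return (base + len) / PAGE_SIZE - base / PAGE_SIZE + 1;
}

int main(void)
{
	/* 100 bytes straddling one page boundary span two pages */
	printf("%lu\n", pages_spanned(4090, 100));	/* prints 2 */
	return 0;
}

The +1 makes the count conservative: a range ending exactly on a page boundary is charged one extra page, which only over-sizes the temporary struct page array.
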
102716diff --git a/mm/rmap.c b/mm/rmap.c
102717index 22a4a76..9551288 100644
102718--- a/mm/rmap.c
102719+++ b/mm/rmap.c
102720@@ -164,6 +164,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
102721 struct anon_vma *anon_vma = vma->anon_vma;
102722 struct anon_vma_chain *avc;
102723
102724+#ifdef CONFIG_PAX_SEGMEXEC
102725+ struct anon_vma_chain *avc_m = NULL;
102726+#endif
102727+
102728 might_sleep();
102729 if (unlikely(!anon_vma)) {
102730 struct mm_struct *mm = vma->vm_mm;
102731@@ -173,6 +177,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
102732 if (!avc)
102733 goto out_enomem;
102734
102735+#ifdef CONFIG_PAX_SEGMEXEC
102736+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
102737+ if (!avc_m)
102738+ goto out_enomem_free_avc;
102739+#endif
102740+
102741 anon_vma = find_mergeable_anon_vma(vma);
102742 allocated = NULL;
102743 if (!anon_vma) {
102744@@ -186,6 +196,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
102745 /* page_table_lock to protect against threads */
102746 spin_lock(&mm->page_table_lock);
102747 if (likely(!vma->anon_vma)) {
102748+
102749+#ifdef CONFIG_PAX_SEGMEXEC
102750+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
102751+
102752+ if (vma_m) {
102753+ BUG_ON(vma_m->anon_vma);
102754+ vma_m->anon_vma = anon_vma;
102755+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
102756+ avc_m = NULL;
102757+ }
102758+#endif
102759+
102760 vma->anon_vma = anon_vma;
102761 anon_vma_chain_link(vma, avc, anon_vma);
102762 allocated = NULL;
102763@@ -196,12 +218,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
102764
102765 if (unlikely(allocated))
102766 put_anon_vma(allocated);
102767+
102768+#ifdef CONFIG_PAX_SEGMEXEC
102769+ if (unlikely(avc_m))
102770+ anon_vma_chain_free(avc_m);
102771+#endif
102772+
102773 if (unlikely(avc))
102774 anon_vma_chain_free(avc);
102775 }
102776 return 0;
102777
102778 out_enomem_free_avc:
102779+
102780+#ifdef CONFIG_PAX_SEGMEXEC
102781+ if (avc_m)
102782+ anon_vma_chain_free(avc_m);
102783+#endif
102784+
102785 anon_vma_chain_free(avc);
102786 out_enomem:
102787 return -ENOMEM;
102788@@ -237,7 +271,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
102789 * Attach the anon_vmas from src to dst.
102790 * Returns 0 on success, -ENOMEM on failure.
102791 */
102792-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
102793+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
102794 {
102795 struct anon_vma_chain *avc, *pavc;
102796 struct anon_vma *root = NULL;
102797@@ -270,7 +304,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
102798 * the corresponding VMA in the parent process is attached to.
102799 * Returns 0 on success, non-zero on failure.
102800 */
102801-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
102802+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
102803 {
102804 struct anon_vma_chain *avc;
102805 struct anon_vma *anon_vma;
102806@@ -374,8 +408,10 @@ static void anon_vma_ctor(void *data)
102807 void __init anon_vma_init(void)
102808 {
102809 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
102810- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
102811- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
102812+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
102813+ anon_vma_ctor);
102814+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
102815+ SLAB_PANIC|SLAB_NO_SANITIZE);
102816 }
102817
102818 /*
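
The anon_vma_prepare() changes extend a standard kernel pattern: everything that might be needed under page_table_lock (the avc, and with SEGMEXEC also the mirror's avc_m) is allocated with GFP_KERNEL up front, since sleeping allocations are forbidden under a spinlock, and whichever allocation turns out unneeded is freed on the way out. A generic sketch of the shape (struct ctx, alloc_item() and free_item() are assumed, not patch code):

static int prepare_with_prealloc(struct ctx *c)
{
	struct item *it = alloc_item(GFP_KERNEL);	/* may sleep, so done before locking */

	if (!it)
		return -ENOMEM;
	spin_lock(&c->lock);
	if (!c->item) {			/* won the race: publish */
		c->item = it;
		it = NULL;
	}
	spin_unlock(&c->lock);
	if (it)
		free_item(it);		/* lost the race: discard the spare */
	return 0;
}
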
102819diff --git a/mm/shmem.c b/mm/shmem.c
102820index af68b15..1227320 100644
102821--- a/mm/shmem.c
102822+++ b/mm/shmem.c
102823@@ -33,7 +33,7 @@
102824 #include <linux/swap.h>
102825 #include <linux/aio.h>
102826
102827-static struct vfsmount *shm_mnt;
102828+struct vfsmount *shm_mnt;
102829
102830 #ifdef CONFIG_SHMEM
102831 /*
102832@@ -77,7 +77,7 @@ static struct vfsmount *shm_mnt;
102833 #define BOGO_DIRENT_SIZE 20
102834
102835 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
102836-#define SHORT_SYMLINK_LEN 128
102837+#define SHORT_SYMLINK_LEN 64
102838
102839 /*
102840 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
102841@@ -2219,6 +2219,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
102842 static int shmem_xattr_validate(const char *name)
102843 {
102844 struct { const char *prefix; size_t len; } arr[] = {
102845+
102846+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
102847+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
102848+#endif
102849+
102850 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
102851 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
102852 };
102853@@ -2274,6 +2279,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
102854 if (err)
102855 return err;
102856
102857+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
102858+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
102859+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
102860+ return -EOPNOTSUPP;
102861+ if (size > 8)
102862+ return -EINVAL;
102863+ }
102864+#endif
102865+
102866 return simple_xattr_set(&info->xattrs, name, value, size, flags);
102867 }
102868
102869@@ -2586,8 +2600,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
102870 int err = -ENOMEM;
102871
102872 /* Round up to L1_CACHE_BYTES to resist false sharing */
102873- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
102874- L1_CACHE_BYTES), GFP_KERNEL);
102875+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
102876 if (!sbinfo)
102877 return -ENOMEM;
102878
102879diff --git a/mm/slab.c b/mm/slab.c
102880index 3070b92..bcfff83 100644
102881--- a/mm/slab.c
102882+++ b/mm/slab.c
102883@@ -311,10 +311,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
102884 if ((x)->max_freeable < i) \
102885 (x)->max_freeable = i; \
102886 } while (0)
102887-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
102888-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
102889-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
102890-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
102891+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
102892+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
102893+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
102894+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
102895+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
102896+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
102897 #else
102898 #define STATS_INC_ACTIVE(x) do { } while (0)
102899 #define STATS_DEC_ACTIVE(x) do { } while (0)
102900@@ -331,6 +333,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
102901 #define STATS_INC_ALLOCMISS(x) do { } while (0)
102902 #define STATS_INC_FREEHIT(x) do { } while (0)
102903 #define STATS_INC_FREEMISS(x) do { } while (0)
102904+#define STATS_INC_SANITIZED(x) do { } while (0)
102905+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
102906 #endif
102907
102908 #if DEBUG
102909@@ -447,7 +451,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
102910 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
102911 */
102912 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
102913- const struct page *page, void *obj)
102914+ const struct page *page, const void *obj)
102915 {
102916 u32 offset = (obj - page->s_mem);
102917 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
102918@@ -1558,12 +1562,12 @@ void __init kmem_cache_init(void)
102919 */
102920
102921 kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
102922- kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
102923+ kmalloc_size(INDEX_AC), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
102924
102925 if (INDEX_AC != INDEX_NODE)
102926 kmalloc_caches[INDEX_NODE] =
102927 create_kmalloc_cache("kmalloc-node",
102928- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
102929+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
102930
102931 slab_early_init = 0;
102932
102933@@ -3512,6 +3516,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
102934 struct array_cache *ac = cpu_cache_get(cachep);
102935
102936 check_irq_off();
102937+
102938+#ifdef CONFIG_PAX_MEMORY_SANITIZE
102939+ if (pax_sanitize_slab) {
102940+ if (!(cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))) {
102941+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
102942+
102943+ if (cachep->ctor)
102944+ cachep->ctor(objp);
102945+
102946+ STATS_INC_SANITIZED(cachep);
102947+ } else
102948+ STATS_INC_NOT_SANITIZED(cachep);
102949+ }
102950+#endif
102951+
102952 kmemleak_free_recursive(objp, cachep->flags);
102953 objp = cache_free_debugcheck(cachep, objp, caller);
102954
102955@@ -3735,6 +3754,7 @@ void kfree(const void *objp)
102956
102957 if (unlikely(ZERO_OR_NULL_PTR(objp)))
102958 return;
102959+ VM_BUG_ON(!virt_addr_valid(objp));
102960 local_irq_save(flags);
102961 kfree_debugcheck(objp);
102962 c = virt_to_cache(objp);
102963@@ -4176,14 +4196,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
102964 }
102965 /* cpu stats */
102966 {
102967- unsigned long allochit = atomic_read(&cachep->allochit);
102968- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
102969- unsigned long freehit = atomic_read(&cachep->freehit);
102970- unsigned long freemiss = atomic_read(&cachep->freemiss);
102971+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
102972+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
102973+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
102974+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
102975
102976 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
102977 allochit, allocmiss, freehit, freemiss);
102978 }
102979+#ifdef CONFIG_PAX_MEMORY_SANITIZE
102980+ {
102981+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
102982+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
102983+
102984+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
102985+ }
102986+#endif
102987 #endif
102988 }
102989
102990@@ -4404,13 +4432,69 @@ static const struct file_operations proc_slabstats_operations = {
102991 static int __init slab_proc_init(void)
102992 {
102993 #ifdef CONFIG_DEBUG_SLAB_LEAK
102994- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
102995+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
102996 #endif
102997 return 0;
102998 }
102999 module_init(slab_proc_init);
103000 #endif
103001
103002+bool is_usercopy_object(const void *ptr)
103003+{
103004+ struct page *page;
103005+ struct kmem_cache *cachep;
103006+
103007+ if (ZERO_OR_NULL_PTR(ptr))
103008+ return false;
103009+
103010+ if (!slab_is_available())
103011+ return false;
103012+
103013+ if (!virt_addr_valid(ptr))
103014+ return false;
103015+
103016+ page = virt_to_head_page(ptr);
103017+
103018+ if (!PageSlab(page))
103019+ return false;
103020+
103021+ cachep = page->slab_cache;
103022+ return cachep->flags & SLAB_USERCOPY;
103023+}
103024+
103025+#ifdef CONFIG_PAX_USERCOPY
103026+const char *check_heap_object(const void *ptr, unsigned long n)
103027+{
103028+ struct page *page;
103029+ struct kmem_cache *cachep;
103030+ unsigned int objnr;
103031+ unsigned long offset;
103032+
103033+ if (ZERO_OR_NULL_PTR(ptr))
103034+ return "<null>";
103035+
103036+ if (!virt_addr_valid(ptr))
103037+ return NULL;
103038+
103039+ page = virt_to_head_page(ptr);
103040+
103041+ if (!PageSlab(page))
103042+ return NULL;
103043+
103044+ cachep = page->slab_cache;
103045+ if (!(cachep->flags & SLAB_USERCOPY))
103046+ return cachep->name;
103047+
103048+ objnr = obj_to_index(cachep, page, ptr);
103049+ BUG_ON(objnr >= cachep->num);
103050+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
103051+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
103052+ return NULL;
103053+
103054+ return cachep->name;
103055+}
103056+#endif
103057+
103058 /**
103059 * ksize - get the actual amount of memory allocated for a given object
103060 * @objp: Pointer to the object
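
The decisive branch in check_heap_object() above is the bounds test: a copy of n bytes at byte offset into an object of object_size bytes is allowed iff offset <= object_size && n <= object_size - offset. Keeping the subtraction on the right is deliberate: offset + n could wrap an unsigned long, while object_size - offset cannot once the first comparison has passed. As a standalone predicate:

#include <stdbool.h>

/* Bounds test from check_heap_object() above, as a plain predicate. */
static bool copy_within_object(unsigned long offset, unsigned long n,
			       unsigned long object_size)
{
	return offset <= object_size && n <= object_size - offset;
}
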
103061diff --git a/mm/slab.h b/mm/slab.h
103062index 961a3fb..6b12514 100644
103063--- a/mm/slab.h
103064+++ b/mm/slab.h
103065@@ -32,6 +32,15 @@ extern struct list_head slab_caches;
103066 /* The slab cache that manages slab cache information */
103067 extern struct kmem_cache *kmem_cache;
103068
103069+#ifdef CONFIG_PAX_MEMORY_SANITIZE
103070+#ifdef CONFIG_X86_64
103071+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
103072+#else
103073+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
103074+#endif
103075+extern bool pax_sanitize_slab;
103076+#endif
103077+
103078 unsigned long calculate_alignment(unsigned long flags,
103079 unsigned long align, unsigned long size);
103080
103081@@ -67,7 +76,8 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
103082
103083 /* Legal flag mask for kmem_cache_create(), for various configurations */
103084 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
103085- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
103086+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
103087+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
103088
103089 #if defined(CONFIG_DEBUG_SLAB)
103090 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
103091@@ -251,6 +261,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
103092 return s;
103093
103094 page = virt_to_head_page(x);
103095+
103096+ BUG_ON(!PageSlab(page));
103097+
103098 cachep = page->slab_cache;
103099 if (slab_equal_or_root(cachep, s))
103100 return cachep;
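The PAX_MEMORY_SANITIZE_VALUE split in the mm/slab.h hunk (0xFE on x86-64, 0xFF elsewhere) is not explained in the patch; a plausible reading is that the repeated byte should form a pointer that faults on first use. On x86-64, 0xfefefefefefefefe is non-canonical (bits 63..47 are not all equal), so a stale pointer loaded from sanitized memory traps immediately. A userspace illustration:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        void *p;

        /* what a pointer slot looks like after sanitization on x86-64 */
        memset(&p, 0xfe, sizeof(p));
        printf("poisoned pointer reads back as %p\n", p);
        /* dereferencing p would #GP: the value is non-canonical, so it
         * can never alias mapped memory, unlike an all-zero poison */
        return 0;
    }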
103101diff --git a/mm/slab_common.c b/mm/slab_common.c
103102index d31c4ba..1121296 100644
103103--- a/mm/slab_common.c
103104+++ b/mm/slab_common.c
103105@@ -23,11 +23,22 @@
103106
103107 #include "slab.h"
103108
103109-enum slab_state slab_state;
103110+enum slab_state slab_state __read_only;
103111 LIST_HEAD(slab_caches);
103112 DEFINE_MUTEX(slab_mutex);
103113 struct kmem_cache *kmem_cache;
103114
103115+#ifdef CONFIG_PAX_MEMORY_SANITIZE
103116+bool pax_sanitize_slab __read_only = true;
103117+static int __init pax_sanitize_slab_setup(char *str)
103118+{
103119+ pax_sanitize_slab = !!simple_strtol(str, NULL, 0);
103120+ printk("%sabled PaX slab sanitization\n", pax_sanitize_slab ? "En" : "Dis");
103121+ return 1;
103122+}
103123+__setup("pax_sanitize_slab=", pax_sanitize_slab_setup);
103124+#endif
103125+
103126 #ifdef CONFIG_DEBUG_VM
103127 static int kmem_cache_sanity_check(const char *name, size_t size)
103128 {
103129@@ -158,7 +169,7 @@ do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
103130 if (err)
103131 goto out_free_cache;
103132
103133- s->refcount = 1;
103134+ atomic_set(&s->refcount, 1);
103135 list_add(&s->list, &slab_caches);
103136 out:
103137 if (err)
103138@@ -339,8 +350,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
103139
103140 mutex_lock(&slab_mutex);
103141
103142- s->refcount--;
103143- if (s->refcount)
103144+ if (!atomic_dec_and_test(&s->refcount))
103145 goto out_unlock;
103146
103147 if (memcg_cleanup_cache_params(s) != 0)
103148@@ -360,7 +370,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
103149 rcu_barrier();
103150
103151 memcg_free_cache_params(s);
103152-#ifdef SLAB_SUPPORTS_SYSFS
103153+#if defined(SLAB_SUPPORTS_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
103154 sysfs_slab_remove(s);
103155 #else
103156 slab_kmem_cache_release(s);
103157@@ -416,7 +426,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
103158 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
103159 name, size, err);
103160
103161- s->refcount = -1; /* Exempt from merging for now */
103162+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
103163 }
103164
103165 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
103166@@ -429,7 +439,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
103167
103168 create_boot_cache(s, name, size, flags);
103169 list_add(&s->list, &slab_caches);
103170- s->refcount = 1;
103171+ atomic_set(&s->refcount, 1);
103172 return s;
103173 }
103174
103175@@ -441,6 +451,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
103176 EXPORT_SYMBOL(kmalloc_dma_caches);
103177 #endif
103178
103179+#ifdef CONFIG_PAX_USERCOPY_SLABS
103180+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
103181+EXPORT_SYMBOL(kmalloc_usercopy_caches);
103182+#endif
103183+
103184 /*
103185 * Conversion table for small slabs sizes / 8 to the index in the
103186 * kmalloc array. This is necessary for slabs < 192 since we have non power
103187@@ -505,6 +520,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
103188 return kmalloc_dma_caches[index];
103189
103190 #endif
103191+
103192+#ifdef CONFIG_PAX_USERCOPY_SLABS
103193+ if (unlikely((flags & GFP_USERCOPY)))
103194+ return kmalloc_usercopy_caches[index];
103195+
103196+#endif
103197+
103198 return kmalloc_caches[index];
103199 }
103200
103201@@ -561,7 +583,7 @@ void __init create_kmalloc_caches(unsigned long flags)
103202 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
103203 if (!kmalloc_caches[i]) {
103204 kmalloc_caches[i] = create_kmalloc_cache(NULL,
103205- 1 << i, flags);
103206+ 1 << i, SLAB_USERCOPY | flags);
103207 }
103208
103209 /*
103210@@ -570,10 +592,10 @@ void __init create_kmalloc_caches(unsigned long flags)
103211 * earlier power of two caches
103212 */
103213 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
103214- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
103215+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
103216
103217 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
103218- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
103219+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
103220 }
103221
103222 /* Kmalloc array is now usable */
103223@@ -606,6 +628,23 @@ void __init create_kmalloc_caches(unsigned long flags)
103224 }
103225 }
103226 #endif
103227+
103228+#ifdef CONFIG_PAX_USERCOPY_SLABS
103229+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
103230+ struct kmem_cache *s = kmalloc_caches[i];
103231+
103232+ if (s) {
103233+ int size = kmalloc_size(i);
103234+ char *n = kasprintf(GFP_NOWAIT,
103235+ "usercopy-kmalloc-%d", size);
103236+
103237+ BUG_ON(!n);
103238+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
103239+ size, SLAB_USERCOPY | flags);
103240+ }
103241+ }
103242+#endif
103243+
103244 }
103245 #endif /* !CONFIG_SLOB */
103246
103247@@ -664,6 +703,9 @@ void print_slabinfo_header(struct seq_file *m)
103248 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
103249 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
103250 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
103251+#ifdef CONFIG_PAX_MEMORY_SANITIZE
103252+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
103253+#endif
103254 #endif
103255 seq_putc(m, '\n');
103256 }
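The pax_sanitize_slab= handler added to mm/slab_common.c above follows the standard __setup() boot-parameter pattern: the handler receives the text after '=' from the kernel command line and returns 1 to consume the option. The same pattern in isolation, with a hypothetical option name:

    #include <linux/cache.h>
    #include <linux/init.h>
    #include <linux/kernel.h>

    /* hypothetical tunable, for illustration only */
    static bool example_sanitize __read_mostly = true;

    static int __init example_sanitize_setup(char *str)
    {
        example_sanitize = !!simple_strtol(str, NULL, 0);
        pr_info("example sanitization %s\n",
                example_sanitize ? "enabled" : "disabled");
        return 1;    /* option consumed */
    }
    __setup("example_sanitize=", example_sanitize_setup);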
103257diff --git a/mm/slob.c b/mm/slob.c
103258index 21980e0..ed9a648 100644
103259--- a/mm/slob.c
103260+++ b/mm/slob.c
103261@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
103262 /*
103263 * Return the size of a slob block.
103264 */
103265-static slobidx_t slob_units(slob_t *s)
103266+static slobidx_t slob_units(const slob_t *s)
103267 {
103268 if (s->units > 0)
103269 return s->units;
103270@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
103271 /*
103272 * Return the next free slob block pointer after this one.
103273 */
103274-static slob_t *slob_next(slob_t *s)
103275+static slob_t *slob_next(const slob_t *s)
103276 {
103277 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
103278 slobidx_t next;
103279@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
103280 /*
103281 * Returns true if s is the last free block in its page.
103282 */
103283-static int slob_last(slob_t *s)
103284+static int slob_last(const slob_t *s)
103285 {
103286 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
103287 }
103288
103289-static void *slob_new_pages(gfp_t gfp, int order, int node)
103290+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
103291 {
103292- void *page;
103293+ struct page *page;
103294
103295 #ifdef CONFIG_NUMA
103296 if (node != NUMA_NO_NODE)
103297@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
103298 if (!page)
103299 return NULL;
103300
103301- return page_address(page);
103302+ __SetPageSlab(page);
103303+ return page;
103304 }
103305
103306-static void slob_free_pages(void *b, int order)
103307+static void slob_free_pages(struct page *sp, int order)
103308 {
103309 if (current->reclaim_state)
103310 current->reclaim_state->reclaimed_slab += 1 << order;
103311- free_pages((unsigned long)b, order);
103312+ __ClearPageSlab(sp);
103313+ page_mapcount_reset(sp);
103314+ sp->private = 0;
103315+ __free_pages(sp, order);
103316 }
103317
103318 /*
103319@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
103320
103321 /* Not enough space: must allocate a new page */
103322 if (!b) {
103323- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
103324- if (!b)
103325+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
103326+ if (!sp)
103327 return NULL;
103328- sp = virt_to_page(b);
103329- __SetPageSlab(sp);
103330+ b = page_address(sp);
103331
103332 spin_lock_irqsave(&slob_lock, flags);
103333 sp->units = SLOB_UNITS(PAGE_SIZE);
103334 sp->freelist = b;
103335+ sp->private = 0;
103336 INIT_LIST_HEAD(&sp->lru);
103337 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
103338 set_slob_page_free(sp, slob_list);
103339@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
103340 if (slob_page_free(sp))
103341 clear_slob_page_free(sp);
103342 spin_unlock_irqrestore(&slob_lock, flags);
103343- __ClearPageSlab(sp);
103344- page_mapcount_reset(sp);
103345- slob_free_pages(b, 0);
103346+ slob_free_pages(sp, 0);
103347 return;
103348 }
103349
103350+#ifdef CONFIG_PAX_MEMORY_SANITIZE
103351+ if (pax_sanitize_slab)
103352+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
103353+#endif
103354+
103355 if (!slob_page_free(sp)) {
103356 /* This slob page is about to become partially free. Easy! */
103357 sp->units = units;
103358@@ -424,11 +431,10 @@ out:
103359 */
103360
103361 static __always_inline void *
103362-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
103363+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
103364 {
103365- unsigned int *m;
103366- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
103367- void *ret;
103368+ slob_t *m;
103369+ void *ret = NULL;
103370
103371 gfp &= gfp_allowed_mask;
103372
103373@@ -442,23 +448,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
103374
103375 if (!m)
103376 return NULL;
103377- *m = size;
103378+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
103379+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
103380+ m[0].units = size;
103381+ m[1].units = align;
103382 ret = (void *)m + align;
103383
103384 trace_kmalloc_node(caller, ret,
103385 size, size + align, gfp, node);
103386 } else {
103387 unsigned int order = get_order(size);
103388+ struct page *page;
103389
103390 if (likely(order))
103391 gfp |= __GFP_COMP;
103392- ret = slob_new_pages(gfp, order, node);
103393+ page = slob_new_pages(gfp, order, node);
103394+ if (page) {
103395+ ret = page_address(page);
103396+ page->private = size;
103397+ }
103398
103399 trace_kmalloc_node(caller, ret,
103400 size, PAGE_SIZE << order, gfp, node);
103401 }
103402
103403- kmemleak_alloc(ret, size, 1, gfp);
103404+ return ret;
103405+}
103406+
103407+static __always_inline void *
103408+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
103409+{
103410+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
103411+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
103412+
103413+ if (!ZERO_OR_NULL_PTR(ret))
103414+ kmemleak_alloc(ret, size, 1, gfp);
103415 return ret;
103416 }
103417
103418@@ -493,34 +517,112 @@ void kfree(const void *block)
103419 return;
103420 kmemleak_free(block);
103421
103422+ VM_BUG_ON(!virt_addr_valid(block));
103423 sp = virt_to_page(block);
103424- if (PageSlab(sp)) {
103425+ VM_BUG_ON(!PageSlab(sp));
103426+ if (!sp->private) {
103427 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
103428- unsigned int *m = (unsigned int *)(block - align);
103429- slob_free(m, *m + align);
103430- } else
103431+ slob_t *m = (slob_t *)(block - align);
103432+ slob_free(m, m[0].units + align);
103433+ } else {
103434+ __ClearPageSlab(sp);
103435+ page_mapcount_reset(sp);
103436+ sp->private = 0;
103437 __free_pages(sp, compound_order(sp));
103438+ }
103439 }
103440 EXPORT_SYMBOL(kfree);
103441
103442+bool is_usercopy_object(const void *ptr)
103443+{
103444+ if (!slab_is_available())
103445+ return false;
103446+
103447+ // PAX: TODO
103448+
103449+ return false;
103450+}
103451+
103452+#ifdef CONFIG_PAX_USERCOPY
103453+const char *check_heap_object(const void *ptr, unsigned long n)
103454+{
103455+ struct page *page;
103456+ const slob_t *free;
103457+ const void *base;
103458+ unsigned long flags;
103459+
103460+ if (ZERO_OR_NULL_PTR(ptr))
103461+ return "<null>";
103462+
103463+ if (!virt_addr_valid(ptr))
103464+ return NULL;
103465+
103466+ page = virt_to_head_page(ptr);
103467+ if (!PageSlab(page))
103468+ return NULL;
103469+
103470+ if (page->private) {
103471+ base = page;
103472+ if (base <= ptr && n <= page->private - (ptr - base))
103473+ return NULL;
103474+ return "<slob>";
103475+ }
103476+
103477+ /* some tricky double walking to find the chunk */
103478+ spin_lock_irqsave(&slob_lock, flags);
103479+ base = (void *)((unsigned long)ptr & PAGE_MASK);
103480+ free = page->freelist;
103481+
103482+ while (!slob_last(free) && (void *)free <= ptr) {
103483+ base = free + slob_units(free);
103484+ free = slob_next(free);
103485+ }
103486+
103487+ while (base < (void *)free) {
103488+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
103489+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
103490+ int offset;
103491+
103492+ if (ptr < base + align)
103493+ break;
103494+
103495+ offset = ptr - base - align;
103496+ if (offset >= m) {
103497+ base += size;
103498+ continue;
103499+ }
103500+
103501+ if (n > m - offset)
103502+ break;
103503+
103504+ spin_unlock_irqrestore(&slob_lock, flags);
103505+ return NULL;
103506+ }
103507+
103508+ spin_unlock_irqrestore(&slob_lock, flags);
103509+ return "<slob>";
103510+}
103511+#endif
103512+
103513 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
103514 size_t ksize(const void *block)
103515 {
103516 struct page *sp;
103517 int align;
103518- unsigned int *m;
103519+ slob_t *m;
103520
103521 BUG_ON(!block);
103522 if (unlikely(block == ZERO_SIZE_PTR))
103523 return 0;
103524
103525 sp = virt_to_page(block);
103526- if (unlikely(!PageSlab(sp)))
103527- return PAGE_SIZE << compound_order(sp);
103528+ VM_BUG_ON(!PageSlab(sp));
103529+ if (sp->private)
103530+ return sp->private;
103531
103532 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
103533- m = (unsigned int *)(block - align);
103534- return SLOB_UNITS(*m) * SLOB_UNIT;
103535+ m = (slob_t *)(block - align);
103536+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
103537 }
103538 EXPORT_SYMBOL(ksize);
103539
103540@@ -536,23 +638,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
103541
103542 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
103543 {
103544- void *b;
103545+ void *b = NULL;
103546
103547 flags &= gfp_allowed_mask;
103548
103549 lockdep_trace_alloc(flags);
103550
103551+#ifdef CONFIG_PAX_USERCOPY_SLABS
103552+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
103553+#else
103554 if (c->size < PAGE_SIZE) {
103555 b = slob_alloc(c->size, flags, c->align, node);
103556 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
103557 SLOB_UNITS(c->size) * SLOB_UNIT,
103558 flags, node);
103559 } else {
103560- b = slob_new_pages(flags, get_order(c->size), node);
103561+ struct page *sp;
103562+
103563+ sp = slob_new_pages(flags, get_order(c->size), node);
103564+ if (sp) {
103565+ b = page_address(sp);
103566+ sp->private = c->size;
103567+ }
103568 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
103569 PAGE_SIZE << get_order(c->size),
103570 flags, node);
103571 }
103572+#endif
103573
103574 if (b && c->ctor)
103575 c->ctor(b);
103576@@ -584,10 +696,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
103577
103578 static void __kmem_cache_free(void *b, int size)
103579 {
103580- if (size < PAGE_SIZE)
103581+ struct page *sp;
103582+
103583+ sp = virt_to_page(b);
103584+ BUG_ON(!PageSlab(sp));
103585+ if (!sp->private)
103586 slob_free(b, size);
103587 else
103588- slob_free_pages(b, get_order(size));
103589+ slob_free_pages(sp, get_order(size));
103590 }
103591
103592 static void kmem_rcu_free(struct rcu_head *head)
103593@@ -600,17 +716,31 @@ static void kmem_rcu_free(struct rcu_head *head)
103594
103595 void kmem_cache_free(struct kmem_cache *c, void *b)
103596 {
103597+ int size = c->size;
103598+
103599+#ifdef CONFIG_PAX_USERCOPY_SLABS
103600+ if (size + c->align < PAGE_SIZE) {
103601+ size += c->align;
103602+ b -= c->align;
103603+ }
103604+#endif
103605+
103606 kmemleak_free_recursive(b, c->flags);
103607 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
103608 struct slob_rcu *slob_rcu;
103609- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
103610- slob_rcu->size = c->size;
103611+ slob_rcu = b + (size - sizeof(struct slob_rcu));
103612+ slob_rcu->size = size;
103613 call_rcu(&slob_rcu->head, kmem_rcu_free);
103614 } else {
103615- __kmem_cache_free(b, c->size);
103616+ __kmem_cache_free(b, size);
103617 }
103618
103619+#ifdef CONFIG_PAX_USERCOPY_SLABS
103620+ trace_kfree(_RET_IP_, b);
103621+#else
103622 trace_kmem_cache_free(_RET_IP_, b);
103623+#endif
103624+
103625 }
103626 EXPORT_SYMBOL(kmem_cache_free);
103627
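A key change in the mm/slob.c hunks: kmalloc'd SLOB blocks now carry a two-slot header (m[0].units holds the requested size, m[1].units the alignment) instead of a single unsigned int, so kfree(), ksize() and the usercopy walker can all recover both values. A rough userspace model of that layout (plain longs stand in for slob_t.units):

    #include <stdio.h>
    #include <string.h>

    /* stand-in for two slob_t units: m[0].units and m[1].units */
    struct slob_hdr_model {
        long size;     /* bytes the caller asked for          */
        long align;    /* padding inserted before the payload */
    };

    static unsigned long model_ksize(const void *payload, long align)
    {
        const struct slob_hdr_model *hdr =
            (const void *)((const char *)payload - align);

        return (unsigned long)hdr->size;    /* what kfree()/ksize() read back */
    }

    int main(void)
    {
        unsigned char block[64];
        long align = 16;    /* stands in for max(KMALLOC, SLAB minalign) */
        struct slob_hdr_model hdr = { 24, 16 };

        memcpy(block, &hdr, sizeof(hdr));
        printf("recovered size: %lu\n", model_ksize(block + align, align));
        return 0;
    }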
103628diff --git a/mm/slub.c b/mm/slub.c
103629index 7300480..cb92846 100644
103630--- a/mm/slub.c
103631+++ b/mm/slub.c
103632@@ -207,7 +207,7 @@ struct track {
103633
103634 enum track_item { TRACK_ALLOC, TRACK_FREE };
103635
103636-#ifdef CONFIG_SYSFS
103637+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
103638 static int sysfs_slab_add(struct kmem_cache *);
103639 static int sysfs_slab_alias(struct kmem_cache *, const char *);
103640 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
103641@@ -546,7 +546,7 @@ static void print_track(const char *s, struct track *t)
103642 if (!t->addr)
103643 return;
103644
103645- pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
103646+ pr_err("INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
103647 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
103648 #ifdef CONFIG_STACKTRACE
103649 {
103650@@ -2673,6 +2673,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
103651
103652 slab_free_hook(s, x);
103653
103654+#ifdef CONFIG_PAX_MEMORY_SANITIZE
103655+ if (pax_sanitize_slab && !(s->flags & SLAB_NO_SANITIZE)) {
103656+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
103657+ if (s->ctor)
103658+ s->ctor(x);
103659+ }
103660+#endif
103661+
103662 redo:
103663 /*
103664 * Determine the currently cpus per cpu slab.
103665@@ -2740,7 +2748,7 @@ static int slub_min_objects;
103666 * Merge control. If this is set then no merging of slab caches will occur.
103667 * (Could be removed. This was introduced to pacify the merge skeptics.)
103668 */
103669-static int slub_nomerge;
103670+static int slub_nomerge = 1;
103671
103672 /*
103673 * Calculate the order of allocation given an slab object size.
103674@@ -3019,6 +3027,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
103675 s->inuse = size;
103676
103677 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
103678+#ifdef CONFIG_PAX_MEMORY_SANITIZE
103679+ (pax_sanitize_slab && !(flags & SLAB_NO_SANITIZE)) ||
103680+#endif
103681 s->ctor)) {
103682 /*
103683 * Relocate free pointer after the object if it is not
103684@@ -3347,6 +3358,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
103685 EXPORT_SYMBOL(__kmalloc_node);
103686 #endif
103687
103688+bool is_usercopy_object(const void *ptr)
103689+{
103690+ struct page *page;
103691+ struct kmem_cache *s;
103692+
103693+ if (ZERO_OR_NULL_PTR(ptr))
103694+ return false;
103695+
103696+ if (!slab_is_available())
103697+ return false;
103698+
103699+ if (!virt_addr_valid(ptr))
103700+ return false;
103701+
103702+ page = virt_to_head_page(ptr);
103703+
103704+ if (!PageSlab(page))
103705+ return false;
103706+
103707+ s = page->slab_cache;
103708+ return s->flags & SLAB_USERCOPY;
103709+}
103710+
103711+#ifdef CONFIG_PAX_USERCOPY
103712+const char *check_heap_object(const void *ptr, unsigned long n)
103713+{
103714+ struct page *page;
103715+ struct kmem_cache *s;
103716+ unsigned long offset;
103717+
103718+ if (ZERO_OR_NULL_PTR(ptr))
103719+ return "<null>";
103720+
103721+ if (!virt_addr_valid(ptr))
103722+ return NULL;
103723+
103724+ page = virt_to_head_page(ptr);
103725+
103726+ if (!PageSlab(page))
103727+ return NULL;
103728+
103729+ s = page->slab_cache;
103730+ if (!(s->flags & SLAB_USERCOPY))
103731+ return s->name;
103732+
103733+ offset = (ptr - page_address(page)) % s->size;
103734+ if (offset <= s->object_size && n <= s->object_size - offset)
103735+ return NULL;
103736+
103737+ return s->name;
103738+}
103739+#endif
103740+
103741 size_t ksize(const void *object)
103742 {
103743 struct page *page;
103744@@ -3375,6 +3439,7 @@ void kfree(const void *x)
103745 if (unlikely(ZERO_OR_NULL_PTR(x)))
103746 return;
103747
103748+ VM_BUG_ON(!virt_addr_valid(x));
103749 page = virt_to_head_page(x);
103750 if (unlikely(!PageSlab(page))) {
103751 BUG_ON(!PageCompound(page));
103752@@ -3680,7 +3745,7 @@ static int slab_unmergeable(struct kmem_cache *s)
103753 /*
103754 * We may have set a slab to be unmergeable during bootstrap.
103755 */
103756- if (s->refcount < 0)
103757+ if (atomic_read(&s->refcount) < 0)
103758 return 1;
103759
103760 return 0;
103761@@ -3737,7 +3802,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
103762 int i;
103763 struct kmem_cache *c;
103764
103765- s->refcount++;
103766+ atomic_inc(&s->refcount);
103767
103768 /*
103769 * Adjust the object sizes so that we clear
103770@@ -3756,7 +3821,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
103771 }
103772
103773 if (sysfs_slab_alias(s, name)) {
103774- s->refcount--;
103775+ atomic_dec(&s->refcount);
103776 s = NULL;
103777 }
103778 }
103779@@ -3873,7 +3938,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
103780 }
103781 #endif
103782
103783-#ifdef CONFIG_SYSFS
103784+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
103785 static int count_inuse(struct page *page)
103786 {
103787 return page->inuse;
103788@@ -4156,7 +4221,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
103789 len += sprintf(buf + len, "%7ld ", l->count);
103790
103791 if (l->addr)
103792+#ifdef CONFIG_GRKERNSEC_HIDESYM
103793+ len += sprintf(buf + len, "%pS", NULL);
103794+#else
103795 len += sprintf(buf + len, "%pS", (void *)l->addr);
103796+#endif
103797 else
103798 len += sprintf(buf + len, "<not-available>");
103799
103800@@ -4258,12 +4327,12 @@ static void resiliency_test(void)
103801 validate_slab_cache(kmalloc_caches[9]);
103802 }
103803 #else
103804-#ifdef CONFIG_SYSFS
103805+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
103806 static void resiliency_test(void) {};
103807 #endif
103808 #endif
103809
103810-#ifdef CONFIG_SYSFS
103811+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
103812 enum slab_stat_type {
103813 SL_ALL, /* All slabs */
103814 SL_PARTIAL, /* Only partially allocated slabs */
103815@@ -4503,13 +4572,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
103816 {
103817 if (!s->ctor)
103818 return 0;
103819+#ifdef CONFIG_GRKERNSEC_HIDESYM
103820+ return sprintf(buf, "%pS\n", NULL);
103821+#else
103822 return sprintf(buf, "%pS\n", s->ctor);
103823+#endif
103824 }
103825 SLAB_ATTR_RO(ctor);
103826
103827 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
103828 {
103829- return sprintf(buf, "%d\n", s->refcount - 1);
103830+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
103831 }
103832 SLAB_ATTR_RO(aliases);
103833
103834@@ -4597,6 +4670,14 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
103835 SLAB_ATTR_RO(cache_dma);
103836 #endif
103837
103838+#ifdef CONFIG_PAX_USERCOPY_SLABS
103839+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
103840+{
103841+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
103842+}
103843+SLAB_ATTR_RO(usercopy);
103844+#endif
103845+
103846 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
103847 {
103848 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
103849@@ -4931,6 +5012,9 @@ static struct attribute *slab_attrs[] = {
103850 #ifdef CONFIG_ZONE_DMA
103851 &cache_dma_attr.attr,
103852 #endif
103853+#ifdef CONFIG_PAX_USERCOPY_SLABS
103854+ &usercopy_attr.attr,
103855+#endif
103856 #ifdef CONFIG_NUMA
103857 &remote_node_defrag_ratio_attr.attr,
103858 #endif
103859@@ -5181,6 +5265,7 @@ static char *create_unique_id(struct kmem_cache *s)
103860 return name;
103861 }
103862
103863+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
103864 static int sysfs_slab_add(struct kmem_cache *s)
103865 {
103866 int err;
103867@@ -5254,6 +5339,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
103868 kobject_del(&s->kobj);
103869 kobject_put(&s->kobj);
103870 }
103871+#endif
103872
103873 /*
103874 * Need to buffer aliases during bootup until sysfs becomes
103875@@ -5267,6 +5353,7 @@ struct saved_alias {
103876
103877 static struct saved_alias *alias_list;
103878
103879+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
103880 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
103881 {
103882 struct saved_alias *al;
103883@@ -5289,6 +5376,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
103884 alias_list = al;
103885 return 0;
103886 }
103887+#endif
103888
103889 static int __init slab_sysfs_init(void)
103890 {
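The SLUB variant of check_heap_object() above exploits SLUB's fixed object stride: objects are packed every s->size bytes from page_address(page), so the offset into the containing object is a single modulo rather than the index/offset arithmetic SLAB needs. A userspace model (names and constants illustrative):

    #include <stdio.h>

    /* NULL: copy stays inside one object; non-NULL: report the cache */
    static const char *slub_check(unsigned long ptr, unsigned long n,
                                  unsigned long page_base,
                                  unsigned long stride,       /* s->size        */
                                  unsigned long object_size)  /* s->object_size */
    {
        unsigned long offset = (ptr - page_base) % stride;

        if (offset <= object_size && n <= object_size - offset)
            return NULL;
        return "some-cache";
    }

    int main(void)
    {
        /* 64-byte objects: copying 80 bytes from offset 32 overflows */
        printf("%s\n", slub_check(0x1000 + 32, 80, 0x1000, 64, 64)
                       ? "reject" : "ok");
        return 0;
    }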
103891diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
103892index 4cba9c2..b4f9fcc 100644
103893--- a/mm/sparse-vmemmap.c
103894+++ b/mm/sparse-vmemmap.c
103895@@ -131,7 +131,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
103896 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
103897 if (!p)
103898 return NULL;
103899- pud_populate(&init_mm, pud, p);
103900+ pud_populate_kernel(&init_mm, pud, p);
103901 }
103902 return pud;
103903 }
103904@@ -143,7 +143,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
103905 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
103906 if (!p)
103907 return NULL;
103908- pgd_populate(&init_mm, pgd, p);
103909+ pgd_populate_kernel(&init_mm, pgd, p);
103910 }
103911 return pgd;
103912 }
103913diff --git a/mm/sparse.c b/mm/sparse.c
103914index d1b48b6..6e8590e 100644
103915--- a/mm/sparse.c
103916+++ b/mm/sparse.c
103917@@ -750,7 +750,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
103918
103919 for (i = 0; i < PAGES_PER_SECTION; i++) {
103920 if (PageHWPoison(&memmap[i])) {
103921- atomic_long_sub(1, &num_poisoned_pages);
103922+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
103923 ClearPageHWPoison(&memmap[i]);
103924 }
103925 }
103926diff --git a/mm/swap.c b/mm/swap.c
103927index 9e8e347..3c22e0f 100644
103928--- a/mm/swap.c
103929+++ b/mm/swap.c
103930@@ -31,6 +31,7 @@
103931 #include <linux/memcontrol.h>
103932 #include <linux/gfp.h>
103933 #include <linux/uio.h>
103934+#include <linux/hugetlb.h>
103935
103936 #include "internal.h"
103937
103938@@ -76,6 +77,8 @@ static void __put_compound_page(struct page *page)
103939
103940 __page_cache_release(page);
103941 dtor = get_compound_page_dtor(page);
103942+ if (!PageHuge(page))
103943+ BUG_ON(dtor != free_compound_page);
103944 (*dtor)(page);
103945 }
103946
103947diff --git a/mm/swapfile.c b/mm/swapfile.c
103948index 4c524f7..f7601f17 100644
103949--- a/mm/swapfile.c
103950+++ b/mm/swapfile.c
103951@@ -84,7 +84,7 @@ static DEFINE_MUTEX(swapon_mutex);
103952
103953 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
103954 /* Activity counter to indicate that a swapon or swapoff has occurred */
103955-static atomic_t proc_poll_event = ATOMIC_INIT(0);
103956+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
103957
103958 static inline unsigned char swap_count(unsigned char ent)
103959 {
103960@@ -1945,7 +1945,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
103961 spin_unlock(&swap_lock);
103962
103963 err = 0;
103964- atomic_inc(&proc_poll_event);
103965+ atomic_inc_unchecked(&proc_poll_event);
103966 wake_up_interruptible(&proc_poll_wait);
103967
103968 out_dput:
103969@@ -1962,8 +1962,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
103970
103971 poll_wait(file, &proc_poll_wait, wait);
103972
103973- if (seq->poll_event != atomic_read(&proc_poll_event)) {
103974- seq->poll_event = atomic_read(&proc_poll_event);
103975+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
103976+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
103977 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
103978 }
103979
103980@@ -2061,7 +2061,7 @@ static int swaps_open(struct inode *inode, struct file *file)
103981 return ret;
103982
103983 seq = file->private_data;
103984- seq->poll_event = atomic_read(&proc_poll_event);
103985+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
103986 return 0;
103987 }
103988
103989@@ -2521,7 +2521,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
103990 (frontswap_map) ? "FS" : "");
103991
103992 mutex_unlock(&swapon_mutex);
103993- atomic_inc(&proc_poll_event);
103994+ atomic_inc_unchecked(&proc_poll_event);
103995 wake_up_interruptible(&proc_poll_wait);
103996
103997 if (S_ISREG(inode->i_mode))
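The atomic_t to atomic_unchecked_t conversions in this and the surrounding hunks follow one rule: under PaX's REFCOUNT hardening, plain atomic_t operations trap on overflow to stop reference-count exploits, so counters that may legitimately wrap (sequence numbers, statistics, event counters like proc_poll_event) are switched to _unchecked variants that skip the trap. A plain-C model of the distinction (the kernel does this in inline asm; this is only an illustration):

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { int counter; } atomic_model_t;

    /* model of a PaX-instrumented atomic_inc(): trap instead of wrapping */
    static void inc_checked(atomic_model_t *v)
    {
        if (v->counter == INT_MAX) {
            fprintf(stderr, "refcount overflow caught\n");
            abort();
        }
        v->counter++;
    }

    /* model of atomic_inc_unchecked(): wrapping is deliberate */
    static void inc_unchecked(atomic_model_t *v)
    {
        v->counter = (int)((unsigned int)v->counter + 1u);
    }

    int main(void)
    {
        atomic_model_t stat = { INT_MAX };

        inc_unchecked(&stat);                /* statistics may wrap */
        printf("wrapped to %d\n", stat.counter);
        stat.counter = INT_MAX;
        inc_checked(&stat);                  /* this one aborts */
        return 0;
    }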
103998diff --git a/mm/util.c b/mm/util.c
103999index 33e9f44..be026b2 100644
104000--- a/mm/util.c
104001+++ b/mm/util.c
104002@@ -296,6 +296,12 @@ done:
104003 void arch_pick_mmap_layout(struct mm_struct *mm)
104004 {
104005 mm->mmap_base = TASK_UNMAPPED_BASE;
104006+
104007+#ifdef CONFIG_PAX_RANDMMAP
104008+ if (mm->pax_flags & MF_PAX_RANDMMAP)
104009+ mm->mmap_base += mm->delta_mmap;
104010+#endif
104011+
104012 mm->get_unmapped_area = arch_get_unmapped_area;
104013 }
104014 #endif
104015@@ -472,6 +478,9 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
104016 if (!mm->arg_end)
104017 goto out_mm; /* Shh! No looking before we're done */
104018
104019+ if (gr_acl_handle_procpidmem(task))
104020+ goto out_mm;
104021+
104022 len = mm->arg_end - mm->arg_start;
104023
104024 if (len > buflen)
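The PAX_RANDMMAP hunk above shifts the mmap base by a per-process random delta chosen at exec time, so shared-library load addresses differ between runs. A toy userspace model with made-up constants (the real mm->delta_mmap comes from the kernel's entropy pool and is page-aligned):

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    #define TASK_UNMAPPED_BASE 0x40000000UL    /* made-up */

    int main(void)
    {
        unsigned long delta_mmap, mmap_base;

        srand((unsigned)time(NULL));
        /* page-aligned random delta, like mm->delta_mmap at exec time */
        delta_mmap = ((unsigned long)rand() & 0xffffUL) << 12;
        mmap_base  = TASK_UNMAPPED_BASE + delta_mmap;

        printf("mmap base this run: %#lx\n", mmap_base);
        return 0;
    }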
104025diff --git a/mm/vmalloc.c b/mm/vmalloc.c
104026index f64632b..e8c52e7 100644
104027--- a/mm/vmalloc.c
104028+++ b/mm/vmalloc.c
104029@@ -40,6 +40,21 @@ struct vfree_deferred {
104030 };
104031 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
104032
104033+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
104034+struct stack_deferred_llist {
104035+ struct llist_head list;
104036+ void *stack;
104037+ void *lowmem_stack;
104038+};
104039+
104040+struct stack_deferred {
104041+ struct stack_deferred_llist list;
104042+ struct work_struct wq;
104043+};
104044+
104045+static DEFINE_PER_CPU(struct stack_deferred, stack_deferred);
104046+#endif
104047+
104048 static void __vunmap(const void *, int);
104049
104050 static void free_work(struct work_struct *w)
104051@@ -47,12 +62,30 @@ static void free_work(struct work_struct *w)
104052 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
104053 struct llist_node *llnode = llist_del_all(&p->list);
104054 while (llnode) {
104055- void *p = llnode;
104056+ void *x = llnode;
104057 llnode = llist_next(llnode);
104058- __vunmap(p, 1);
104059+ __vunmap(x, 1);
104060 }
104061 }
104062
104063+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
104064+static void unmap_work(struct work_struct *w)
104065+{
104066+ struct stack_deferred *p = container_of(w, struct stack_deferred, wq);
104067+ struct llist_node *llnode = llist_del_all(&p->list.list);
104068+ while (llnode) {
104069+ struct stack_deferred_llist *x =
104070+ llist_entry((struct llist_head *)llnode,
104071+ struct stack_deferred_llist, list);
104072+ void *stack = ACCESS_ONCE(x->stack);
104073+ void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack);
104074+ llnode = llist_next(llnode);
104075+ __vunmap(stack, 0);
104076+ free_kmem_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER);
104077+ }
104078+}
104079+#endif
104080+
104081 /*** Page table manipulation functions ***/
104082
104083 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
104084@@ -61,8 +94,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
104085
104086 pte = pte_offset_kernel(pmd, addr);
104087 do {
104088- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
104089- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
104090+
104091+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
104092+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
104093+ BUG_ON(!pte_exec(*pte));
104094+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
104095+ continue;
104096+ }
104097+#endif
104098+
104099+ {
104100+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
104101+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
104102+ }
104103 } while (pte++, addr += PAGE_SIZE, addr != end);
104104 }
104105
104106@@ -122,16 +166,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
104107 pte = pte_alloc_kernel(pmd, addr);
104108 if (!pte)
104109 return -ENOMEM;
104110+
104111+ pax_open_kernel();
104112 do {
104113 struct page *page = pages[*nr];
104114
104115- if (WARN_ON(!pte_none(*pte)))
104116+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
104117+ if (pgprot_val(prot) & _PAGE_NX)
104118+#endif
104119+
104120+ if (!pte_none(*pte)) {
104121+ pax_close_kernel();
104122+ WARN_ON(1);
104123 return -EBUSY;
104124- if (WARN_ON(!page))
104125+ }
104126+ if (!page) {
104127+ pax_close_kernel();
104128+ WARN_ON(1);
104129 return -ENOMEM;
104130+ }
104131 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
104132 (*nr)++;
104133 } while (pte++, addr += PAGE_SIZE, addr != end);
104134+ pax_close_kernel();
104135 return 0;
104136 }
104137
104138@@ -141,7 +198,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
104139 pmd_t *pmd;
104140 unsigned long next;
104141
104142- pmd = pmd_alloc(&init_mm, pud, addr);
104143+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
104144 if (!pmd)
104145 return -ENOMEM;
104146 do {
104147@@ -158,7 +215,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
104148 pud_t *pud;
104149 unsigned long next;
104150
104151- pud = pud_alloc(&init_mm, pgd, addr);
104152+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
104153 if (!pud)
104154 return -ENOMEM;
104155 do {
104156@@ -218,6 +275,12 @@ int is_vmalloc_or_module_addr(const void *x)
104157 if (addr >= MODULES_VADDR && addr < MODULES_END)
104158 return 1;
104159 #endif
104160+
104161+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
104162+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
104163+ return 1;
104164+#endif
104165+
104166 return is_vmalloc_addr(x);
104167 }
104168
104169@@ -238,8 +301,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
104170
104171 if (!pgd_none(*pgd)) {
104172 pud_t *pud = pud_offset(pgd, addr);
104173+#ifdef CONFIG_X86
104174+ if (!pud_large(*pud))
104175+#endif
104176 if (!pud_none(*pud)) {
104177 pmd_t *pmd = pmd_offset(pud, addr);
104178+#ifdef CONFIG_X86
104179+ if (!pmd_large(*pmd))
104180+#endif
104181 if (!pmd_none(*pmd)) {
104182 pte_t *ptep, pte;
104183
104184@@ -1183,13 +1252,23 @@ void __init vmalloc_init(void)
104185 for_each_possible_cpu(i) {
104186 struct vmap_block_queue *vbq;
104187 struct vfree_deferred *p;
104188+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
104189+ struct stack_deferred *p2;
104190+#endif
104191
104192 vbq = &per_cpu(vmap_block_queue, i);
104193 spin_lock_init(&vbq->lock);
104194 INIT_LIST_HEAD(&vbq->free);
104195+
104196 p = &per_cpu(vfree_deferred, i);
104197 init_llist_head(&p->list);
104198 INIT_WORK(&p->wq, free_work);
104199+
104200+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
104201+ p2 = &per_cpu(stack_deferred, i);
104202+ init_llist_head(&p2->list.list);
104203+ INIT_WORK(&p2->wq, unmap_work);
104204+#endif
104205 }
104206
104207 /* Import existing vmlist entries. */
104208@@ -1318,6 +1397,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
104209 struct vm_struct *area;
104210
104211 BUG_ON(in_interrupt());
104212+
104213+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
104214+ if (flags & VM_KERNEXEC) {
104215+ if (start != VMALLOC_START || end != VMALLOC_END)
104216+ return NULL;
104217+ start = (unsigned long)MODULES_EXEC_VADDR;
104218+ end = (unsigned long)MODULES_EXEC_END;
104219+ }
104220+#endif
104221+
104222 if (flags & VM_IOREMAP)
104223 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
104224
104225@@ -1523,6 +1612,23 @@ void vunmap(const void *addr)
104226 }
104227 EXPORT_SYMBOL(vunmap);
104228
104229+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
104230+void unmap_process_stacks(struct task_struct *task)
104231+{
104232+ if (unlikely(in_interrupt())) {
104233+ struct stack_deferred *p = &__get_cpu_var(stack_deferred);
104234+ struct stack_deferred_llist *list = task->stack;
104235+ list->stack = task->stack;
104236+ list->lowmem_stack = task->lowmem_stack;
104237+ if (llist_add((struct llist_node *)&list->list, &p->list.list))
104238+ schedule_work(&p->wq);
104239+ } else {
104240+ __vunmap(task->stack, 0);
104241+ free_kmem_pages((unsigned long)task->lowmem_stack, THREAD_SIZE_ORDER);
104242+ }
104243+}
104244+#endif
104245+
104246 /**
104247 * vmap - map an array of pages into virtually contiguous space
104248 * @pages: array of page pointers
104249@@ -1543,6 +1649,11 @@ void *vmap(struct page **pages, unsigned int count,
104250 if (count > totalram_pages)
104251 return NULL;
104252
104253+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
104254+ if (!(pgprot_val(prot) & _PAGE_NX))
104255+ flags |= VM_KERNEXEC;
104256+#endif
104257+
104258 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
104259 __builtin_return_address(0));
104260 if (!area)
104261@@ -1643,6 +1754,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
104262 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
104263 goto fail;
104264
104265+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
104266+ if (!(pgprot_val(prot) & _PAGE_NX))
104267+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | VM_KERNEXEC,
104268+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
104269+ else
104270+#endif
104271+
104272 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
104273 start, end, node, gfp_mask, caller);
104274 if (!area)
104275@@ -1819,10 +1937,9 @@ EXPORT_SYMBOL(vzalloc_node);
104276 * For tight control over page level allocator and protection flags
104277 * use __vmalloc() instead.
104278 */
104279-
104280 void *vmalloc_exec(unsigned long size)
104281 {
104282- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
104283+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
104284 NUMA_NO_NODE, __builtin_return_address(0));
104285 }
104286
104287@@ -2129,6 +2246,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
104288 {
104289 struct vm_struct *area;
104290
104291+ BUG_ON(vma->vm_mirror);
104292+
104293 size = PAGE_ALIGN(size);
104294
104295 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
104296@@ -2611,7 +2730,11 @@ static int s_show(struct seq_file *m, void *p)
104297 v->addr, v->addr + v->size, v->size);
104298
104299 if (v->caller)
104300+#ifdef CONFIG_GRKERNSEC_HIDESYM
104301+ seq_printf(m, " %pK", v->caller);
104302+#else
104303 seq_printf(m, " %pS", v->caller);
104304+#endif
104305
104306 if (v->nr_pages)
104307 seq_printf(m, " pages=%d", v->nr_pages);
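The GRKERNSEC_KSTACKOVERFLOW additions in the mm/vmalloc.c hunks reuse a standard deferral pattern: unmap_process_stacks() may run in interrupt context where __vunmap() is unsafe, so items are pushed onto a lock-free llist and drained later from a workqueue in process context. The pattern in isolation (the payload type and the kfree() stand-ins are illustrative):

    #include <linux/llist.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct deferred_item {
        struct llist_node node;
        void *payload;
    };

    static LLIST_HEAD(deferred_list);

    /* runs in process context, where the real free is safe */
    static void drain_work(struct work_struct *w)
    {
        struct llist_node *n = llist_del_all(&deferred_list);

        while (n) {
            struct deferred_item *it =
                llist_entry(n, struct deferred_item, node);

            n = llist_next(n);
            kfree(it->payload);    /* stand-in for __vunmap()/free_kmem_pages() */
            kfree(it);
        }
    }
    static DECLARE_WORK(drain_worker, drain_work);

    /* callable from any context, including hard interrupt */
    static void defer_free(struct deferred_item *it)
    {
        /* llist_add() returns true when the list was empty before */
        if (llist_add(&it->node, &deferred_list))
            schedule_work(&drain_worker);
    }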
104308diff --git a/mm/vmstat.c b/mm/vmstat.c
104309index b37bd49..4d7b3da 100644
104310--- a/mm/vmstat.c
104311+++ b/mm/vmstat.c
104312@@ -20,6 +20,7 @@
104313 #include <linux/writeback.h>
104314 #include <linux/compaction.h>
104315 #include <linux/mm_inline.h>
104316+#include <linux/grsecurity.h>
104317
104318 #include "internal.h"
104319
104320@@ -79,7 +80,7 @@ void vm_events_fold_cpu(int cpu)
104321 *
104322 * vm_stat contains the global counters
104323 */
104324-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
104325+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
104326 EXPORT_SYMBOL(vm_stat);
104327
104328 #ifdef CONFIG_SMP
104329@@ -425,7 +426,7 @@ static inline void fold_diff(int *diff)
104330
104331 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
104332 if (diff[i])
104333- atomic_long_add(diff[i], &vm_stat[i]);
104334+ atomic_long_add_unchecked(diff[i], &vm_stat[i]);
104335 }
104336
104337 /*
104338@@ -457,7 +458,7 @@ static void refresh_cpu_vm_stats(void)
104339 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
104340 if (v) {
104341
104342- atomic_long_add(v, &zone->vm_stat[i]);
104343+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
104344 global_diff[i] += v;
104345 #ifdef CONFIG_NUMA
104346 /* 3 seconds idle till flush */
104347@@ -519,7 +520,7 @@ void cpu_vm_stats_fold(int cpu)
104348
104349 v = p->vm_stat_diff[i];
104350 p->vm_stat_diff[i] = 0;
104351- atomic_long_add(v, &zone->vm_stat[i]);
104352+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
104353 global_diff[i] += v;
104354 }
104355 }
104356@@ -539,8 +540,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
104357 if (pset->vm_stat_diff[i]) {
104358 int v = pset->vm_stat_diff[i];
104359 pset->vm_stat_diff[i] = 0;
104360- atomic_long_add(v, &zone->vm_stat[i]);
104361- atomic_long_add(v, &vm_stat[i]);
104362+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
104363+ atomic_long_add_unchecked(v, &vm_stat[i]);
104364 }
104365 }
104366 #endif
104367@@ -1162,10 +1163,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
104368 stat_items_size += sizeof(struct vm_event_state);
104369 #endif
104370
104371- v = kmalloc(stat_items_size, GFP_KERNEL);
104372+ v = kzalloc(stat_items_size, GFP_KERNEL);
104373 m->private = v;
104374 if (!v)
104375 return ERR_PTR(-ENOMEM);
104376+
104377+#ifdef CONFIG_GRKERNSEC_PROC_ADD
104378+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
104379+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
104380+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
104381+ && !in_group_p(grsec_proc_gid)
104382+#endif
104383+ )
104384+ return (unsigned long *)m->private + *pos;
104385+#endif
104386+#endif
104387+
104388 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
104389 v[i] = global_page_state(i);
104390 v += NR_VM_ZONE_STAT_ITEMS;
104391@@ -1314,10 +1327,16 @@ static int __init setup_vmstat(void)
104392 cpu_notifier_register_done();
104393 #endif
104394 #ifdef CONFIG_PROC_FS
104395- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
104396- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
104397- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
104398- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
104399+ {
104400+ mode_t gr_mode = S_IRUGO;
104401+#ifdef CONFIG_GRKERNSEC_PROC_ADD
104402+ gr_mode = S_IRUSR;
104403+#endif
104404+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
104405+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
104406+ proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
104407+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
104408+ }
104409 #endif
104410 return 0;
104411 }
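The mode change above is the recurring GRKERNSEC_PROC_ADD pattern: /proc entries drop from world-readable S_IRUGO (0444) to root-only S_IRUSR (0400) when the option is set. The same shape in isolation (entry name and fops are placeholders):

    #include <linux/module.h>
    #include <linux/proc_fs.h>
    #include <linux/stat.h>

    static const struct file_operations example_fops;    /* placeholder */

    static int __init example_proc_init(void)
    {
        umode_t mode = S_IRUGO;    /* 0444: world-readable */
    #ifdef CONFIG_GRKERNSEC_PROC_ADD
        mode = S_IRUSR;            /* 0400: root only */
    #endif
        proc_create("example", mode, NULL, &example_fops);
        return 0;
    }
    module_init(example_proc_init);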
104412diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
104413index 44ebd5c..1f732bae 100644
104414--- a/net/8021q/vlan.c
104415+++ b/net/8021q/vlan.c
104416@@ -475,7 +475,7 @@ out:
104417 return NOTIFY_DONE;
104418 }
104419
104420-static struct notifier_block vlan_notifier_block __read_mostly = {
104421+static struct notifier_block vlan_notifier_block = {
104422 .notifier_call = vlan_device_event,
104423 };
104424
104425@@ -550,8 +550,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
104426 err = -EPERM;
104427 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
104428 break;
104429- if ((args.u.name_type >= 0) &&
104430- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
104431+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
104432 struct vlan_net *vn;
104433
104434 vn = net_generic(net, vlan_net_id);
104435diff --git a/net/9p/client.c b/net/9p/client.c
104436index 0004cba..feba240 100644
104437--- a/net/9p/client.c
104438+++ b/net/9p/client.c
104439@@ -596,7 +596,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
104440 len - inline_len);
104441 } else {
104442 err = copy_from_user(ename + inline_len,
104443- uidata, len - inline_len);
104444+ (char __force_user *)uidata, len - inline_len);
104445 if (err) {
104446 err = -EFAULT;
104447 goto out_err;
104448@@ -1571,7 +1571,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
104449 kernel_buf = 1;
104450 indata = data;
104451 } else
104452- indata = (__force char *)udata;
104453+ indata = (__force_kernel char *)udata;
104454 /*
104455 * response header len is 11
104456 * PDU Header(7) + IO Size (4)
104457@@ -1646,7 +1646,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
104458 kernel_buf = 1;
104459 odata = data;
104460 } else
104461- odata = (char *)udata;
104462+ odata = (char __force_kernel *)udata;
104463 req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
104464 P9_ZC_HDR_SZ, kernel_buf, "dqd",
104465 fid->fid, offset, rsize);
104466diff --git a/net/9p/mod.c b/net/9p/mod.c
104467index 6ab36ae..6f1841b 100644
104468--- a/net/9p/mod.c
104469+++ b/net/9p/mod.c
104470@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
104471 void v9fs_register_trans(struct p9_trans_module *m)
104472 {
104473 spin_lock(&v9fs_trans_lock);
104474- list_add_tail(&m->list, &v9fs_trans_list);
104475+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
104476 spin_unlock(&v9fs_trans_lock);
104477 }
104478 EXPORT_SYMBOL(v9fs_register_trans);
104479@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
104480 void v9fs_unregister_trans(struct p9_trans_module *m)
104481 {
104482 spin_lock(&v9fs_trans_lock);
104483- list_del_init(&m->list);
104484+ pax_list_del_init((struct list_head *)&m->list);
104485 spin_unlock(&v9fs_trans_lock);
104486 }
104487 EXPORT_SYMBOL(v9fs_unregister_trans);
104488diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
104489index 80d08f6..de63fd1 100644
104490--- a/net/9p/trans_fd.c
104491+++ b/net/9p/trans_fd.c
104492@@ -428,7 +428,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
104493 oldfs = get_fs();
104494 set_fs(get_ds());
104495 /* The cast to a user pointer is valid due to the set_fs() */
104496- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
104497+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
104498 set_fs(oldfs);
104499
104500 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
104501diff --git a/net/Kconfig b/net/Kconfig
104502index d92afe4..ab63892 100644
104503--- a/net/Kconfig
104504+++ b/net/Kconfig
104505@@ -89,12 +89,8 @@ config NETWORK_SECMARK
104506 to nfmark, but designated for security purposes.
104507 If you are unsure how to answer this question, answer N.
104508
104509-config NET_PTP_CLASSIFY
104510- def_bool n
104511-
104512 config NETWORK_PHY_TIMESTAMPING
104513 bool "Timestamping in PHY devices"
104514- select NET_PTP_CLASSIFY
104515 help
104516 This allows timestamping of network packets by PHYs with
104517 hardware timestamping capabilities. This option adds some
104518@@ -269,7 +265,7 @@ config BQL
104519 config BPF_JIT
104520 bool "enable BPF Just In Time compiler"
104521 depends on HAVE_BPF_JIT
104522- depends on MODULES
104523+ depends on MODULES && X86
104524 ---help---
104525 Berkeley Packet Filter filtering capabilities are normally handled
104526 by an interpreter. This option allows kernel to generate a native
104527diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
104528index af46bc4..f9adfcd 100644
104529--- a/net/appletalk/atalk_proc.c
104530+++ b/net/appletalk/atalk_proc.c
104531@@ -256,7 +256,7 @@ int __init atalk_proc_init(void)
104532 struct proc_dir_entry *p;
104533 int rc = -ENOMEM;
104534
104535- atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
104536+ atalk_proc_dir = proc_mkdir_restrict("atalk", init_net.proc_net);
104537 if (!atalk_proc_dir)
104538 goto out;
104539
104540diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
104541index 876fbe8..8bbea9f 100644
104542--- a/net/atm/atm_misc.c
104543+++ b/net/atm/atm_misc.c
104544@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
104545 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
104546 return 1;
104547 atm_return(vcc, truesize);
104548- atomic_inc(&vcc->stats->rx_drop);
104549+ atomic_inc_unchecked(&vcc->stats->rx_drop);
104550 return 0;
104551 }
104552 EXPORT_SYMBOL(atm_charge);
104553@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
104554 }
104555 }
104556 atm_return(vcc, guess);
104557- atomic_inc(&vcc->stats->rx_drop);
104558+ atomic_inc_unchecked(&vcc->stats->rx_drop);
104559 return NULL;
104560 }
104561 EXPORT_SYMBOL(atm_alloc_charge);
104562@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
104563
104564 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
104565 {
104566-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
104567+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
104568 __SONET_ITEMS
104569 #undef __HANDLE_ITEM
104570 }
104571@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
104572
104573 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
104574 {
104575-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
104576+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
104577 __SONET_ITEMS
104578 #undef __HANDLE_ITEM
104579 }
104580diff --git a/net/atm/lec.c b/net/atm/lec.c
104581index 4c5b8ba..95f7005 100644
104582--- a/net/atm/lec.c
104583+++ b/net/atm/lec.c
104584@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
104585 }
104586
104587 static struct lane2_ops lane2_ops = {
104588- lane2_resolve, /* resolve, spec 3.1.3 */
104589- lane2_associate_req, /* associate_req, spec 3.1.4 */
104590- NULL /* associate indicator, spec 3.1.5 */
104591+ .resolve = lane2_resolve,
104592+ .associate_req = lane2_associate_req,
104593+ .associate_indicator = NULL
104594 };
104595
104596 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
104597diff --git a/net/atm/lec.h b/net/atm/lec.h
104598index 4149db1..f2ab682 100644
104599--- a/net/atm/lec.h
104600+++ b/net/atm/lec.h
104601@@ -48,7 +48,7 @@ struct lane2_ops {
104602 const u8 *tlvs, u32 sizeoftlvs);
104603 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
104604 const u8 *tlvs, u32 sizeoftlvs);
104605-};
104606+} __no_const;
104607
104608 /*
104609 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
104610diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
104611index d1b2d9a..d549f7f 100644
104612--- a/net/atm/mpoa_caches.c
104613+++ b/net/atm/mpoa_caches.c
104614@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
104615
104616
104617 static struct in_cache_ops ingress_ops = {
104618- in_cache_add_entry, /* add_entry */
104619- in_cache_get, /* get */
104620- in_cache_get_with_mask, /* get_with_mask */
104621- in_cache_get_by_vcc, /* get_by_vcc */
104622- in_cache_put, /* put */
104623- in_cache_remove_entry, /* remove_entry */
104624- cache_hit, /* cache_hit */
104625- clear_count_and_expired, /* clear_count */
104626- check_resolving_entries, /* check_resolving */
104627- refresh_entries, /* refresh */
104628- in_destroy_cache /* destroy_cache */
104629+ .add_entry = in_cache_add_entry,
104630+ .get = in_cache_get,
104631+ .get_with_mask = in_cache_get_with_mask,
104632+ .get_by_vcc = in_cache_get_by_vcc,
104633+ .put = in_cache_put,
104634+ .remove_entry = in_cache_remove_entry,
104635+ .cache_hit = cache_hit,
104636+ .clear_count = clear_count_and_expired,
104637+ .check_resolving = check_resolving_entries,
104638+ .refresh = refresh_entries,
104639+ .destroy_cache = in_destroy_cache
104640 };
104641
104642 static struct eg_cache_ops egress_ops = {
104643- eg_cache_add_entry, /* add_entry */
104644- eg_cache_get_by_cache_id, /* get_by_cache_id */
104645- eg_cache_get_by_tag, /* get_by_tag */
104646- eg_cache_get_by_vcc, /* get_by_vcc */
104647- eg_cache_get_by_src_ip, /* get_by_src_ip */
104648- eg_cache_put, /* put */
104649- eg_cache_remove_entry, /* remove_entry */
104650- update_eg_cache_entry, /* update */
104651- clear_expired, /* clear_expired */
104652- eg_destroy_cache /* destroy_cache */
104653+ .add_entry = eg_cache_add_entry,
104654+ .get_by_cache_id = eg_cache_get_by_cache_id,
104655+ .get_by_tag = eg_cache_get_by_tag,
104656+ .get_by_vcc = eg_cache_get_by_vcc,
104657+ .get_by_src_ip = eg_cache_get_by_src_ip,
104658+ .put = eg_cache_put,
104659+ .remove_entry = eg_cache_remove_entry,
104660+ .update = update_eg_cache_entry,
104661+ .clear_expired = clear_expired,
104662+ .destroy_cache = eg_destroy_cache
104663 };
104664
104665
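The lane2_ops/in_cache_ops/eg_cache_ops rewrites above convert positional struct initializers to C99 designated initializers. Both compile to the same tables today, but the designated form stays correct if members are ever reordered, which matters under grsecurity, e.g. with structure-layout randomization. Side by side on a made-up ops table:

    struct example_ops {
        int  (*open)(void);
        void (*close)(void);
        int  (*reset)(void);
    };

    static int  ex_open(void)  { return 0; }
    static void ex_close(void) { }

    /* fragile: meaning depends entirely on member order */
    static struct example_ops positional = { ex_open, ex_close, NULL };

    /* robust: each initializer names its member */
    static struct example_ops designated = {
        .open  = ex_open,
        .close = ex_close,
        .reset = NULL,
    };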
104666diff --git a/net/atm/proc.c b/net/atm/proc.c
104667index bbb6461..cf04016 100644
104668--- a/net/atm/proc.c
104669+++ b/net/atm/proc.c
104670@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
104671 const struct k_atm_aal_stats *stats)
104672 {
104673 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
104674- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
104675- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
104676- atomic_read(&stats->rx_drop));
104677+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
104678+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
104679+ atomic_read_unchecked(&stats->rx_drop));
104680 }
104681
104682 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
104683diff --git a/net/atm/resources.c b/net/atm/resources.c
104684index 0447d5d..3cf4728 100644
104685--- a/net/atm/resources.c
104686+++ b/net/atm/resources.c
104687@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
104688 static void copy_aal_stats(struct k_atm_aal_stats *from,
104689 struct atm_aal_stats *to)
104690 {
104691-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
104692+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
104693 __AAL_STAT_ITEMS
104694 #undef __HANDLE_ITEM
104695 }
104696@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
104697 static void subtract_aal_stats(struct k_atm_aal_stats *from,
104698 struct atm_aal_stats *to)
104699 {
104700-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
104701+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
104702 __AAL_STAT_ITEMS
104703 #undef __HANDLE_ITEM
104704 }
104705diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
104706index 919a5ce..cc6b444 100644
104707--- a/net/ax25/sysctl_net_ax25.c
104708+++ b/net/ax25/sysctl_net_ax25.c
104709@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
104710 {
104711 char path[sizeof("net/ax25/") + IFNAMSIZ];
104712 int k;
104713- struct ctl_table *table;
104714+ ctl_table_no_const *table;
104715
104716 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
104717 if (!table)
104718diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
104719index f04224c..f326579 100644
104720--- a/net/batman-adv/bat_iv_ogm.c
104721+++ b/net/batman-adv/bat_iv_ogm.c
104722@@ -312,7 +312,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
104723
104724 /* randomize initial seqno to avoid collision */
104725 get_random_bytes(&random_seqno, sizeof(random_seqno));
104726- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
104727+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
104728
104729 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
104730 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
104731@@ -917,9 +917,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
104732 batadv_ogm_packet->tvlv_len = htons(tvlv_len);
104733
104734 /* change sequence number to network order */
104735- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
104736+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
104737 batadv_ogm_packet->seqno = htonl(seqno);
104738- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
104739+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
104740
104741 batadv_iv_ogm_slide_own_bcast_window(hard_iface);
104742
104743@@ -1596,7 +1596,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
104744 return;
104745
104746 /* could be changed by schedule_own_packet() */
104747- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
104748+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
104749
104750 if (ogm_packet->flags & BATADV_DIRECTLINK)
104751 has_directlink_flag = true;
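[annotation] OGM sequence numbers are meant to wrap modulo 2^32, so an overflow-trapping refcount would false-positive on them; that is why ogm_seqno (and frag_seqno/bcast_seqno in the following hunks) move to the unchecked type both at the use sites and in the net/batman-adv/types.h declarations below. A two-line reminder of why wraparound is harmless here:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t seqno = UINT32_MAX;        /* about to wrap */
        seqno++;                            /* defined unsigned wraparound */
        printf("%u\n", (unsigned)seqno);    /* prints 0, by design */
        return 0;
}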
104752diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
104753index 022d18a..919daff 100644
104754--- a/net/batman-adv/fragmentation.c
104755+++ b/net/batman-adv/fragmentation.c
104756@@ -450,7 +450,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
104757 frag_header.packet_type = BATADV_UNICAST_FRAG;
104758 frag_header.version = BATADV_COMPAT_VERSION;
104759 frag_header.ttl = BATADV_TTL;
104760- frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
104761+ frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
104762 frag_header.reserved = 0;
104763 frag_header.no = 0;
104764 frag_header.total_size = htons(skb->len);
104765diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
104766index cbd677f..b783347 100644
104767--- a/net/batman-adv/soft-interface.c
104768+++ b/net/batman-adv/soft-interface.c
104769@@ -296,7 +296,7 @@ send:
104770 primary_if->net_dev->dev_addr);
104771
104772 /* set broadcast sequence number */
104773- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
104774+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
104775 bcast_packet->seqno = htonl(seqno);
104776
104777 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
104778@@ -761,7 +761,7 @@ static int batadv_softif_init_late(struct net_device *dev)
104779 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
104780
104781 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
104782- atomic_set(&bat_priv->bcast_seqno, 1);
104783+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
104784 atomic_set(&bat_priv->tt.vn, 0);
104785 atomic_set(&bat_priv->tt.local_changes, 0);
104786 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
104787@@ -775,7 +775,7 @@ static int batadv_softif_init_late(struct net_device *dev)
104788
104789 /* randomize initial seqno to avoid collision */
104790 get_random_bytes(&random_seqno, sizeof(random_seqno));
104791- atomic_set(&bat_priv->frag_seqno, random_seqno);
104792+ atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
104793
104794 bat_priv->primary_if = NULL;
104795 bat_priv->num_ifaces = 0;
104796diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
104797index 8854c05..ee5d5497 100644
104798--- a/net/batman-adv/types.h
104799+++ b/net/batman-adv/types.h
104800@@ -67,7 +67,7 @@ enum batadv_dhcp_recipient {
104801 struct batadv_hard_iface_bat_iv {
104802 unsigned char *ogm_buff;
104803 int ogm_buff_len;
104804- atomic_t ogm_seqno;
104805+ atomic_unchecked_t ogm_seqno;
104806 };
104807
104808 /**
104809@@ -768,7 +768,7 @@ struct batadv_priv {
104810 atomic_t bonding;
104811 atomic_t fragmentation;
104812 atomic_t packet_size_max;
104813- atomic_t frag_seqno;
104814+ atomic_unchecked_t frag_seqno;
104815 #ifdef CONFIG_BATMAN_ADV_BLA
104816 atomic_t bridge_loop_avoidance;
104817 #endif
104818@@ -787,7 +787,7 @@ struct batadv_priv {
104819 #endif
104820 uint32_t isolation_mark;
104821 uint32_t isolation_mark_mask;
104822- atomic_t bcast_seqno;
104823+ atomic_unchecked_t bcast_seqno;
104824 atomic_t bcast_queue_left;
104825 atomic_t batman_queue_left;
104826 char num_ifaces;
104827diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
104828index 80d25c1..aa99a98 100644
104829--- a/net/bluetooth/hci_sock.c
104830+++ b/net/bluetooth/hci_sock.c
104831@@ -1044,7 +1044,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
104832 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
104833 }
104834
104835- len = min_t(unsigned int, len, sizeof(uf));
104836+ len = min((size_t)len, sizeof(uf));
104837 if (copy_from_user(&uf, optval, len)) {
104838 err = -EFAULT;
104839 break;
104840diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
104841index 323f23c..5e27529 100644
104842--- a/net/bluetooth/l2cap_core.c
104843+++ b/net/bluetooth/l2cap_core.c
104844@@ -3548,8 +3548,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
104845 break;
104846
104847 case L2CAP_CONF_RFC:
104848- if (olen == sizeof(rfc))
104849- memcpy(&rfc, (void *)val, olen);
104850+ if (olen != sizeof(rfc))
104851+ break;
104852+
104853+ memcpy(&rfc, (void *)val, olen);
104854
104855 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
104856 rfc.mode != chan->mode)
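[annotation] The L2CAP_CONF_RFC rewrite is a behavioral fix, not just style: the old code silently skipped the memcpy when olen != sizeof(rfc) and then fell through to test rfc.mode on a struct that was never updated for this option; the new form breaks out of the option early on a mis-sized payload, so rfc is only ever examined after a full, validated copy.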
104857diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
104858index e137869..33f3ebd 100644
104859--- a/net/bluetooth/l2cap_sock.c
104860+++ b/net/bluetooth/l2cap_sock.c
104861@@ -628,7 +628,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
104862 struct sock *sk = sock->sk;
104863 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
104864 struct l2cap_options opts;
104865- int len, err = 0;
104866+ int err = 0;
104867+ size_t len = optlen;
104868 u32 opt;
104869
104870 BT_DBG("sk %p", sk);
104871@@ -655,7 +656,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
104872 opts.max_tx = chan->max_tx;
104873 opts.txwin_size = chan->tx_win;
104874
104875- len = min_t(unsigned int, sizeof(opts), optlen);
104876+ len = min(sizeof(opts), len);
104877 if (copy_from_user((char *) &opts, optval, len)) {
104878 err = -EFAULT;
104879 break;
104880@@ -742,7 +743,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
104881 struct bt_security sec;
104882 struct bt_power pwr;
104883 struct l2cap_conn *conn;
104884- int len, err = 0;
104885+ int err = 0;
104886+ size_t len = optlen;
104887 u32 opt;
104888
104889 BT_DBG("sk %p", sk);
104890@@ -766,7 +768,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
104891
104892 sec.level = BT_SECURITY_LOW;
104893
104894- len = min_t(unsigned int, sizeof(sec), optlen);
104895+ len = min(sizeof(sec), len);
104896 if (copy_from_user((char *) &sec, optval, len)) {
104897 err = -EFAULT;
104898 break;
104899@@ -861,7 +863,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
104900
104901 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
104902
104903- len = min_t(unsigned int, sizeof(pwr), optlen);
104904+ len = min(sizeof(pwr), len);
104905 if (copy_from_user((char *) &pwr, optval, len)) {
104906 err = -EFAULT;
104907 break;
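[annotation] These setsockopt paths (and the hci_sock.c one above) stop funnelling the user-supplied option length through a signed int: len becomes a size_t seeded from optlen and is clamped with a type-matched min() against the destination size before copy_from_user(). A userspace model of the hardened clamp — names and the option struct are illustrative:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct opts { int mtu; int mode; };

/* Keep the user-controlled length unsigned and bound it by the
 * destination size before copying, instead of min_t() games on an
 * int that could be negative. */
static int set_opts(struct opts *dst, const void *optval, size_t optlen)
{
        struct opts tmp = { 1500, 0 };          /* defaults */
        size_t len = optlen;

        if (len > sizeof(tmp))
                len = sizeof(tmp);              /* min(sizeof(tmp), len) */
        memcpy(&tmp, optval, len);              /* copy_from_user() in-kernel */
        *dst = tmp;
        return 0;
}

int main(void)
{
        struct opts o, in = { 9000, 3 };

        set_opts(&o, &in, sizeof(in));
        printf("%d %d\n", o.mtu, o.mode);       /* 9000 3 */
        return 0;
}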
104908diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
104909index c603a5e..7f08991 100644
104910--- a/net/bluetooth/rfcomm/sock.c
104911+++ b/net/bluetooth/rfcomm/sock.c
104912@@ -687,7 +687,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
104913 struct sock *sk = sock->sk;
104914 struct bt_security sec;
104915 int err = 0;
104916- size_t len;
104917+ size_t len = optlen;
104918 u32 opt;
104919
104920 BT_DBG("sk %p", sk);
104921@@ -709,7 +709,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
104922
104923 sec.level = BT_SECURITY_LOW;
104924
104925- len = min_t(unsigned int, sizeof(sec), optlen);
104926+ len = min(sizeof(sec), len);
104927 if (copy_from_user((char *) &sec, optval, len)) {
104928 err = -EFAULT;
104929 break;
104930diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
104931index 8e385a0..a5bdd8e 100644
104932--- a/net/bluetooth/rfcomm/tty.c
104933+++ b/net/bluetooth/rfcomm/tty.c
104934@@ -752,7 +752,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
104935 BT_DBG("tty %p id %d", tty, tty->index);
104936
104937 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
104938- dev->channel, dev->port.count);
104939+ dev->channel, atomic_read(&dev->port.count));
104940
104941 err = tty_port_open(&dev->port, tty, filp);
104942 if (err)
104943@@ -775,7 +775,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
104944 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
104945
104946 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
104947- dev->port.count);
104948+ atomic_read(&dev->port.count));
104949
104950 tty_port_close(&dev->port, tty, filp);
104951 }
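[annotation] These two BT_DBG hunks are follow-up to a conversion made elsewhere in this patch: grsecurity turns struct tty_port's open count into an atomic type, so even debug-only readers of dev->port.count must go through atomic_read() to keep compiling.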
104952diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
104953index 1059ed3..d70846a 100644
104954--- a/net/bridge/netfilter/ebtables.c
104955+++ b/net/bridge/netfilter/ebtables.c
104956@@ -1524,7 +1524,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
104957 tmp.valid_hooks = t->table->valid_hooks;
104958 }
104959 mutex_unlock(&ebt_mutex);
104960- if (copy_to_user(user, &tmp, *len) != 0) {
104961+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
104962 BUGPRINT("c2u Didn't work\n");
104963 ret = -EFAULT;
104964 break;
104965@@ -2330,7 +2330,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
104966 goto out;
104967 tmp.valid_hooks = t->valid_hooks;
104968
104969- if (copy_to_user(user, &tmp, *len) != 0) {
104970+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
104971 ret = -EFAULT;
104972 break;
104973 }
104974@@ -2341,7 +2341,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
104975 tmp.entries_size = t->table->entries_size;
104976 tmp.valid_hooks = t->table->valid_hooks;
104977
104978- if (copy_to_user(user, &tmp, *len) != 0) {
104979+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
104980 ret = -EFAULT;
104981 break;
104982 }
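[annotation] All three do_ebt_get_ctl/compat_do_ebt_get_ctl replies copy a fixed-size kernel struct out with a user-controlled *len; the added *len > sizeof(tmp) bound turns an over-long request into -EFAULT instead of an out-of-bounds read of kernel stack. A userspace model of the fix:

#include <stdio.h>
#include <string.h>

struct info { int entries_size; int valid_hooks; };

/* Refuse requests longer than the kernel-side object instead of
 * copying whatever sits next to it on the stack. */
static int get_info(void *user, size_t ulen)
{
        struct info tmp = { 128, 0xf };

        if (ulen > sizeof(tmp))         /* the bound added by the patch */
                return -1;              /* -EFAULT in the kernel */
        memcpy(user, &tmp, ulen);       /* copy_to_user() in-kernel */
        return 0;
}

int main(void)
{
        struct info out;

        printf("%d\n", get_info(&out, sizeof(out)));      /* 0 */
        printf("%d\n", get_info(&out, sizeof(out) + 64)); /* -1 */
        return 0;
}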
104983diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
104984index 0f45522..dab651f 100644
104985--- a/net/caif/cfctrl.c
104986+++ b/net/caif/cfctrl.c
104987@@ -10,6 +10,7 @@
104988 #include <linux/spinlock.h>
104989 #include <linux/slab.h>
104990 #include <linux/pkt_sched.h>
104991+#include <linux/sched.h>
104992 #include <net/caif/caif_layer.h>
104993 #include <net/caif/cfpkt.h>
104994 #include <net/caif/cfctrl.h>
104995@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
104996 memset(&dev_info, 0, sizeof(dev_info));
104997 dev_info.id = 0xff;
104998 cfsrvl_init(&this->serv, 0, &dev_info, false);
104999- atomic_set(&this->req_seq_no, 1);
105000- atomic_set(&this->rsp_seq_no, 1);
105001+ atomic_set_unchecked(&this->req_seq_no, 1);
105002+ atomic_set_unchecked(&this->rsp_seq_no, 1);
105003 this->serv.layer.receive = cfctrl_recv;
105004 sprintf(this->serv.layer.name, "ctrl");
105005 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
105006@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
105007 struct cfctrl_request_info *req)
105008 {
105009 spin_lock_bh(&ctrl->info_list_lock);
105010- atomic_inc(&ctrl->req_seq_no);
105011- req->sequence_no = atomic_read(&ctrl->req_seq_no);
105012+ atomic_inc_unchecked(&ctrl->req_seq_no);
105013+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
105014 list_add_tail(&req->list, &ctrl->list);
105015 spin_unlock_bh(&ctrl->info_list_lock);
105016 }
105017@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
105018 if (p != first)
105019 pr_warn("Requests are not received in order\n");
105020
105021- atomic_set(&ctrl->rsp_seq_no,
105022+ atomic_set_unchecked(&ctrl->rsp_seq_no,
105023 p->sequence_no);
105024 list_del(&p->list);
105025 goto out;
105026diff --git a/net/can/af_can.c b/net/can/af_can.c
105027index ce82337..5d17b4d 100644
105028--- a/net/can/af_can.c
105029+++ b/net/can/af_can.c
105030@@ -884,7 +884,7 @@ static const struct net_proto_family can_family_ops = {
105031 };
105032
105033 /* notifier block for netdevice event */
105034-static struct notifier_block can_netdev_notifier __read_mostly = {
105035+static struct notifier_block can_netdev_notifier = {
105036 .notifier_call = can_notifier,
105037 };
105038
105039diff --git a/net/can/bcm.c b/net/can/bcm.c
105040index dcb75c0..24b1b43 100644
105041--- a/net/can/bcm.c
105042+++ b/net/can/bcm.c
105043@@ -1624,7 +1624,7 @@ static int __init bcm_module_init(void)
105044 }
105045
105046 /* create /proc/net/can-bcm directory */
105047- proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
105048+ proc_dir = proc_mkdir_restrict("can-bcm", init_net.proc_net);
105049 return 0;
105050 }
105051
105052diff --git a/net/can/gw.c b/net/can/gw.c
105053index 050a211..bb9fe33 100644
105054--- a/net/can/gw.c
105055+++ b/net/can/gw.c
105056@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
105057 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
105058
105059 static HLIST_HEAD(cgw_list);
105060-static struct notifier_block notifier;
105061
105062 static struct kmem_cache *cgw_cache __read_mostly;
105063
105064@@ -947,6 +946,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
105065 return err;
105066 }
105067
105068+static struct notifier_block notifier = {
105069+ .notifier_call = cgw_notifier
105070+};
105071+
105072 static __init int cgw_module_init(void)
105073 {
105074 /* sanitize given module parameter */
105075@@ -962,7 +965,6 @@ static __init int cgw_module_init(void)
105076 return -ENOMEM;
105077
105078 /* set notifier */
105079- notifier.notifier_call = cgw_notifier;
105080 register_netdevice_notifier(&notifier);
105081
105082 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
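[annotation] Moving the notifier_block definition below cgw_notifier and giving it a designated initializer removes the runtime .notifier_call assignment from cgw_module_init(): an object fully bound at compile time can be placed in a read-only section by the constify machinery, which a field write during init would fault on. (The same reasoning is behind the __read_mostly removal in net/can/af_can.c above.) A standalone sketch of the end state:

#include <stdio.h>

struct notifier_block {
        int (*notifier_call)(struct notifier_block *nb, unsigned long ev);
};

static int cgw_notifier(struct notifier_block *nb, unsigned long ev)
{
        return 0;
}

/* Fully bound at compile time: eligible for read-only placement,
 * unlike an empty object patched during module init. */
static struct notifier_block notifier = {
        .notifier_call = cgw_notifier
};

int main(void)
{
        return notifier.notifier_call(&notifier, 0UL);
}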
105083diff --git a/net/can/proc.c b/net/can/proc.c
105084index 1a19b98..df2b4ec 100644
105085--- a/net/can/proc.c
105086+++ b/net/can/proc.c
105087@@ -514,7 +514,7 @@ static void can_remove_proc_readentry(const char *name)
105088 void can_init_proc(void)
105089 {
105090 /* create /proc/net/can directory */
105091- can_dir = proc_mkdir("can", init_net.proc_net);
105092+ can_dir = proc_mkdir_restrict("can", init_net.proc_net);
105093
105094 if (!can_dir) {
105095 printk(KERN_INFO "can: failed to create /proc/net/can . "
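[annotation] proc_mkdir_restrict() is a helper grsecurity adds earlier in this patch; it takes the same arguments as proc_mkdir() but creates the directory with restricted permissions, so unprivileged users cannot enumerate CAN state under /proc/net (the same treatment given to can-bcm above).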
105096diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
105097index 96238ba..de6662b 100644
105098--- a/net/ceph/auth_x.c
105099+++ b/net/ceph/auth_x.c
105100@@ -13,8 +13,6 @@
105101 #include "auth_x.h"
105102 #include "auth_x_protocol.h"
105103
105104-#define TEMP_TICKET_BUF_LEN 256
105105-
105106 static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed);
105107
105108 static int ceph_x_is_authenticated(struct ceph_auth_client *ac)
105109@@ -64,7 +62,7 @@ static int ceph_x_encrypt(struct ceph_crypto_key *secret,
105110 }
105111
105112 static int ceph_x_decrypt(struct ceph_crypto_key *secret,
105113- void **p, void *end, void *obuf, size_t olen)
105114+ void **p, void *end, void **obuf, size_t olen)
105115 {
105116 struct ceph_x_encrypt_header head;
105117 size_t head_len = sizeof(head);
105118@@ -75,8 +73,14 @@ static int ceph_x_decrypt(struct ceph_crypto_key *secret,
105119 return -EINVAL;
105120
105121 dout("ceph_x_decrypt len %d\n", len);
105122- ret = ceph_decrypt2(secret, &head, &head_len, obuf, &olen,
105123- *p, len);
105124+ if (*obuf == NULL) {
105125+ *obuf = kmalloc(len, GFP_NOFS);
105126+ if (!*obuf)
105127+ return -ENOMEM;
105128+ olen = len;
105129+ }
105130+
105131+ ret = ceph_decrypt2(secret, &head, &head_len, *obuf, &olen, *p, len);
105132 if (ret)
105133 return ret;
105134 if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC)
105135@@ -129,145 +133,154 @@ static void remove_ticket_handler(struct ceph_auth_client *ac,
105136 kfree(th);
105137 }
105138
105139+static int process_one_ticket(struct ceph_auth_client *ac,
105140+ struct ceph_crypto_key *secret,
105141+ void **p, void *end)
105142+{
105143+ struct ceph_x_info *xi = ac->private;
105144+ int type;
105145+ u8 tkt_struct_v, blob_struct_v;
105146+ struct ceph_x_ticket_handler *th;
105147+ void *dbuf = NULL;
105148+ void *dp, *dend;
105149+ int dlen;
105150+ char is_enc;
105151+ struct timespec validity;
105152+ struct ceph_crypto_key old_key;
105153+ void *ticket_buf = NULL;
105154+ void *tp, *tpend;
105155+ struct ceph_timespec new_validity;
105156+ struct ceph_crypto_key new_session_key;
105157+ struct ceph_buffer *new_ticket_blob;
105158+ unsigned long new_expires, new_renew_after;
105159+ u64 new_secret_id;
105160+ int ret;
105161+
105162+ ceph_decode_need(p, end, sizeof(u32) + 1, bad);
105163+
105164+ type = ceph_decode_32(p);
105165+ dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
105166+
105167+ tkt_struct_v = ceph_decode_8(p);
105168+ if (tkt_struct_v != 1)
105169+ goto bad;
105170+
105171+ th = get_ticket_handler(ac, type);
105172+ if (IS_ERR(th)) {
105173+ ret = PTR_ERR(th);
105174+ goto out;
105175+ }
105176+
105177+ /* blob for me */
105178+ dlen = ceph_x_decrypt(secret, p, end, &dbuf, 0);
105179+ if (dlen <= 0) {
105180+ ret = dlen;
105181+ goto out;
105182+ }
105183+ dout(" decrypted %d bytes\n", dlen);
105184+ dp = dbuf;
105185+ dend = dp + dlen;
105186+
105187+ tkt_struct_v = ceph_decode_8(&dp);
105188+ if (tkt_struct_v != 1)
105189+ goto bad;
105190+
105191+ memcpy(&old_key, &th->session_key, sizeof(old_key));
105192+ ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
105193+ if (ret)
105194+ goto out;
105195+
105196+ ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
105197+ ceph_decode_timespec(&validity, &new_validity);
105198+ new_expires = get_seconds() + validity.tv_sec;
105199+ new_renew_after = new_expires - (validity.tv_sec / 4);
105200+ dout(" expires=%lu renew_after=%lu\n", new_expires,
105201+ new_renew_after);
105202+
105203+ /* ticket blob for service */
105204+ ceph_decode_8_safe(p, end, is_enc, bad);
105205+ if (is_enc) {
105206+ /* encrypted */
105207+ dout(" encrypted ticket\n");
105208+ dlen = ceph_x_decrypt(&old_key, p, end, &ticket_buf, 0);
105209+ if (dlen < 0) {
105210+ ret = dlen;
105211+ goto out;
105212+ }
105213+ tp = ticket_buf;
105214+ dlen = ceph_decode_32(&tp);
105215+ } else {
105216+ /* unencrypted */
105217+ ceph_decode_32_safe(p, end, dlen, bad);
105218+ ticket_buf = kmalloc(dlen, GFP_NOFS);
105219+ if (!ticket_buf) {
105220+ ret = -ENOMEM;
105221+ goto out;
105222+ }
105223+ tp = ticket_buf;
105224+ ceph_decode_need(p, end, dlen, bad);
105225+ ceph_decode_copy(p, ticket_buf, dlen);
105226+ }
105227+ tpend = tp + dlen;
105228+ dout(" ticket blob is %d bytes\n", dlen);
105229+ ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
105230+ blob_struct_v = ceph_decode_8(&tp);
105231+ new_secret_id = ceph_decode_64(&tp);
105232+ ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
105233+ if (ret)
105234+ goto out;
105235+
105236+ /* all is well, update our ticket */
105237+ ceph_crypto_key_destroy(&th->session_key);
105238+ if (th->ticket_blob)
105239+ ceph_buffer_put(th->ticket_blob);
105240+ th->session_key = new_session_key;
105241+ th->ticket_blob = new_ticket_blob;
105242+ th->validity = new_validity;
105243+ th->secret_id = new_secret_id;
105244+ th->expires = new_expires;
105245+ th->renew_after = new_renew_after;
105246+ dout(" got ticket service %d (%s) secret_id %lld len %d\n",
105247+ type, ceph_entity_type_name(type), th->secret_id,
105248+ (int)th->ticket_blob->vec.iov_len);
105249+ xi->have_keys |= th->service;
105250+
105251+out:
105252+ kfree(ticket_buf);
105253+ kfree(dbuf);
105254+ return ret;
105255+
105256+bad:
105257+ ret = -EINVAL;
105258+ goto out;
105259+}
105260+
105261 static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
105262 struct ceph_crypto_key *secret,
105263 void *buf, void *end)
105264 {
105265- struct ceph_x_info *xi = ac->private;
105266- int num;
105267 void *p = buf;
105268- int ret;
105269- char *dbuf;
105270- char *ticket_buf;
105271 u8 reply_struct_v;
105272+ u32 num;
105273+ int ret;
105274
105275- dbuf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
105276- if (!dbuf)
105277- return -ENOMEM;
105278-
105279- ret = -ENOMEM;
105280- ticket_buf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
105281- if (!ticket_buf)
105282- goto out_dbuf;
105283-
105284- ceph_decode_need(&p, end, 1 + sizeof(u32), bad);
105285- reply_struct_v = ceph_decode_8(&p);
105286+ ceph_decode_8_safe(&p, end, reply_struct_v, bad);
105287 if (reply_struct_v != 1)
105288- goto bad;
105289- num = ceph_decode_32(&p);
105290+ return -EINVAL;
105291+
105292+ ceph_decode_32_safe(&p, end, num, bad);
105293 dout("%d tickets\n", num);
105294+
105295 while (num--) {
105296- int type;
105297- u8 tkt_struct_v, blob_struct_v;
105298- struct ceph_x_ticket_handler *th;
105299- void *dp, *dend;
105300- int dlen;
105301- char is_enc;
105302- struct timespec validity;
105303- struct ceph_crypto_key old_key;
105304- void *tp, *tpend;
105305- struct ceph_timespec new_validity;
105306- struct ceph_crypto_key new_session_key;
105307- struct ceph_buffer *new_ticket_blob;
105308- unsigned long new_expires, new_renew_after;
105309- u64 new_secret_id;
105310-
105311- ceph_decode_need(&p, end, sizeof(u32) + 1, bad);
105312-
105313- type = ceph_decode_32(&p);
105314- dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
105315-
105316- tkt_struct_v = ceph_decode_8(&p);
105317- if (tkt_struct_v != 1)
105318- goto bad;
105319-
105320- th = get_ticket_handler(ac, type);
105321- if (IS_ERR(th)) {
105322- ret = PTR_ERR(th);
105323- goto out;
105324- }
105325-
105326- /* blob for me */
105327- dlen = ceph_x_decrypt(secret, &p, end, dbuf,
105328- TEMP_TICKET_BUF_LEN);
105329- if (dlen <= 0) {
105330- ret = dlen;
105331- goto out;
105332- }
105333- dout(" decrypted %d bytes\n", dlen);
105334- dend = dbuf + dlen;
105335- dp = dbuf;
105336-
105337- tkt_struct_v = ceph_decode_8(&dp);
105338- if (tkt_struct_v != 1)
105339- goto bad;
105340-
105341- memcpy(&old_key, &th->session_key, sizeof(old_key));
105342- ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
105343+ ret = process_one_ticket(ac, secret, &p, end);
105344 if (ret)
105345- goto out;
105346-
105347- ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
105348- ceph_decode_timespec(&validity, &new_validity);
105349- new_expires = get_seconds() + validity.tv_sec;
105350- new_renew_after = new_expires - (validity.tv_sec / 4);
105351- dout(" expires=%lu renew_after=%lu\n", new_expires,
105352- new_renew_after);
105353-
105354- /* ticket blob for service */
105355- ceph_decode_8_safe(&p, end, is_enc, bad);
105356- tp = ticket_buf;
105357- if (is_enc) {
105358- /* encrypted */
105359- dout(" encrypted ticket\n");
105360- dlen = ceph_x_decrypt(&old_key, &p, end, ticket_buf,
105361- TEMP_TICKET_BUF_LEN);
105362- if (dlen < 0) {
105363- ret = dlen;
105364- goto out;
105365- }
105366- dlen = ceph_decode_32(&tp);
105367- } else {
105368- /* unencrypted */
105369- ceph_decode_32_safe(&p, end, dlen, bad);
105370- ceph_decode_need(&p, end, dlen, bad);
105371- ceph_decode_copy(&p, ticket_buf, dlen);
105372- }
105373- tpend = tp + dlen;
105374- dout(" ticket blob is %d bytes\n", dlen);
105375- ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
105376- blob_struct_v = ceph_decode_8(&tp);
105377- new_secret_id = ceph_decode_64(&tp);
105378- ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
105379- if (ret)
105380- goto out;
105381-
105382- /* all is well, update our ticket */
105383- ceph_crypto_key_destroy(&th->session_key);
105384- if (th->ticket_blob)
105385- ceph_buffer_put(th->ticket_blob);
105386- th->session_key = new_session_key;
105387- th->ticket_blob = new_ticket_blob;
105388- th->validity = new_validity;
105389- th->secret_id = new_secret_id;
105390- th->expires = new_expires;
105391- th->renew_after = new_renew_after;
105392- dout(" got ticket service %d (%s) secret_id %lld len %d\n",
105393- type, ceph_entity_type_name(type), th->secret_id,
105394- (int)th->ticket_blob->vec.iov_len);
105395- xi->have_keys |= th->service;
105396+ return ret;
105397 }
105398
105399- ret = 0;
105400-out:
105401- kfree(ticket_buf);
105402-out_dbuf:
105403- kfree(dbuf);
105404- return ret;
105405+ return 0;
105406
105407 bad:
105408- ret = -EINVAL;
105409- goto out;
105410+ return -EINVAL;
105411 }
105412
105413 static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
105414@@ -583,13 +596,14 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
105415 struct ceph_x_ticket_handler *th;
105416 int ret = 0;
105417 struct ceph_x_authorize_reply reply;
105418+ void *preply = &reply;
105419 void *p = au->reply_buf;
105420 void *end = p + sizeof(au->reply_buf);
105421
105422 th = get_ticket_handler(ac, au->service);
105423 if (IS_ERR(th))
105424 return PTR_ERR(th);
105425- ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply));
105426+ ret = ceph_x_decrypt(&th->session_key, &p, end, &preply, sizeof(reply));
105427 if (ret < 0)
105428 return ret;
105429 if (ret != sizeof(reply))
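[annotation] This auth_x.c rework matches the upstream hardening against over-long cephx auth tickets: the fixed 256-byte TEMP_TICKET_BUF_LEN bounce buffers are gone, ceph_x_decrypt() now takes void **obuf and allocates len bytes itself when the caller passes *obuf == NULL, and the per-ticket parsing moves into process_one_ticket() using the _safe decode macros. A standalone sketch of the caller-or-callee allocation contract — names mirror the patch, but the body stands in for the real decrypt:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* If the caller passes *obuf == NULL, the helper allocates a buffer
 * sized from the wire length, so a ticket larger than any fixed
 * bounce buffer can no longer overflow it. */
static int x_decrypt(const void *src, size_t len, void **obuf, size_t olen)
{
        if (*obuf == NULL) {
                *obuf = malloc(len);    /* kmalloc(len, GFP_NOFS) in-kernel */
                if (!*obuf)
                        return -1;      /* -ENOMEM */
                olen = len;
        }
        if (len > olen)
                return -1;              /* caller-supplied buffer too small */
        memcpy(*obuf, src, len);        /* stands in for the real decrypt */
        return (int)len;
}

int main(void)
{
        const char ticket[300] = "bigger than the old 256-byte buffer";
        void *dbuf = NULL;
        int n = x_decrypt(ticket, sizeof(ticket), &dbuf, 0);

        printf("%d\n", n);              /* 300 */
        free(dbuf);
        return 0;
}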
105430diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
105431index 1948d59..9e854d5 100644
105432--- a/net/ceph/messenger.c
105433+++ b/net/ceph/messenger.c
105434@@ -187,7 +187,7 @@ static void con_fault(struct ceph_connection *con);
105435 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
105436
105437 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
105438-static atomic_t addr_str_seq = ATOMIC_INIT(0);
105439+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
105440
105441 static struct page *zero_page; /* used in certain error cases */
105442
105443@@ -198,7 +198,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
105444 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
105445 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
105446
105447- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
105448+ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
105449 s = addr_str[i];
105450
105451 switch (ss->ss_family) {
105452diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
105453index 067d3af..61fcfc3 100644
105454--- a/net/ceph/mon_client.c
105455+++ b/net/ceph/mon_client.c
105456@@ -1181,7 +1181,15 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
105457 if (!m) {
105458 pr_info("alloc_msg unknown type %d\n", type);
105459 *skip = 1;
105460+ } else if (front_len > m->front_alloc_len) {
105461+ pr_warning("mon_alloc_msg front %d > prealloc %d (%u#%llu)\n",
105462+ front_len, m->front_alloc_len,
105463+ (unsigned int)con->peer_name.type,
105464+ le64_to_cpu(con->peer_name.num));
105465+ ceph_msg_put(m);
105466+ m = ceph_msg_new(type, front_len, GFP_NOFS, false);
105467 }
105468+
105469 return m;
105470 }
105471
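[annotation] mon_alloc_msg() used to hand back its preallocated message even when the monitor announced a front section larger than front_alloc_len; the new branch logs the mismatch, drops the stale message, and allocates one of the right size, so a misbehaving or malicious monitor can no longer overrun the preallocated buffer.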
105472diff --git a/net/compat.c b/net/compat.c
105473index bc8aeef..f9c070c 100644
105474--- a/net/compat.c
105475+++ b/net/compat.c
105476@@ -73,9 +73,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
105477 return -EFAULT;
105478 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
105479 kmsg->msg_namelen = sizeof(struct sockaddr_storage);
105480- kmsg->msg_name = compat_ptr(tmp1);
105481- kmsg->msg_iov = compat_ptr(tmp2);
105482- kmsg->msg_control = compat_ptr(tmp3);
105483+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
105484+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
105485+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
105486 return 0;
105487 }
105488
105489@@ -87,7 +87,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
105490
105491 if (kern_msg->msg_name && kern_msg->msg_namelen) {
105492 if (mode == VERIFY_READ) {
105493- int err = move_addr_to_kernel(kern_msg->msg_name,
105494+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
105495 kern_msg->msg_namelen,
105496 kern_address);
105497 if (err < 0)
105498@@ -100,7 +100,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
105499 }
105500
105501 tot_len = iov_from_user_compat_to_kern(kern_iov,
105502- (struct compat_iovec __user *)kern_msg->msg_iov,
105503+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
105504 kern_msg->msg_iovlen);
105505 if (tot_len >= 0)
105506 kern_msg->msg_iov = kern_iov;
105507@@ -120,20 +120,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
105508
105509 #define CMSG_COMPAT_FIRSTHDR(msg) \
105510 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
105511- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
105512+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
105513 (struct compat_cmsghdr __user *)NULL)
105514
105515 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
105516 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
105517 (ucmlen) <= (unsigned long) \
105518 ((mhdr)->msg_controllen - \
105519- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
105520+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
105521
105522 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
105523 struct compat_cmsghdr __user *cmsg, int cmsg_len)
105524 {
105525 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
105526- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
105527+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
105528 msg->msg_controllen)
105529 return NULL;
105530 return (struct compat_cmsghdr __user *)ptr;
105531@@ -223,7 +223,7 @@ Efault:
105532
105533 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
105534 {
105535- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
105536+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
105537 struct compat_cmsghdr cmhdr;
105538 struct compat_timeval ctv;
105539 struct compat_timespec cts[3];
105540@@ -279,7 +279,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
105541
105542 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
105543 {
105544- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
105545+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
105546 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
105547 int fdnum = scm->fp->count;
105548 struct file **fp = scm->fp->fp;
105549@@ -367,7 +367,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
105550 return -EFAULT;
105551 old_fs = get_fs();
105552 set_fs(KERNEL_DS);
105553- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
105554+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
105555 set_fs(old_fs);
105556
105557 return err;
105558@@ -428,7 +428,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
105559 len = sizeof(ktime);
105560 old_fs = get_fs();
105561 set_fs(KERNEL_DS);
105562- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
105563+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
105564 set_fs(old_fs);
105565
105566 if (!err) {
105567@@ -571,7 +571,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
105568 case MCAST_JOIN_GROUP:
105569 case MCAST_LEAVE_GROUP:
105570 {
105571- struct compat_group_req __user *gr32 = (void *)optval;
105572+ struct compat_group_req __user *gr32 = (void __user *)optval;
105573 struct group_req __user *kgr =
105574 compat_alloc_user_space(sizeof(struct group_req));
105575 u32 interface;
105576@@ -592,7 +592,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
105577 case MCAST_BLOCK_SOURCE:
105578 case MCAST_UNBLOCK_SOURCE:
105579 {
105580- struct compat_group_source_req __user *gsr32 = (void *)optval;
105581+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
105582 struct group_source_req __user *kgsr = compat_alloc_user_space(
105583 sizeof(struct group_source_req));
105584 u32 interface;
105585@@ -613,7 +613,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
105586 }
105587 case MCAST_MSFILTER:
105588 {
105589- struct compat_group_filter __user *gf32 = (void *)optval;
105590+ struct compat_group_filter __user *gf32 = (void __user *)optval;
105591 struct group_filter __user *kgf;
105592 u32 interface, fmode, numsrc;
105593
105594@@ -651,7 +651,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
105595 char __user *optval, int __user *optlen,
105596 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
105597 {
105598- struct compat_group_filter __user *gf32 = (void *)optval;
105599+ struct compat_group_filter __user *gf32 = (void __user *)optval;
105600 struct group_filter __user *kgf;
105601 int __user *koptlen;
105602 u32 interface, fmode, numsrc;
105603@@ -804,7 +804,7 @@ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
105604
105605 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
105606 return -EINVAL;
105607- if (copy_from_user(a, args, nas[call]))
105608+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
105609 return -EFAULT;
105610 a0 = a[0];
105611 a1 = a[1];
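[annotation] Two things happen in net/compat.c. First, pointers that deliberately cross the user/kernel boundary — compat_ptr() results stored into a kernel msghdr, and kernel buffers passed to sockopt helpers under set_fs(KERNEL_DS) — gain __force_user/__force_kernel casts so that sparse and grsecurity's plugins, which treat __user as a distinct address space, accept the crossings. Second, compat socketcall gets a defensive bound: nas[call] is checked against sizeof a before the copy_from_user(). A sketch of the address-space idiom the casts serve; the combined-macro spelling is an assumption, __user and __force themselves are standard kernel annotations:

#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

/* grsecurity's __force_user is, in effect, "__force __user": */
#define __force_user    __force __user

static void takes_user_ptr(const void __user *p) { (void)p; }

static void bridge(const void *kernel_buf)
{
        /* Under set_fs(KERNEL_DS) a kernel pointer is deliberately fed
         * to a user-pointer API; the cast documents the crossing and
         * silences the address-space checker. */
        takes_user_ptr((const void __force_user *)kernel_buf);
}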
105612diff --git a/net/core/Makefile b/net/core/Makefile
105613index 71093d9..a8a035b 100644
105614--- a/net/core/Makefile
105615+++ b/net/core/Makefile
105616@@ -21,6 +21,5 @@ obj-$(CONFIG_FIB_RULES) += fib_rules.o
105617 obj-$(CONFIG_TRACEPOINTS) += net-traces.o
105618 obj-$(CONFIG_NET_DROP_MONITOR) += drop_monitor.o
105619 obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += timestamping.o
105620-obj-$(CONFIG_NET_PTP_CLASSIFY) += ptp_classifier.o
105621 obj-$(CONFIG_CGROUP_NET_PRIO) += netprio_cgroup.o
105622 obj-$(CONFIG_CGROUP_NET_CLASSID) += netclassid_cgroup.o
105623diff --git a/net/core/datagram.c b/net/core/datagram.c
105624index 488dd1a..7179f0f 100644
105625--- a/net/core/datagram.c
105626+++ b/net/core/datagram.c
105627@@ -301,7 +301,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
105628 }
105629
105630 kfree_skb(skb);
105631- atomic_inc(&sk->sk_drops);
105632+ atomic_inc_unchecked(&sk->sk_drops);
105633 sk_mem_reclaim_partial(sk);
105634
105635 return err;
105636diff --git a/net/core/dev.c b/net/core/dev.c
105637index 367a586..ef2fe17 100644
105638--- a/net/core/dev.c
105639+++ b/net/core/dev.c
105640@@ -1672,14 +1672,14 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
105641 {
105642 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
105643 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
105644- atomic_long_inc(&dev->rx_dropped);
105645+ atomic_long_inc_unchecked(&dev->rx_dropped);
105646 kfree_skb(skb);
105647 return NET_RX_DROP;
105648 }
105649 }
105650
105651 if (unlikely(!is_skb_forwardable(dev, skb))) {
105652- atomic_long_inc(&dev->rx_dropped);
105653+ atomic_long_inc_unchecked(&dev->rx_dropped);
105654 kfree_skb(skb);
105655 return NET_RX_DROP;
105656 }
105657@@ -2476,7 +2476,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
105658
105659 struct dev_gso_cb {
105660 void (*destructor)(struct sk_buff *skb);
105661-};
105662+} __no_const;
105663
105664 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
105665
105666@@ -2932,7 +2932,7 @@ recursion_alert:
105667 rc = -ENETDOWN;
105668 rcu_read_unlock_bh();
105669
105670- atomic_long_inc(&dev->tx_dropped);
105671+ atomic_long_inc_unchecked(&dev->tx_dropped);
105672 kfree_skb(skb);
105673 return rc;
105674 out:
105675@@ -3276,7 +3276,7 @@ enqueue:
105676
105677 local_irq_restore(flags);
105678
105679- atomic_long_inc(&skb->dev->rx_dropped);
105680+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
105681 kfree_skb(skb);
105682 return NET_RX_DROP;
105683 }
105684@@ -3353,7 +3353,7 @@ int netif_rx_ni(struct sk_buff *skb)
105685 }
105686 EXPORT_SYMBOL(netif_rx_ni);
105687
105688-static void net_tx_action(struct softirq_action *h)
105689+static __latent_entropy void net_tx_action(void)
105690 {
105691 struct softnet_data *sd = &__get_cpu_var(softnet_data);
105692
105693@@ -3686,7 +3686,7 @@ ncls:
105694 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
105695 } else {
105696 drop:
105697- atomic_long_inc(&skb->dev->rx_dropped);
105698+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
105699 kfree_skb(skb);
105700 /* Jamal, now you will not able to escape explaining
105701 * me how you were going to use this. :-)
105702@@ -4406,7 +4406,7 @@ void netif_napi_del(struct napi_struct *napi)
105703 }
105704 EXPORT_SYMBOL(netif_napi_del);
105705
105706-static void net_rx_action(struct softirq_action *h)
105707+static __latent_entropy void net_rx_action(void)
105708 {
105709 struct softnet_data *sd = &__get_cpu_var(softnet_data);
105710 unsigned long time_limit = jiffies + 2;
105711@@ -6403,8 +6403,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
105712 } else {
105713 netdev_stats_to_stats64(storage, &dev->stats);
105714 }
105715- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
105716- storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
105717+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
105718+ storage->tx_dropped += atomic_long_read_unchecked(&dev->tx_dropped);
105719 return storage;
105720 }
105721 EXPORT_SYMBOL(dev_get_stats);
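[annotation] Three independent grsecurity touches in net/core/dev.c: the rx_dropped/tx_dropped/sk_drops statistics counters go unchecked (they may wrap without harm); struct dev_gso_cb is marked __no_const because its destructor field must remain assignable under constification; and net_tx_action()/net_rx_action() drop the unused softirq_action argument and gain __latent_entropy, which has the latent-entropy gcc plugin mix these frequently-run paths into the kernel's entropy accumulation.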
105722diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
105723index cf999e0..c59a975 100644
105724--- a/net/core/dev_ioctl.c
105725+++ b/net/core/dev_ioctl.c
105726@@ -366,9 +366,13 @@ void dev_load(struct net *net, const char *name)
105727 if (no_module && capable(CAP_NET_ADMIN))
105728 no_module = request_module("netdev-%s", name);
105729 if (no_module && capable(CAP_SYS_MODULE)) {
105730+#ifdef CONFIG_GRKERNSEC_MODHARDEN
105731+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
105732+#else
105733 if (!request_module("%s", name))
105734 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
105735 name);
105736+#endif
105737 }
105738 }
105739 EXPORT_SYMBOL(dev_load);
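[annotation] Under GRKERNSEC_MODHARDEN, a module auto-load triggered by a network-device name is routed through ___request_module(true, "grsec_modharden_netdev", ...) instead of a raw request_module(), letting the hardened module loader apply its policy and logging to this user-triggerable load path.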
105740diff --git a/net/core/filter.c b/net/core/filter.c
105741index 1dbf646..0f95703 100644
105742--- a/net/core/filter.c
105743+++ b/net/core/filter.c
105744@@ -1,16 +1,11 @@
105745 /*
105746 * Linux Socket Filter - Kernel level socket filtering
105747 *
105748- * Based on the design of the Berkeley Packet Filter. The new
105749- * internal format has been designed by PLUMgrid:
105750+ * Author:
105751+ * Jay Schulist <jschlst@samba.org>
105752 *
105753- * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
105754- *
105755- * Authors:
105756- *
105757- * Jay Schulist <jschlst@samba.org>
105758- * Alexei Starovoitov <ast@plumgrid.com>
105759- * Daniel Borkmann <dborkman@redhat.com>
105760+ * Based on the design of:
105761+ * - The Berkeley Packet Filter
105762 *
105763 * This program is free software; you can redistribute it and/or
105764 * modify it under the terms of the GNU General Public License
105765@@ -45,27 +40,6 @@
105766 #include <linux/seccomp.h>
105767 #include <linux/if_vlan.h>
105768
105769-/* Registers */
105770-#define BPF_R0 regs[BPF_REG_0]
105771-#define BPF_R1 regs[BPF_REG_1]
105772-#define BPF_R2 regs[BPF_REG_2]
105773-#define BPF_R3 regs[BPF_REG_3]
105774-#define BPF_R4 regs[BPF_REG_4]
105775-#define BPF_R5 regs[BPF_REG_5]
105776-#define BPF_R6 regs[BPF_REG_6]
105777-#define BPF_R7 regs[BPF_REG_7]
105778-#define BPF_R8 regs[BPF_REG_8]
105779-#define BPF_R9 regs[BPF_REG_9]
105780-#define BPF_R10 regs[BPF_REG_10]
105781-
105782-/* Named registers */
105783-#define DST regs[insn->dst_reg]
105784-#define SRC regs[insn->src_reg]
105785-#define FP regs[BPF_REG_FP]
105786-#define ARG1 regs[BPF_REG_ARG1]
105787-#define CTX regs[BPF_REG_CTX]
105788-#define IMM insn->imm
105789-
105790 /* No hurry in this branch
105791 *
105792 * Exported for the bpf jit load helper.
105793@@ -78,9 +52,9 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
105794 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
105795 else if (k >= SKF_LL_OFF)
105796 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
105797+
105798 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
105799 return ptr;
105800-
105801 return NULL;
105802 }
105803
105804@@ -89,7 +63,6 @@ static inline void *load_pointer(const struct sk_buff *skb, int k,
105805 {
105806 if (k >= 0)
105807 return skb_header_pointer(skb, k, size, buffer);
105808-
105809 return bpf_internal_load_pointer_neg_helper(skb, k, size);
105810 }
105811
105812@@ -135,960 +108,309 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
105813 }
105814 EXPORT_SYMBOL(sk_filter);
105815
105816-/* Base function for offset calculation. Needs to go into .text section,
105817- * therefore keeping it non-static as well; will also be used by JITs
105818- * anyway later on, so do not let the compiler omit it.
105819- */
105820-noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
105821-{
105822- return 0;
105823-}
105824-
105825 /**
105826- * __sk_run_filter - run a filter on a given context
105827- * @ctx: buffer to run the filter on
105828- * @insn: filter to apply
105829+ * sk_run_filter - run a filter on a socket
105830+ * @skb: buffer to run the filter on
105831+ * @fentry: filter to apply
105832 *
105833- * Decode and apply filter instructions to the skb->data. Return length to
105834- * keep, 0 for none. @ctx is the data we are operating on, @insn is the
105835- * array of filter instructions.
105836+ * Decode and apply filter instructions to the skb->data.
105837+ * Return length to keep, 0 for none. @skb is the data we are
105838+ * filtering, @filter is the array of filter instructions.
105839+ * Because all jumps are guaranteed to be before last instruction,
105840+ * and last instruction guaranteed to be a RET, we dont need to check
105841+ * flen. (We used to pass to this function the length of filter)
105842 */
105843-static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
105844+unsigned int sk_run_filter(const struct sk_buff *skb,
105845+ const struct sock_filter *fentry)
105846 {
105847- u64 stack[MAX_BPF_STACK / sizeof(u64)];
105848- u64 regs[MAX_BPF_REG], tmp;
105849- static const void *jumptable[256] = {
105850- [0 ... 255] = &&default_label,
105851- /* Now overwrite non-defaults ... */
105852- /* 32 bit ALU operations */
105853- [BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
105854- [BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
105855- [BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
105856- [BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
105857- [BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
105858- [BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
105859- [BPF_ALU | BPF_OR | BPF_X] = &&ALU_OR_X,
105860- [BPF_ALU | BPF_OR | BPF_K] = &&ALU_OR_K,
105861- [BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
105862- [BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
105863- [BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
105864- [BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
105865- [BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
105866- [BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
105867- [BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
105868- [BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
105869- [BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
105870- [BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
105871- [BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
105872- [BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
105873- [BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
105874- [BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
105875- [BPF_ALU | BPF_NEG] = &&ALU_NEG,
105876- [BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
105877- [BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
105878- /* 64 bit ALU operations */
105879- [BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
105880- [BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
105881- [BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
105882- [BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
105883- [BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
105884- [BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
105885- [BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
105886- [BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
105887- [BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
105888- [BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
105889- [BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
105890- [BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
105891- [BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
105892- [BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
105893- [BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
105894- [BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
105895- [BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
105896- [BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
105897- [BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
105898- [BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
105899- [BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
105900- [BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
105901- [BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
105902- [BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
105903- [BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
105904- /* Call instruction */
105905- [BPF_JMP | BPF_CALL] = &&JMP_CALL,
105906- /* Jumps */
105907- [BPF_JMP | BPF_JA] = &&JMP_JA,
105908- [BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
105909- [BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
105910- [BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
105911- [BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
105912- [BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
105913- [BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
105914- [BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
105915- [BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
105916- [BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
105917- [BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
105918- [BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
105919- [BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
105920- [BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
105921- [BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
105922- /* Program return */
105923- [BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
105924- /* Store instructions */
105925- [BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
105926- [BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
105927- [BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
105928- [BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
105929- [BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
105930- [BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
105931- [BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
105932- [BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
105933- [BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
105934- [BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
105935- /* Load instructions */
105936- [BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
105937- [BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
105938- [BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
105939- [BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
105940- [BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
105941- [BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
105942- [BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
105943- [BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
105944- [BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
105945- [BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
105946- };
105947 void *ptr;
105948- int off;
105949-
105950-#define CONT ({ insn++; goto select_insn; })
105951-#define CONT_JMP ({ insn++; goto select_insn; })
105952-
105953- FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
105954- ARG1 = (u64) (unsigned long) ctx;
105955-
105956- /* Registers used in classic BPF programs need to be reset first. */
105957- regs[BPF_REG_A] = 0;
105958- regs[BPF_REG_X] = 0;
105959-
105960-select_insn:
105961- goto *jumptable[insn->code];
105962-
105963- /* ALU */
105964-#define ALU(OPCODE, OP) \
105965- ALU64_##OPCODE##_X: \
105966- DST = DST OP SRC; \
105967- CONT; \
105968- ALU_##OPCODE##_X: \
105969- DST = (u32) DST OP (u32) SRC; \
105970- CONT; \
105971- ALU64_##OPCODE##_K: \
105972- DST = DST OP IMM; \
105973- CONT; \
105974- ALU_##OPCODE##_K: \
105975- DST = (u32) DST OP (u32) IMM; \
105976- CONT;
105977-
105978- ALU(ADD, +)
105979- ALU(SUB, -)
105980- ALU(AND, &)
105981- ALU(OR, |)
105982- ALU(LSH, <<)
105983- ALU(RSH, >>)
105984- ALU(XOR, ^)
105985- ALU(MUL, *)
105986-#undef ALU
105987- ALU_NEG:
105988- DST = (u32) -DST;
105989- CONT;
105990- ALU64_NEG:
105991- DST = -DST;
105992- CONT;
105993- ALU_MOV_X:
105994- DST = (u32) SRC;
105995- CONT;
105996- ALU_MOV_K:
105997- DST = (u32) IMM;
105998- CONT;
105999- ALU64_MOV_X:
106000- DST = SRC;
106001- CONT;
106002- ALU64_MOV_K:
106003- DST = IMM;
106004- CONT;
106005- ALU64_ARSH_X:
106006- (*(s64 *) &DST) >>= SRC;
106007- CONT;
106008- ALU64_ARSH_K:
106009- (*(s64 *) &DST) >>= IMM;
106010- CONT;
106011- ALU64_MOD_X:
106012- if (unlikely(SRC == 0))
106013- return 0;
106014- tmp = DST;
106015- DST = do_div(tmp, SRC);
106016- CONT;
106017- ALU_MOD_X:
106018- if (unlikely(SRC == 0))
106019- return 0;
106020- tmp = (u32) DST;
106021- DST = do_div(tmp, (u32) SRC);
106022- CONT;
106023- ALU64_MOD_K:
106024- tmp = DST;
106025- DST = do_div(tmp, IMM);
106026- CONT;
106027- ALU_MOD_K:
106028- tmp = (u32) DST;
106029- DST = do_div(tmp, (u32) IMM);
106030- CONT;
106031- ALU64_DIV_X:
106032- if (unlikely(SRC == 0))
106033- return 0;
106034- do_div(DST, SRC);
106035- CONT;
106036- ALU_DIV_X:
106037- if (unlikely(SRC == 0))
106038- return 0;
106039- tmp = (u32) DST;
106040- do_div(tmp, (u32) SRC);
106041- DST = (u32) tmp;
106042- CONT;
106043- ALU64_DIV_K:
106044- do_div(DST, IMM);
106045- CONT;
106046- ALU_DIV_K:
106047- tmp = (u32) DST;
106048- do_div(tmp, (u32) IMM);
106049- DST = (u32) tmp;
106050- CONT;
106051- ALU_END_TO_BE:
106052- switch (IMM) {
106053- case 16:
106054- DST = (__force u16) cpu_to_be16(DST);
106055- break;
106056- case 32:
106057- DST = (__force u32) cpu_to_be32(DST);
106058- break;
106059- case 64:
106060- DST = (__force u64) cpu_to_be64(DST);
106061- break;
106062- }
106063- CONT;
106064- ALU_END_TO_LE:
106065- switch (IMM) {
106066- case 16:
106067- DST = (__force u16) cpu_to_le16(DST);
106068- break;
106069- case 32:
106070- DST = (__force u32) cpu_to_le32(DST);
106071- break;
106072- case 64:
106073- DST = (__force u64) cpu_to_le64(DST);
106074- break;
106075- }
106076- CONT;
106077-
106078- /* CALL */
106079- JMP_CALL:
106080- /* Function call scratches BPF_R1-BPF_R5 registers,
106081- * preserves BPF_R6-BPF_R9, and stores return value
106082- * into BPF_R0.
106083- */
106084- BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
106085- BPF_R4, BPF_R5);
106086- CONT;
106087-
106088- /* JMP */
106089- JMP_JA:
106090- insn += insn->off;
106091- CONT;
106092- JMP_JEQ_X:
106093- if (DST == SRC) {
106094- insn += insn->off;
106095- CONT_JMP;
106096- }
106097- CONT;
106098- JMP_JEQ_K:
106099- if (DST == IMM) {
106100- insn += insn->off;
106101- CONT_JMP;
106102- }
106103- CONT;
106104- JMP_JNE_X:
106105- if (DST != SRC) {
106106- insn += insn->off;
106107- CONT_JMP;
106108- }
106109- CONT;
106110- JMP_JNE_K:
106111- if (DST != IMM) {
106112- insn += insn->off;
106113- CONT_JMP;
106114- }
106115- CONT;
106116- JMP_JGT_X:
106117- if (DST > SRC) {
106118- insn += insn->off;
106119- CONT_JMP;
106120- }
106121- CONT;
106122- JMP_JGT_K:
106123- if (DST > IMM) {
106124- insn += insn->off;
106125- CONT_JMP;
106126- }
106127- CONT;
106128- JMP_JGE_X:
106129- if (DST >= SRC) {
106130- insn += insn->off;
106131- CONT_JMP;
106132- }
106133- CONT;
106134- JMP_JGE_K:
106135- if (DST >= IMM) {
106136- insn += insn->off;
106137- CONT_JMP;
106138- }
106139- CONT;
106140- JMP_JSGT_X:
106141- if (((s64) DST) > ((s64) SRC)) {
106142- insn += insn->off;
106143- CONT_JMP;
106144- }
106145- CONT;
106146- JMP_JSGT_K:
106147- if (((s64) DST) > ((s64) IMM)) {
106148- insn += insn->off;
106149- CONT_JMP;
106150- }
106151- CONT;
106152- JMP_JSGE_X:
106153- if (((s64) DST) >= ((s64) SRC)) {
106154- insn += insn->off;
106155- CONT_JMP;
106156- }
106157- CONT;
106158- JMP_JSGE_K:
106159- if (((s64) DST) >= ((s64) IMM)) {
106160- insn += insn->off;
106161- CONT_JMP;
106162- }
106163- CONT;
106164- JMP_JSET_X:
106165- if (DST & SRC) {
106166- insn += insn->off;
106167- CONT_JMP;
106168- }
106169- CONT;
106170- JMP_JSET_K:
106171- if (DST & IMM) {
106172- insn += insn->off;
106173- CONT_JMP;
106174- }
106175- CONT;
106176- JMP_EXIT:
106177- return BPF_R0;
106178-
106179- /* STX and ST and LDX*/
106180-#define LDST(SIZEOP, SIZE) \
106181- STX_MEM_##SIZEOP: \
106182- *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
106183- CONT; \
106184- ST_MEM_##SIZEOP: \
106185- *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
106186- CONT; \
106187- LDX_MEM_##SIZEOP: \
106188- DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
106189- CONT;
106190-
106191- LDST(B, u8)
106192- LDST(H, u16)
106193- LDST(W, u32)
106194- LDST(DW, u64)
106195-#undef LDST
106196- STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
106197- atomic_add((u32) SRC, (atomic_t *)(unsigned long)
106198- (DST + insn->off));
106199- CONT;
106200- STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
106201- atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
106202- (DST + insn->off));
106203- CONT;
106204- LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
106205- off = IMM;
106206-load_word:
106207- /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are
106208- * only appearing in the programs where ctx ==
106209- * skb. All programs keep 'ctx' in regs[BPF_REG_CTX]
106210- * == BPF_R6, sk_convert_filter() saves it in BPF_R6,
106211- * internal BPF verifier will check that BPF_R6 ==
106212- * ctx.
106213- *
106214- * BPF_ABS and BPF_IND are wrappers of function calls,
106215- * so they scratch BPF_R1-BPF_R5 registers, preserve
106216- * BPF_R6-BPF_R9, and store return value into BPF_R0.
106217- *
106218- * Implicit input:
106219- * ctx == skb == BPF_R6 == CTX
106220- *
106221- * Explicit input:
106222- * SRC == any register
106223- * IMM == 32-bit immediate
106224- *
106225- * Output:
106226- * BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
106227- */
106228-
106229- ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
106230- if (likely(ptr != NULL)) {
106231- BPF_R0 = get_unaligned_be32(ptr);
106232- CONT;
106233- }
106234-
106235- return 0;
106236- LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
106237- off = IMM;
106238-load_half:
106239- ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
106240- if (likely(ptr != NULL)) {
106241- BPF_R0 = get_unaligned_be16(ptr);
106242- CONT;
106243- }
106244-
106245- return 0;
106246- LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
106247- off = IMM;
106248-load_byte:
106249- ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
106250- if (likely(ptr != NULL)) {
106251- BPF_R0 = *(u8 *)ptr;
106252- CONT;
106253- }
106254-
106255- return 0;
106256- LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
106257- off = IMM + SRC;
106258- goto load_word;
106259- LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
106260- off = IMM + SRC;
106261- goto load_half;
106262- LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
106263- off = IMM + SRC;
106264- goto load_byte;
106265-
106266- default_label:
106267- /* If we ever reach this, we have a bug somewhere. */
106268- WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
106269- return 0;
106270-}
106271-
106272-/* Helper to find the offset of pkt_type in sk_buff structure. We want
106273- * to make sure its still a 3bit field starting at a byte boundary;
106274- * taken from arch/x86/net/bpf_jit_comp.c.
106275- */
106276-#ifdef __BIG_ENDIAN_BITFIELD
106277-#define PKT_TYPE_MAX (7 << 5)
106278+ u32 A = 0; /* Accumulator */
106279+ u32 X = 0; /* Index Register */
106280+ u32 mem[BPF_MEMWORDS] = {}; /* Scratch Memory Store */
106281+ u32 tmp;
106282+ int k;
106283+
106284+ /*
106285+ * Process array of filter instructions.
106286+ */
106287+ for (;; fentry++) {
106288+#if defined(CONFIG_X86_32)
106289+#define K (fentry->k)
106290 #else
106291-#define PKT_TYPE_MAX 7
106292+ const u32 K = fentry->k;
106293 #endif
106294-static unsigned int pkt_type_offset(void)
106295-{
106296- struct sk_buff skb_probe = { .pkt_type = ~0, };
106297- u8 *ct = (u8 *) &skb_probe;
106298- unsigned int off;
106299
106300- for (off = 0; off < sizeof(struct sk_buff); off++) {
106301- if (ct[off] == PKT_TYPE_MAX)
106302- return off;
106303- }
106304-
106305- pr_err_once("Please fix %s, as pkt_type couldn't be found!\n", __func__);
106306- return -1;
106307-}
106308-
106309-static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
106310-{
106311- return __skb_get_poff((struct sk_buff *)(unsigned long) ctx);
106312-}
106313-
106314-static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
106315-{
106316- struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
106317- struct nlattr *nla;
106318-
106319- if (skb_is_nonlinear(skb))
106320- return 0;
106321-
106322- if (skb->len < sizeof(struct nlattr))
106323- return 0;
106324-
106325- if (a > skb->len - sizeof(struct nlattr))
106326- return 0;
106327-
106328- nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
106329- if (nla)
106330- return (void *) nla - (void *) skb->data;
106331-
106332- return 0;
106333-}
106334-
106335-static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
106336-{
106337- struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
106338- struct nlattr *nla;
106339-
106340- if (skb_is_nonlinear(skb))
106341- return 0;
106342-
106343- if (skb->len < sizeof(struct nlattr))
106344- return 0;
106345-
106346- if (a > skb->len - sizeof(struct nlattr))
106347- return 0;
106348-
106349- nla = (struct nlattr *) &skb->data[a];
106350- if (nla->nla_len > skb->len - a)
106351- return 0;
106352-
106353- nla = nla_find_nested(nla, x);
106354- if (nla)
106355- return (void *) nla - (void *) skb->data;
106356-
106357- return 0;
106358-}
106359-
106360-static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
106361-{
106362- return raw_smp_processor_id();
106363-}
106364-
106365-/* note that this only generates 32-bit random numbers */
106366-static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
106367-{
106368- return prandom_u32();
106369-}
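
All of the helpers being removed here share one shape: five u64 parameters, so the BPF_EMIT_CALL() sequence in convert_bpf_extensions() below can always hand over CTX, A and X in the first three argument registers and leave the rest unused. A hypothetical helper in the same calling convention (not part of the kernel):

static u64 __demo_get_len_plus_a(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;

	(void) x; (void) r4; (void) r5;	/* fixed arity, unused here */
	return (u64) skb->len + a;	/* return value lands in BPF_R0 */
}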
106370-
106371-static bool convert_bpf_extensions(struct sock_filter *fp,
106372- struct sock_filter_int **insnp)
106373-{
106374- struct sock_filter_int *insn = *insnp;
106375-
106376- switch (fp->k) {
106377- case SKF_AD_OFF + SKF_AD_PROTOCOL:
106378- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
106379-
106380- /* A = *(u16 *) (CTX + offsetof(protocol)) */
106381- *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
106382- offsetof(struct sk_buff, protocol));
106383- /* A = ntohs(A) [emitting a nop or swap16] */
106384- *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
106385- break;
106386-
106387- case SKF_AD_OFF + SKF_AD_PKTTYPE:
106388- *insn = BPF_LDX_MEM(BPF_B, BPF_REG_A, BPF_REG_CTX,
106389- pkt_type_offset());
106390- if (insn->off < 0)
106391- return false;
106392- insn++;
106393- *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, PKT_TYPE_MAX);
106394-#ifdef __BIG_ENDIAN_BITFIELD
106395- insn++;
106396- *insn = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 5);
106397-#endif
106398- break;
106399-
106400- case SKF_AD_OFF + SKF_AD_IFINDEX:
106401- case SKF_AD_OFF + SKF_AD_HATYPE:
106402- BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
106403- BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
106404- BUILD_BUG_ON(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)) < 0);
106405-
106406- *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
106407- BPF_REG_TMP, BPF_REG_CTX,
106408- offsetof(struct sk_buff, dev));
106409- /* if (tmp != 0) goto pc + 1 */
106410- *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
106411- *insn++ = BPF_EXIT_INSN();
106412- if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
106413- *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
106414- offsetof(struct net_device, ifindex));
106415- else
106416- *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
106417- offsetof(struct net_device, type));
106418- break;
106419-
106420- case SKF_AD_OFF + SKF_AD_MARK:
106421- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
106422-
106423- *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
106424- offsetof(struct sk_buff, mark));
106425- break;
106426-
106427- case SKF_AD_OFF + SKF_AD_RXHASH:
106428- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
106429-
106430- *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
106431- offsetof(struct sk_buff, hash));
106432- break;
106433-
106434- case SKF_AD_OFF + SKF_AD_QUEUE:
106435- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
106436-
106437- *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
106438- offsetof(struct sk_buff, queue_mapping));
106439- break;
106440-
106441- case SKF_AD_OFF + SKF_AD_VLAN_TAG:
106442- case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
106443- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
106444- BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
106445-
106446- /* A = *(u16 *) (CTX + offsetof(vlan_tci)) */
106447- *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
106448- offsetof(struct sk_buff, vlan_tci));
106449- if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) {
106450- *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A,
106451- ~VLAN_TAG_PRESENT);
106452- } else {
106453- /* A >>= 12 */
106454- *insn++ = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 12);
106455- /* A &= 1 */
106456- *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 1);
106457- }
106458- break;
106459-
106460- case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
106461- case SKF_AD_OFF + SKF_AD_NLATTR:
106462- case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
106463- case SKF_AD_OFF + SKF_AD_CPU:
106464- case SKF_AD_OFF + SKF_AD_RANDOM:
106465- /* arg1 = CTX */
106466- *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
106467- /* arg2 = A */
106468- *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
106469- /* arg3 = X */
106470- *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
106471- /* Emit call(arg1=CTX, arg2=A, arg3=X) */
106472- switch (fp->k) {
106473- case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
106474- *insn = BPF_EMIT_CALL(__skb_get_pay_offset);
106475- break;
106476- case SKF_AD_OFF + SKF_AD_NLATTR:
106477- *insn = BPF_EMIT_CALL(__skb_get_nlattr);
106478- break;
106479- case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
106480- *insn = BPF_EMIT_CALL(__skb_get_nlattr_nest);
106481- break;
106482- case SKF_AD_OFF + SKF_AD_CPU:
106483- *insn = BPF_EMIT_CALL(__get_raw_cpu_id);
106484- break;
106485- case SKF_AD_OFF + SKF_AD_RANDOM:
106486- *insn = BPF_EMIT_CALL(__get_random_u32);
106487- break;
106488- }
106489- break;
106490-
106491- case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
106492- /* A ^= X */
106493- *insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
106494- break;
106495-
106496- default:
106497- /* This is just a dummy call to avoid letting the compiler
106498- * evict __bpf_call_base() as an optimization. Placed here
106499- * where no-one bothers.
106500- */
106501- BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
106502- return false;
106503- }
106504-
106505- *insnp = insn;
106506- return true;
106507-}
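
The two SKF_AD_VLAN_* conversions above split a single 16-bit vlan_tci value: the tag keeps everything except bit 12 (VLAN_TAG_PRESENT, pinned to 0x1000 by the BUILD_BUG_ON), while the present flag shifts that bit down to 1 or 0. A worked userspace check of the same arithmetic (the DEMO_ prefix marks invented names):

#include <stdint.h>
#include <assert.h>

#define DEMO_VLAN_TAG_PRESENT 0x1000	/* the value the BUILD_BUG_ON pins */

int main(void)
{
	uint16_t vlan_tci = DEMO_VLAN_TAG_PRESENT | 42;	/* tag 42, present */

	uint32_t tag     = vlan_tci & ~DEMO_VLAN_TAG_PRESENT; /* SKF_AD_VLAN_TAG */
	uint32_t present = (vlan_tci >> 12) & 1;  /* SKF_AD_VLAN_TAG_PRESENT */

	assert(tag == 42 && present == 1);
	return 0;
}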
106508-
106509-/**
106510- * sk_convert_filter - convert filter program
106511- * @prog: the user passed filter program
106512- * @len: the length of the user passed filter program
106513- * @new_prog: buffer where converted program will be stored
106514- * @new_len: pointer to store length of converted program
106515- *
106516- * Remap 'sock_filter' style BPF instruction set to 'sock_filter_ext' style.
106517- * Conversion workflow:
106518- *
106519- * 1) First pass for calculating the new program length:
106520- * sk_convert_filter(old_prog, old_len, NULL, &new_len)
106521- *
106522- * 2) 2nd pass to remap in two passes: 1st pass finds new
106523- * jump offsets, 2nd pass remapping:
106524- * new_prog = kmalloc(sizeof(struct sock_filter_int) * new_len);
106525- * sk_convert_filter(old_prog, old_len, new_prog, &new_len);
106526- *
106527- * User BPF's register A is mapped to our BPF register 6, user BPF
106528- * register X is mapped to BPF register 7; frame pointer is always
106529- * register 10; Context 'void *ctx' is stored in register 1, that is,
106530- * for socket filters: ctx == 'struct sk_buff *', for seccomp:
106531- * ctx == 'struct seccomp_data *'.
106532- */
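
A sketch of the two-phase call sequence this comment describes, with error paths trimmed (demo_migrate is an invented wrapper, not a kernel function):

static int demo_migrate(struct sock_filter *old_prog, int old_len)
{
	struct sock_filter_int *new_prog;
	int new_len = 0, err;

	/* 1st pass: only compute the converted length */
	err = sk_convert_filter(old_prog, old_len, NULL, &new_len);
	if (err)
		return err;

	new_prog = kmalloc(sizeof(*new_prog) * new_len, GFP_KERNEL);
	if (!new_prog)
		return -ENOMEM;

	/* 2nd pass: remap into the freshly sized buffer */
	err = sk_convert_filter(old_prog, old_len, new_prog, &new_len);
	/* a caller would now install new_prog, or kfree() it on error */
	return err;
}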
106533-int sk_convert_filter(struct sock_filter *prog, int len,
106534- struct sock_filter_int *new_prog, int *new_len)
106535-{
106536- int new_flen = 0, pass = 0, target, i;
106537- struct sock_filter_int *new_insn;
106538- struct sock_filter *fp;
106539- int *addrs = NULL;
106540- u8 bpf_src;
106541-
106542- BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
106543- BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
106544-
106545- if (len <= 0 || len > BPF_MAXINSNS)
106546- return -EINVAL;
106547-
106548- if (new_prog) {
106549- addrs = kcalloc(len, sizeof(*addrs), GFP_KERNEL);
106550- if (!addrs)
106551- return -ENOMEM;
106552- }
106553-
106554-do_pass:
106555- new_insn = new_prog;
106556- fp = prog;
106557-
106558- if (new_insn)
106559- *new_insn = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
106560- new_insn++;
106561-
106562- for (i = 0; i < len; fp++, i++) {
106563- struct sock_filter_int tmp_insns[6] = { };
106564- struct sock_filter_int *insn = tmp_insns;
106565-
106566- if (addrs)
106567- addrs[i] = new_insn - new_prog;
106568-
106569- switch (fp->code) {
106570- /* All arithmetic insns and skb loads map as-is. */
106571- case BPF_ALU | BPF_ADD | BPF_X:
106572- case BPF_ALU | BPF_ADD | BPF_K:
106573- case BPF_ALU | BPF_SUB | BPF_X:
106574- case BPF_ALU | BPF_SUB | BPF_K:
106575- case BPF_ALU | BPF_AND | BPF_X:
106576- case BPF_ALU | BPF_AND | BPF_K:
106577- case BPF_ALU | BPF_OR | BPF_X:
106578- case BPF_ALU | BPF_OR | BPF_K:
106579- case BPF_ALU | BPF_LSH | BPF_X:
106580- case BPF_ALU | BPF_LSH | BPF_K:
106581- case BPF_ALU | BPF_RSH | BPF_X:
106582- case BPF_ALU | BPF_RSH | BPF_K:
106583- case BPF_ALU | BPF_XOR | BPF_X:
106584- case BPF_ALU | BPF_XOR | BPF_K:
106585- case BPF_ALU | BPF_MUL | BPF_X:
106586- case BPF_ALU | BPF_MUL | BPF_K:
106587- case BPF_ALU | BPF_DIV | BPF_X:
106588- case BPF_ALU | BPF_DIV | BPF_K:
106589- case BPF_ALU | BPF_MOD | BPF_X:
106590- case BPF_ALU | BPF_MOD | BPF_K:
106591- case BPF_ALU | BPF_NEG:
106592- case BPF_LD | BPF_ABS | BPF_W:
106593- case BPF_LD | BPF_ABS | BPF_H:
106594- case BPF_LD | BPF_ABS | BPF_B:
106595- case BPF_LD | BPF_IND | BPF_W:
106596- case BPF_LD | BPF_IND | BPF_H:
106597- case BPF_LD | BPF_IND | BPF_B:
106598- /* Check for overloaded BPF extension and
106599- * directly convert it if found, otherwise
106600- * just move on with mapping.
106601- */
106602- if (BPF_CLASS(fp->code) == BPF_LD &&
106603- BPF_MODE(fp->code) == BPF_ABS &&
106604- convert_bpf_extensions(fp, &insn))
106605- break;
106606-
106607- *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
106608- break;
106609-
106610- /* Jump transformation cannot use BPF block macros
106611- * everywhere as offset calculation and target updates
106612- * require a bit more work than the rest, i.e. jump
106613- * opcodes map as-is, but offsets need adjustment.
106614- */
106615-
106616-#define BPF_EMIT_JMP \
106617- do { \
106618- if (target >= len || target < 0) \
106619- goto err; \
106620- insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0; \
106621- /* Adjust pc relative offset for 2nd or 3rd insn. */ \
106622- insn->off -= insn - tmp_insns; \
106623- } while (0)
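
A worked example of the macro's arithmetic, with invented numbers:

/* Old insn i = 3 jumps to old target = 7; pass 1 computed
 * addrs[3] = 5 and addrs[7] = 12 in the new program.
 */
int off = 12 - 5 - 1;	/* addrs[target] - addrs[i] - 1         -> 6 */
off -= 1;		/* Jxx is the 2nd insn of the expansion  -> 5 */
/* i.e. five new insns are skipped, counted from the insn after the jump */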
106624-
106625- case BPF_JMP | BPF_JA:
106626- target = i + fp->k + 1;
106627- insn->code = fp->code;
106628- BPF_EMIT_JMP;
106629- break;
106630-
106631- case BPF_JMP | BPF_JEQ | BPF_K:
106632- case BPF_JMP | BPF_JEQ | BPF_X:
106633- case BPF_JMP | BPF_JSET | BPF_K:
106634- case BPF_JMP | BPF_JSET | BPF_X:
106635- case BPF_JMP | BPF_JGT | BPF_K:
106636- case BPF_JMP | BPF_JGT | BPF_X:
106637- case BPF_JMP | BPF_JGE | BPF_K:
106638- case BPF_JMP | BPF_JGE | BPF_X:
106639- if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
106640- /* BPF immediates are signed, zero extend
106641- * immediate into tmp register and use it
106642- * in compare insn.
106643- */
106644- *insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);
106645-
106646- insn->dst_reg = BPF_REG_A;
106647- insn->src_reg = BPF_REG_TMP;
106648- bpf_src = BPF_X;
106649- } else {
106650- insn->dst_reg = BPF_REG_A;
106651- insn->src_reg = BPF_REG_X;
106652- insn->imm = fp->k;
106653- bpf_src = BPF_SRC(fp->code);
106654+ switch (fentry->code) {
106655+ case BPF_S_ALU_ADD_X:
106656+ A += X;
106657+ continue;
106658+ case BPF_S_ALU_ADD_K:
106659+ A += K;
106660+ continue;
106661+ case BPF_S_ALU_SUB_X:
106662+ A -= X;
106663+ continue;
106664+ case BPF_S_ALU_SUB_K:
106665+ A -= K;
106666+ continue;
106667+ case BPF_S_ALU_MUL_X:
106668+ A *= X;
106669+ continue;
106670+ case BPF_S_ALU_MUL_K:
106671+ A *= K;
106672+ continue;
106673+ case BPF_S_ALU_DIV_X:
106674+ if (X == 0)
106675+ return 0;
106676+ A /= X;
106677+ continue;
106678+ case BPF_S_ALU_DIV_K:
106679+ A /= K;
106680+ continue;
106681+ case BPF_S_ALU_MOD_X:
106682+ if (X == 0)
106683+ return 0;
106684+ A %= X;
106685+ continue;
106686+ case BPF_S_ALU_MOD_K:
106687+ A %= K;
106688+ continue;
106689+ case BPF_S_ALU_AND_X:
106690+ A &= X;
106691+ continue;
106692+ case BPF_S_ALU_AND_K:
106693+ A &= K;
106694+ continue;
106695+ case BPF_S_ALU_OR_X:
106696+ A |= X;
106697+ continue;
106698+ case BPF_S_ALU_OR_K:
106699+ A |= K;
106700+ continue;
106701+ case BPF_S_ANC_ALU_XOR_X:
106702+ case BPF_S_ALU_XOR_X:
106703+ A ^= X;
106704+ continue;
106705+ case BPF_S_ALU_XOR_K:
106706+ A ^= K;
106707+ continue;
106708+ case BPF_S_ALU_LSH_X:
106709+ A <<= X;
106710+ continue;
106711+ case BPF_S_ALU_LSH_K:
106712+ A <<= K;
106713+ continue;
106714+ case BPF_S_ALU_RSH_X:
106715+ A >>= X;
106716+ continue;
106717+ case BPF_S_ALU_RSH_K:
106718+ A >>= K;
106719+ continue;
106720+ case BPF_S_ALU_NEG:
106721+ A = -A;
106722+ continue;
106723+ case BPF_S_JMP_JA:
106724+ fentry += K;
106725+ continue;
106726+ case BPF_S_JMP_JGT_K:
106727+ fentry += (A > K) ? fentry->jt : fentry->jf;
106728+ continue;
106729+ case BPF_S_JMP_JGE_K:
106730+ fentry += (A >= K) ? fentry->jt : fentry->jf;
106731+ continue;
106732+ case BPF_S_JMP_JEQ_K:
106733+ fentry += (A == K) ? fentry->jt : fentry->jf;
106734+ continue;
106735+ case BPF_S_JMP_JSET_K:
106736+ fentry += (A & K) ? fentry->jt : fentry->jf;
106737+ continue;
106738+ case BPF_S_JMP_JGT_X:
106739+ fentry += (A > X) ? fentry->jt : fentry->jf;
106740+ continue;
106741+ case BPF_S_JMP_JGE_X:
106742+ fentry += (A >= X) ? fentry->jt : fentry->jf;
106743+ continue;
106744+ case BPF_S_JMP_JEQ_X:
106745+ fentry += (A == X) ? fentry->jt : fentry->jf;
106746+ continue;
106747+ case BPF_S_JMP_JSET_X:
106748+ fentry += (A & X) ? fentry->jt : fentry->jf;
106749+ continue;
106750+ case BPF_S_LD_W_ABS:
106751+ k = K;
106752+load_w:
106753+ ptr = load_pointer(skb, k, 4, &tmp);
106754+ if (ptr != NULL) {
106755+ A = get_unaligned_be32(ptr);
106756+ continue;
106757 }
106758-
106759- /* Common case where 'jump_false' is next insn. */
106760- if (fp->jf == 0) {
106761- insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
106762- target = i + fp->jt + 1;
106763- BPF_EMIT_JMP;
106764- break;
106765+ return 0;
106766+ case BPF_S_LD_H_ABS:
106767+ k = K;
106768+load_h:
106769+ ptr = load_pointer(skb, k, 2, &tmp);
106770+ if (ptr != NULL) {
106771+ A = get_unaligned_be16(ptr);
106772+ continue;
106773 }
106774-
106775- /* Convert JEQ into JNE when 'jump_true' is next insn. */
106776- if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
106777- insn->code = BPF_JMP | BPF_JNE | bpf_src;
106778- target = i + fp->jf + 1;
106779- BPF_EMIT_JMP;
106780- break;
106781+ return 0;
106782+ case BPF_S_LD_B_ABS:
106783+ k = K;
106784+load_b:
106785+ ptr = load_pointer(skb, k, 1, &tmp);
106786+ if (ptr != NULL) {
106787+ A = *(u8 *)ptr;
106788+ continue;
106789 }
106790-
106791- /* Other jumps are mapped into two insns: Jxx and JA. */
106792- target = i + fp->jt + 1;
106793- insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
106794- BPF_EMIT_JMP;
106795- insn++;
106796-
106797- insn->code = BPF_JMP | BPF_JA;
106798- target = i + fp->jf + 1;
106799- BPF_EMIT_JMP;
106800- break;
106801-
106802-		/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
106803- case BPF_LDX | BPF_MSH | BPF_B:
106804- /* tmp = A */
106805- *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
106806- /* A = BPF_R0 = *(u8 *) (skb->data + K) */
106807- *insn++ = BPF_LD_ABS(BPF_B, fp->k);
106808- /* A &= 0xf */
106809- *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
106810- /* A <<= 2 */
106811- *insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
106812- /* X = A */
106813- *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
106814- /* A = tmp */
106815- *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
106816- break;
106817-
106818-		/* RET_K, RET_A are remapped into 2 insns. */
106819- case BPF_RET | BPF_A:
106820- case BPF_RET | BPF_K:
106821- *insn++ = BPF_MOV32_RAW(BPF_RVAL(fp->code) == BPF_K ?
106822- BPF_K : BPF_X, BPF_REG_0,
106823- BPF_REG_A, fp->k);
106824- *insn = BPF_EXIT_INSN();
106825- break;
106826-
106827- /* Store to stack. */
106828- case BPF_ST:
106829- case BPF_STX:
106830- *insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
106831- BPF_ST ? BPF_REG_A : BPF_REG_X,
106832- -(BPF_MEMWORDS - fp->k) * 4);
106833- break;
106834-
106835- /* Load from stack. */
106836- case BPF_LD | BPF_MEM:
106837- case BPF_LDX | BPF_MEM:
106838- *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
106839- BPF_REG_A : BPF_REG_X, BPF_REG_FP,
106840- -(BPF_MEMWORDS - fp->k) * 4);
106841- break;
106842-
106843- /* A = K or X = K */
106844- case BPF_LD | BPF_IMM:
106845- case BPF_LDX | BPF_IMM:
106846- *insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
106847- BPF_REG_A : BPF_REG_X, fp->k);
106848- break;
106849-
106850- /* X = A */
106851- case BPF_MISC | BPF_TAX:
106852- *insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
106853- break;
106854-
106855- /* A = X */
106856- case BPF_MISC | BPF_TXA:
106857- *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
106858- break;
106859-
106860- /* A = skb->len or X = skb->len */
106861- case BPF_LD | BPF_W | BPF_LEN:
106862- case BPF_LDX | BPF_W | BPF_LEN:
106863- *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
106864- BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
106865- offsetof(struct sk_buff, len));
106866- break;
106867-
106868- /* Access seccomp_data fields. */
106869- case BPF_LDX | BPF_ABS | BPF_W:
106870- /* A = *(u32 *) (ctx + K) */
106871- *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
106872- break;
106873-
106874-		/* Unknown instruction. */
106875+ return 0;
106876+ case BPF_S_LD_W_LEN:
106877+ A = skb->len;
106878+ continue;
106879+ case BPF_S_LDX_W_LEN:
106880+ X = skb->len;
106881+ continue;
106882+ case BPF_S_LD_W_IND:
106883+ k = X + K;
106884+ goto load_w;
106885+ case BPF_S_LD_H_IND:
106886+ k = X + K;
106887+ goto load_h;
106888+ case BPF_S_LD_B_IND:
106889+ k = X + K;
106890+ goto load_b;
106891+ case BPF_S_LDX_B_MSH:
106892+ ptr = load_pointer(skb, K, 1, &tmp);
106893+ if (ptr != NULL) {
106894+ X = (*(u8 *)ptr & 0xf) << 2;
106895+ continue;
106896+ }
106897+ return 0;
106898+ case BPF_S_LD_IMM:
106899+ A = K;
106900+ continue;
106901+ case BPF_S_LDX_IMM:
106902+ X = K;
106903+ continue;
106904+ case BPF_S_LD_MEM:
106905+ A = mem[K&15];
106906+ continue;
106907+ case BPF_S_LDX_MEM:
106908+ X = mem[K&15];
106909+ continue;
106910+ case BPF_S_MISC_TAX:
106911+ X = A;
106912+ continue;
106913+ case BPF_S_MISC_TXA:
106914+ A = X;
106915+ continue;
106916+ case BPF_S_RET_K:
106917+ return K;
106918+ case BPF_S_RET_A:
106919+ return A;
106920+ case BPF_S_ST:
106921+ mem[K&15] = A;
106922+ continue;
106923+ case BPF_S_STX:
106924+ mem[K&15] = X;
106925+ continue;
106926+ case BPF_S_ANC_PROTOCOL:
106927+ A = ntohs(skb->protocol);
106928+ continue;
106929+ case BPF_S_ANC_PKTTYPE:
106930+ A = skb->pkt_type;
106931+ continue;
106932+ case BPF_S_ANC_IFINDEX:
106933+ if (!skb->dev)
106934+ return 0;
106935+ A = skb->dev->ifindex;
106936+ continue;
106937+ case BPF_S_ANC_MARK:
106938+ A = skb->mark;
106939+ continue;
106940+ case BPF_S_ANC_QUEUE:
106941+ A = skb->queue_mapping;
106942+ continue;
106943+ case BPF_S_ANC_HATYPE:
106944+ if (!skb->dev)
106945+ return 0;
106946+ A = skb->dev->type;
106947+ continue;
106948+ case BPF_S_ANC_RXHASH:
106949+ A = skb->hash;
106950+ continue;
106951+ case BPF_S_ANC_CPU:
106952+ A = raw_smp_processor_id();
106953+ continue;
106954+ case BPF_S_ANC_VLAN_TAG:
106955+ A = vlan_tx_tag_get(skb);
106956+ continue;
106957+ case BPF_S_ANC_VLAN_TAG_PRESENT:
106958+ A = !!vlan_tx_tag_present(skb);
106959+ continue;
106960+ case BPF_S_ANC_PAY_OFFSET:
106961+ A = __skb_get_poff(skb);
106962+ continue;
106963+ case BPF_S_ANC_NLATTR: {
106964+ struct nlattr *nla;
106965+
106966+ if (skb_is_nonlinear(skb))
106967+ return 0;
106968+ if (skb->len < sizeof(struct nlattr))
106969+ return 0;
106970+ if (A > skb->len - sizeof(struct nlattr))
106971+ return 0;
106972+
106973+ nla = nla_find((struct nlattr *)&skb->data[A],
106974+ skb->len - A, X);
106975+ if (nla)
106976+ A = (void *)nla - (void *)skb->data;
106977+ else
106978+ A = 0;
106979+ continue;
106980+ }
106981+ case BPF_S_ANC_NLATTR_NEST: {
106982+ struct nlattr *nla;
106983+
106984+ if (skb_is_nonlinear(skb))
106985+ return 0;
106986+ if (skb->len < sizeof(struct nlattr))
106987+ return 0;
106988+ if (A > skb->len - sizeof(struct nlattr))
106989+ return 0;
106990+
106991+ nla = (struct nlattr *)&skb->data[A];
106992+ if (nla->nla_len > skb->len - A)
106993+ return 0;
106994+
106995+ nla = nla_find_nested(nla, X);
106996+ if (nla)
106997+ A = (void *)nla - (void *)skb->data;
106998+ else
106999+ A = 0;
107000+ continue;
107001+ }
107002+#ifdef CONFIG_SECCOMP_FILTER
107003+ case BPF_S_ANC_SECCOMP_LD_W:
107004+ A = seccomp_bpf_load(fentry->k);
107005+ continue;
107006+#endif
107007 default:
107008- goto err;
107009+			WARN(1, KERN_ALERT "Unknown sock filter code:%u jt:%u jf:%u k:%u\n",
107010+ fentry->code, fentry->jt,
107011+ fentry->jf, fentry->k);
107012+ BUG();
107013+ return 0;
107014 }
107015-
107016- insn++;
107017- if (new_prog)
107018- memcpy(new_insn, tmp_insns,
107019- sizeof(*insn) * (insn - tmp_insns));
107020- new_insn += insn - tmp_insns;
107021- }
107022-
107023- if (!new_prog) {
107024- /* Only calculating new length. */
107025- *new_len = new_insn - new_prog;
107026- return 0;
107027- }
107028-
107029- pass++;
107030- if (new_flen != new_insn - new_prog) {
107031- new_flen = new_insn - new_prog;
107032- if (pass > 2)
107033- goto err;
107034- goto do_pass;
107035 }
107036
107037- kfree(addrs);
107038- BUG_ON(*new_len != new_flen);
107039 return 0;
107040-err:
107041- kfree(addrs);
107042- return -EINVAL;
107043 }
107044+EXPORT_SYMBOL(sk_run_filter);
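
sk_run_filter() executes programs whose opcodes sk_chk_filter() has already rewritten into the internal BPF_S_* values; user code still writes the classic uapi opcodes. A minimal "accept ARP, drop everything else" program in that classic form, as a sketch:

#include <linux/filter.h>
#include <linux/if_ether.h>

static struct sock_filter demo_arp_only[] = {
	{ BPF_LD  | BPF_H   | BPF_ABS, 0, 0, 12 },	   /* A = ethertype   */
	{ BPF_JMP | BPF_JEQ | BPF_K,   0, 1, ETH_P_ARP },  /* A == 0x0806 ?   */
	{ BPF_RET | BPF_K,             0, 0, 0xffff },	   /* accept 64K bytes */
	{ BPF_RET | BPF_K,             0, 0, 0 },	   /* drop            */
};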
107045
107046-/* Security:
107047- *
107048+/*
107049+ * Security:
107050 * A BPF program is able to use 16 cells of memory to store intermediate
107051- * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
107052- *
107053+ * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter())
107054  * As we don't want to clear the mem[] array for each packet going through
107055  * sk_run_filter(), we check that a filter loaded by the user never tries to read
107056 * a cell if not previously written, and we check all branches to be sure
107057@@ -1096,46 +418,44 @@ err:
107058 */
107059 static int check_load_and_stores(struct sock_filter *filter, int flen)
107060 {
107061- u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
107062+ u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
107063 int pc, ret = 0;
107064
107065- BUILD_BUG_ON(BPF_MEMWORDS > 16);
107066-
107067- masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
107068+ BUILD_BUG_ON(BPF_MEMWORDS != 16);
107069+ masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
107070 if (!masks)
107071 return -ENOMEM;
107072-
107073 memset(masks, 0xff, flen * sizeof(*masks));
107074
107075 for (pc = 0; pc < flen; pc++) {
107076 memvalid &= masks[pc];
107077
107078 switch (filter[pc].code) {
107079- case BPF_ST:
107080- case BPF_STX:
107081+ case BPF_S_ST:
107082+ case BPF_S_STX:
107083 memvalid |= (1 << filter[pc].k);
107084 break;
107085- case BPF_LD | BPF_MEM:
107086- case BPF_LDX | BPF_MEM:
107087+ case BPF_S_LD_MEM:
107088+ case BPF_S_LDX_MEM:
107089 if (!(memvalid & (1 << filter[pc].k))) {
107090 ret = -EINVAL;
107091 goto error;
107092 }
107093 break;
107094- case BPF_JMP | BPF_JA:
107095- /* A jump must set masks on target */
107096+ case BPF_S_JMP_JA:
107097+ /* a jump must set masks on target */
107098 masks[pc + 1 + filter[pc].k] &= memvalid;
107099 memvalid = ~0;
107100 break;
107101- case BPF_JMP | BPF_JEQ | BPF_K:
107102- case BPF_JMP | BPF_JEQ | BPF_X:
107103- case BPF_JMP | BPF_JGE | BPF_K:
107104- case BPF_JMP | BPF_JGE | BPF_X:
107105- case BPF_JMP | BPF_JGT | BPF_K:
107106- case BPF_JMP | BPF_JGT | BPF_X:
107107- case BPF_JMP | BPF_JSET | BPF_K:
107108- case BPF_JMP | BPF_JSET | BPF_X:
107109- /* A jump must set masks on targets */
107110+ case BPF_S_JMP_JEQ_K:
107111+ case BPF_S_JMP_JEQ_X:
107112+ case BPF_S_JMP_JGE_K:
107113+ case BPF_S_JMP_JGE_X:
107114+ case BPF_S_JMP_JGT_K:
107115+ case BPF_S_JMP_JGT_X:
107116+ case BPF_S_JMP_JSET_X:
107117+ case BPF_S_JMP_JSET_K:
107118+ /* a jump must set masks on targets */
107119 masks[pc + 1 + filter[pc].jt] &= memvalid;
107120 masks[pc + 1 + filter[pc].jf] &= memvalid;
107121 memvalid = ~0;
107122@@ -1147,72 +467,6 @@ error:
107123 return ret;
107124 }
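
A hand-run of the bitmask for a three-instruction program ST mem[2]; LDX mem[2]; LD mem[3], wrapped in an invented demo function:

static int demo_check(void)
{
	u16 memvalid = 0;

	memvalid |= 1 << 2;		/* insn 0: BPF_S_ST,      k = 2 */

	if (!(memvalid & (1 << 2)))	/* insn 1: BPF_S_LDX_MEM, k = 2: ok */
		return -EINVAL;
	if (!(memvalid & (1 << 3)))	/* insn 2: BPF_S_LD_MEM,  k = 3 */
		return -EINVAL;		/* never written -> filter rejected */
	return 0;
}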
107125
107126-static bool chk_code_allowed(u16 code_to_probe)
107127-{
107128- static const bool codes[] = {
107129- /* 32 bit ALU operations */
107130- [BPF_ALU | BPF_ADD | BPF_K] = true,
107131- [BPF_ALU | BPF_ADD | BPF_X] = true,
107132- [BPF_ALU | BPF_SUB | BPF_K] = true,
107133- [BPF_ALU | BPF_SUB | BPF_X] = true,
107134- [BPF_ALU | BPF_MUL | BPF_K] = true,
107135- [BPF_ALU | BPF_MUL | BPF_X] = true,
107136- [BPF_ALU | BPF_DIV | BPF_K] = true,
107137- [BPF_ALU | BPF_DIV | BPF_X] = true,
107138- [BPF_ALU | BPF_MOD | BPF_K] = true,
107139- [BPF_ALU | BPF_MOD | BPF_X] = true,
107140- [BPF_ALU | BPF_AND | BPF_K] = true,
107141- [BPF_ALU | BPF_AND | BPF_X] = true,
107142- [BPF_ALU | BPF_OR | BPF_K] = true,
107143- [BPF_ALU | BPF_OR | BPF_X] = true,
107144- [BPF_ALU | BPF_XOR | BPF_K] = true,
107145- [BPF_ALU | BPF_XOR | BPF_X] = true,
107146- [BPF_ALU | BPF_LSH | BPF_K] = true,
107147- [BPF_ALU | BPF_LSH | BPF_X] = true,
107148- [BPF_ALU | BPF_RSH | BPF_K] = true,
107149- [BPF_ALU | BPF_RSH | BPF_X] = true,
107150- [BPF_ALU | BPF_NEG] = true,
107151- /* Load instructions */
107152- [BPF_LD | BPF_W | BPF_ABS] = true,
107153- [BPF_LD | BPF_H | BPF_ABS] = true,
107154- [BPF_LD | BPF_B | BPF_ABS] = true,
107155- [BPF_LD | BPF_W | BPF_LEN] = true,
107156- [BPF_LD | BPF_W | BPF_IND] = true,
107157- [BPF_LD | BPF_H | BPF_IND] = true,
107158- [BPF_LD | BPF_B | BPF_IND] = true,
107159- [BPF_LD | BPF_IMM] = true,
107160- [BPF_LD | BPF_MEM] = true,
107161- [BPF_LDX | BPF_W | BPF_LEN] = true,
107162- [BPF_LDX | BPF_B | BPF_MSH] = true,
107163- [BPF_LDX | BPF_IMM] = true,
107164- [BPF_LDX | BPF_MEM] = true,
107165- /* Store instructions */
107166- [BPF_ST] = true,
107167- [BPF_STX] = true,
107168- /* Misc instructions */
107169- [BPF_MISC | BPF_TAX] = true,
107170- [BPF_MISC | BPF_TXA] = true,
107171- /* Return instructions */
107172- [BPF_RET | BPF_K] = true,
107173- [BPF_RET | BPF_A] = true,
107174- /* Jump instructions */
107175- [BPF_JMP | BPF_JA] = true,
107176- [BPF_JMP | BPF_JEQ | BPF_K] = true,
107177- [BPF_JMP | BPF_JEQ | BPF_X] = true,
107178- [BPF_JMP | BPF_JGE | BPF_K] = true,
107179- [BPF_JMP | BPF_JGE | BPF_X] = true,
107180- [BPF_JMP | BPF_JGT | BPF_K] = true,
107181- [BPF_JMP | BPF_JGT | BPF_X] = true,
107182- [BPF_JMP | BPF_JSET | BPF_K] = true,
107183- [BPF_JMP | BPF_JSET | BPF_X] = true,
107184- };
107185-
107186- if (code_to_probe >= ARRAY_SIZE(codes))
107187- return false;
107188-
107189- return codes[code_to_probe];
107190-}
107191-
107192 /**
107193 * sk_chk_filter - verify socket filter code
107194 * @filter: filter to verify
107195@@ -1229,303 +483,187 @@ static bool chk_code_allowed(u16 code_to_probe)
107196 */
107197 int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
107198 {
107199- bool anc_found;
107200+ /*
107201+ * Valid instructions are initialized to non-0.
107202+ * Invalid instructions are initialized to 0.
107203+ */
107204+ static const u8 codes[] = {
107205+ [BPF_ALU|BPF_ADD|BPF_K] = BPF_S_ALU_ADD_K,
107206+ [BPF_ALU|BPF_ADD|BPF_X] = BPF_S_ALU_ADD_X,
107207+ [BPF_ALU|BPF_SUB|BPF_K] = BPF_S_ALU_SUB_K,
107208+ [BPF_ALU|BPF_SUB|BPF_X] = BPF_S_ALU_SUB_X,
107209+ [BPF_ALU|BPF_MUL|BPF_K] = BPF_S_ALU_MUL_K,
107210+ [BPF_ALU|BPF_MUL|BPF_X] = BPF_S_ALU_MUL_X,
107211+ [BPF_ALU|BPF_DIV|BPF_X] = BPF_S_ALU_DIV_X,
107212+ [BPF_ALU|BPF_MOD|BPF_K] = BPF_S_ALU_MOD_K,
107213+ [BPF_ALU|BPF_MOD|BPF_X] = BPF_S_ALU_MOD_X,
107214+ [BPF_ALU|BPF_AND|BPF_K] = BPF_S_ALU_AND_K,
107215+ [BPF_ALU|BPF_AND|BPF_X] = BPF_S_ALU_AND_X,
107216+ [BPF_ALU|BPF_OR|BPF_K] = BPF_S_ALU_OR_K,
107217+ [BPF_ALU|BPF_OR|BPF_X] = BPF_S_ALU_OR_X,
107218+ [BPF_ALU|BPF_XOR|BPF_K] = BPF_S_ALU_XOR_K,
107219+ [BPF_ALU|BPF_XOR|BPF_X] = BPF_S_ALU_XOR_X,
107220+ [BPF_ALU|BPF_LSH|BPF_K] = BPF_S_ALU_LSH_K,
107221+ [BPF_ALU|BPF_LSH|BPF_X] = BPF_S_ALU_LSH_X,
107222+ [BPF_ALU|BPF_RSH|BPF_K] = BPF_S_ALU_RSH_K,
107223+ [BPF_ALU|BPF_RSH|BPF_X] = BPF_S_ALU_RSH_X,
107224+ [BPF_ALU|BPF_NEG] = BPF_S_ALU_NEG,
107225+ [BPF_LD|BPF_W|BPF_ABS] = BPF_S_LD_W_ABS,
107226+ [BPF_LD|BPF_H|BPF_ABS] = BPF_S_LD_H_ABS,
107227+ [BPF_LD|BPF_B|BPF_ABS] = BPF_S_LD_B_ABS,
107228+ [BPF_LD|BPF_W|BPF_LEN] = BPF_S_LD_W_LEN,
107229+ [BPF_LD|BPF_W|BPF_IND] = BPF_S_LD_W_IND,
107230+ [BPF_LD|BPF_H|BPF_IND] = BPF_S_LD_H_IND,
107231+ [BPF_LD|BPF_B|BPF_IND] = BPF_S_LD_B_IND,
107232+ [BPF_LD|BPF_IMM] = BPF_S_LD_IMM,
107233+ [BPF_LDX|BPF_W|BPF_LEN] = BPF_S_LDX_W_LEN,
107234+ [BPF_LDX|BPF_B|BPF_MSH] = BPF_S_LDX_B_MSH,
107235+ [BPF_LDX|BPF_IMM] = BPF_S_LDX_IMM,
107236+ [BPF_MISC|BPF_TAX] = BPF_S_MISC_TAX,
107237+ [BPF_MISC|BPF_TXA] = BPF_S_MISC_TXA,
107238+ [BPF_RET|BPF_K] = BPF_S_RET_K,
107239+ [BPF_RET|BPF_A] = BPF_S_RET_A,
107240+ [BPF_ALU|BPF_DIV|BPF_K] = BPF_S_ALU_DIV_K,
107241+ [BPF_LD|BPF_MEM] = BPF_S_LD_MEM,
107242+ [BPF_LDX|BPF_MEM] = BPF_S_LDX_MEM,
107243+ [BPF_ST] = BPF_S_ST,
107244+ [BPF_STX] = BPF_S_STX,
107245+ [BPF_JMP|BPF_JA] = BPF_S_JMP_JA,
107246+ [BPF_JMP|BPF_JEQ|BPF_K] = BPF_S_JMP_JEQ_K,
107247+ [BPF_JMP|BPF_JEQ|BPF_X] = BPF_S_JMP_JEQ_X,
107248+ [BPF_JMP|BPF_JGE|BPF_K] = BPF_S_JMP_JGE_K,
107249+ [BPF_JMP|BPF_JGE|BPF_X] = BPF_S_JMP_JGE_X,
107250+ [BPF_JMP|BPF_JGT|BPF_K] = BPF_S_JMP_JGT_K,
107251+ [BPF_JMP|BPF_JGT|BPF_X] = BPF_S_JMP_JGT_X,
107252+ [BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
107253+ [BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
107254+ };
107255 int pc;
107256+ bool anc_found;
107257
107258 if (flen == 0 || flen > BPF_MAXINSNS)
107259 return -EINVAL;
107260
107261- /* Check the filter code now */
107262+ /* check the filter code now */
107263 for (pc = 0; pc < flen; pc++) {
107264 struct sock_filter *ftest = &filter[pc];
107265+ u16 code = ftest->code;
107266
107267- /* May we actually operate on this code? */
107268- if (!chk_code_allowed(ftest->code))
107269+ if (code >= ARRAY_SIZE(codes))
107270+ return -EINVAL;
107271+ code = codes[code];
107272+ if (!code)
107273 return -EINVAL;
107274-
107275 /* Some instructions need special checks */
107276- switch (ftest->code) {
107277- case BPF_ALU | BPF_DIV | BPF_K:
107278- case BPF_ALU | BPF_MOD | BPF_K:
107279- /* Check for division by zero */
107280+ switch (code) {
107281+ case BPF_S_ALU_DIV_K:
107282+ case BPF_S_ALU_MOD_K:
107283+ /* check for division by zero */
107284 if (ftest->k == 0)
107285 return -EINVAL;
107286 break;
107287- case BPF_LD | BPF_MEM:
107288- case BPF_LDX | BPF_MEM:
107289- case BPF_ST:
107290- case BPF_STX:
107291- /* Check for invalid memory addresses */
107292+ case BPF_S_LD_MEM:
107293+ case BPF_S_LDX_MEM:
107294+ case BPF_S_ST:
107295+ case BPF_S_STX:
107296+ /* check for invalid memory addresses */
107297 if (ftest->k >= BPF_MEMWORDS)
107298 return -EINVAL;
107299 break;
107300- case BPF_JMP | BPF_JA:
107301- /* Note, the large ftest->k might cause loops.
107302+ case BPF_S_JMP_JA:
107303+ /*
107304+ * Note, the large ftest->k might cause loops.
107305 * Compare this with conditional jumps below,
107306 * where offsets are limited. --ANK (981016)
107307 */
107308- if (ftest->k >= (unsigned int)(flen - pc - 1))
107309+ if (ftest->k >= (unsigned int)(flen-pc-1))
107310 return -EINVAL;
107311 break;
107312- case BPF_JMP | BPF_JEQ | BPF_K:
107313- case BPF_JMP | BPF_JEQ | BPF_X:
107314- case BPF_JMP | BPF_JGE | BPF_K:
107315- case BPF_JMP | BPF_JGE | BPF_X:
107316- case BPF_JMP | BPF_JGT | BPF_K:
107317- case BPF_JMP | BPF_JGT | BPF_X:
107318- case BPF_JMP | BPF_JSET | BPF_K:
107319- case BPF_JMP | BPF_JSET | BPF_X:
107320- /* Both conditionals must be safe */
107321+ case BPF_S_JMP_JEQ_K:
107322+ case BPF_S_JMP_JEQ_X:
107323+ case BPF_S_JMP_JGE_K:
107324+ case BPF_S_JMP_JGE_X:
107325+ case BPF_S_JMP_JGT_K:
107326+ case BPF_S_JMP_JGT_X:
107327+ case BPF_S_JMP_JSET_X:
107328+ case BPF_S_JMP_JSET_K:
107329+ /* for conditionals both must be safe */
107330 if (pc + ftest->jt + 1 >= flen ||
107331 pc + ftest->jf + 1 >= flen)
107332 return -EINVAL;
107333 break;
107334- case BPF_LD | BPF_W | BPF_ABS:
107335- case BPF_LD | BPF_H | BPF_ABS:
107336- case BPF_LD | BPF_B | BPF_ABS:
107337+ case BPF_S_LD_W_ABS:
107338+ case BPF_S_LD_H_ABS:
107339+ case BPF_S_LD_B_ABS:
107340 anc_found = false;
107341- if (bpf_anc_helper(ftest) & BPF_ANC)
107342- anc_found = true;
107343- /* Ancillary operation unknown or unsupported */
107344+#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \
107345+ code = BPF_S_ANC_##CODE; \
107346+ anc_found = true; \
107347+ break
107348+ switch (ftest->k) {
107349+ ANCILLARY(PROTOCOL);
107350+ ANCILLARY(PKTTYPE);
107351+ ANCILLARY(IFINDEX);
107352+ ANCILLARY(NLATTR);
107353+ ANCILLARY(NLATTR_NEST);
107354+ ANCILLARY(MARK);
107355+ ANCILLARY(QUEUE);
107356+ ANCILLARY(HATYPE);
107357+ ANCILLARY(RXHASH);
107358+ ANCILLARY(CPU);
107359+ ANCILLARY(ALU_XOR_X);
107360+ ANCILLARY(VLAN_TAG);
107361+ ANCILLARY(VLAN_TAG_PRESENT);
107362+ ANCILLARY(PAY_OFFSET);
107363+ }
107364+
107365+ /* ancillary operation unknown or unsupported */
107366 if (anc_found == false && ftest->k >= SKF_AD_OFF)
107367 return -EINVAL;
107368 }
107369+ ftest->code = code;
107370 }
107371
107372- /* Last instruction must be a RET code */
107373+ /* last instruction must be a RET code */
107374 switch (filter[flen - 1].code) {
107375- case BPF_RET | BPF_K:
107376- case BPF_RET | BPF_A:
107377+ case BPF_S_RET_K:
107378+ case BPF_S_RET_A:
107379 return check_load_and_stores(filter, flen);
107380 }
107381-
107382 return -EINVAL;
107383 }
107384 EXPORT_SYMBOL(sk_chk_filter);
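
One of the special checks above in action: a constant divide-by-zero never reaches the interpreter. A sketch:

static struct sock_filter demo_bad[] = {
	{ BPF_ALU | BPF_DIV | BPF_K, 0, 0, 0 },	/* A /= 0 */
	{ BPF_RET | BPF_K,           0, 0, 0 },
};
/* sk_chk_filter(demo_bad, 2) returns -EINVAL at load time; a
 * BPF_DIV|BPF_X divisor cannot be checked statically, so sk_run_filter()
 * tests X at runtime and returns 0 (drop) instead.
 */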
107385
107386-static int sk_store_orig_filter(struct sk_filter *fp,
107387- const struct sock_fprog *fprog)
107388-{
107389- unsigned int fsize = sk_filter_proglen(fprog);
107390- struct sock_fprog_kern *fkprog;
107391-
107392- fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
107393- if (!fp->orig_prog)
107394- return -ENOMEM;
107395-
107396- fkprog = fp->orig_prog;
107397- fkprog->len = fprog->len;
107398- fkprog->filter = kmemdup(fp->insns, fsize, GFP_KERNEL);
107399- if (!fkprog->filter) {
107400- kfree(fp->orig_prog);
107401- return -ENOMEM;
107402- }
107403-
107404- return 0;
107405-}
107406-
107407-static void sk_release_orig_filter(struct sk_filter *fp)
107408-{
107409- struct sock_fprog_kern *fprog = fp->orig_prog;
107410-
107411- if (fprog) {
107412- kfree(fprog->filter);
107413- kfree(fprog);
107414- }
107415-}
107416-
107417 /**
107418 * sk_filter_release_rcu - Release a socket filter by rcu_head
107419 * @rcu: rcu_head that contains the sk_filter to free
107420 */
107421-static void sk_filter_release_rcu(struct rcu_head *rcu)
107422+void sk_filter_release_rcu(struct rcu_head *rcu)
107423 {
107424 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
107425
107426- sk_release_orig_filter(fp);
107427- sk_filter_free(fp);
107428-}
107429-
107430-/**
107431- * sk_filter_release - release a socket filter
107432- * @fp: filter to remove
107433- *
107434- * Remove a filter from a socket and release its resources.
107435- */
107436-static void sk_filter_release(struct sk_filter *fp)
107437-{
107438- if (atomic_dec_and_test(&fp->refcnt))
107439- call_rcu(&fp->rcu, sk_filter_release_rcu);
107440-}
107441-
107442-void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
107443-{
107444- atomic_sub(sk_filter_size(fp->len), &sk->sk_omem_alloc);
107445- sk_filter_release(fp);
107446-}
107447-
107448-void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
107449-{
107450- atomic_inc(&fp->refcnt);
107451- atomic_add(sk_filter_size(fp->len), &sk->sk_omem_alloc);
107452-}
107453-
107454-static struct sk_filter *__sk_migrate_realloc(struct sk_filter *fp,
107455- struct sock *sk,
107456- unsigned int len)
107457-{
107458- struct sk_filter *fp_new;
107459-
107460- if (sk == NULL)
107461- return krealloc(fp, len, GFP_KERNEL);
107462-
107463- fp_new = sock_kmalloc(sk, len, GFP_KERNEL);
107464- if (fp_new) {
107465- *fp_new = *fp;
107466-		/* As we're keeping orig_prog along in fp_new,
107467- * we need to make sure we're not evicting it
107468- * from the old fp.
107469- */
107470- fp->orig_prog = NULL;
107471- sk_filter_uncharge(sk, fp);
107472- }
107473-
107474- return fp_new;
107475-}
107476-
107477-static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
107478- struct sock *sk)
107479-{
107480- struct sock_filter *old_prog;
107481- struct sk_filter *old_fp;
107482- int err, new_len, old_len = fp->len;
107483-
107484- /* We are free to overwrite insns et al right here as it
107485- * won't be used at this point in time anymore internally
107486- * after the migration to the internal BPF instruction
107487- * representation.
107488- */
107489- BUILD_BUG_ON(sizeof(struct sock_filter) !=
107490- sizeof(struct sock_filter_int));
107491-
107492- /* Conversion cannot happen on overlapping memory areas,
107493- * so we need to keep the user BPF around until the 2nd
107494- * pass. At this time, the user BPF is stored in fp->insns.
107495- */
107496- old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
107497- GFP_KERNEL);
107498- if (!old_prog) {
107499- err = -ENOMEM;
107500- goto out_err;
107501- }
107502-
107503- /* 1st pass: calculate the new program length. */
107504- err = sk_convert_filter(old_prog, old_len, NULL, &new_len);
107505- if (err)
107506- goto out_err_free;
107507-
107508- /* Expand fp for appending the new filter representation. */
107509- old_fp = fp;
107510- fp = __sk_migrate_realloc(old_fp, sk, sk_filter_size(new_len));
107511- if (!fp) {
107512- /* The old_fp is still around in case we couldn't
107513- * allocate new memory, so uncharge on that one.
107514- */
107515- fp = old_fp;
107516- err = -ENOMEM;
107517- goto out_err_free;
107518- }
107519-
107520- fp->len = new_len;
107521-
107522- /* 2nd pass: remap sock_filter insns into sock_filter_int insns. */
107523- err = sk_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
107524- if (err)
107525-		/* The 2nd sk_convert_filter() can fail only if it fails
107526- * to allocate memory, remapping must succeed. Note,
107527- * that at this time old_fp has already been released
107528- * by __sk_migrate_realloc().
107529- */
107530- goto out_err_free;
107531-
107532- sk_filter_select_runtime(fp);
107533-
107534- kfree(old_prog);
107535- return fp;
107536-
107537-out_err_free:
107538- kfree(old_prog);
107539-out_err:
107540- /* Rollback filter setup. */
107541- if (sk != NULL)
107542- sk_filter_uncharge(sk, fp);
107543- else
107544- kfree(fp);
107545- return ERR_PTR(err);
107546-}
107547-
107548-void __weak bpf_int_jit_compile(struct sk_filter *prog)
107549-{
107550-}
107551-
107552-/**
107553- * sk_filter_select_runtime - select execution runtime for BPF program
107554- * @fp: sk_filter populated with internal BPF program
107555- *
107556- * try to JIT internal BPF program, if JIT is not available select interpreter
107557- * BPF program will be executed via SK_RUN_FILTER() macro
107558- */
107559-void sk_filter_select_runtime(struct sk_filter *fp)
107560-{
107561- fp->bpf_func = (void *) __sk_run_filter;
107562-
107563- /* Probe if internal BPF can be JITed */
107564- bpf_int_jit_compile(fp);
107565-}
107566-EXPORT_SYMBOL_GPL(sk_filter_select_runtime);
107567-
107568-/* free internal BPF program */
107569-void sk_filter_free(struct sk_filter *fp)
107570-{
107571 bpf_jit_free(fp);
107572 }
107573-EXPORT_SYMBOL_GPL(sk_filter_free);
107574+EXPORT_SYMBOL(sk_filter_release_rcu);
107575
107576-static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
107577- struct sock *sk)
107578+static int __sk_prepare_filter(struct sk_filter *fp)
107579 {
107580 int err;
107581
107582- fp->bpf_func = NULL;
107583- fp->jited = 0;
107584+ fp->bpf_func = sk_run_filter;
107585
107586 err = sk_chk_filter(fp->insns, fp->len);
107587- if (err) {
107588- if (sk != NULL)
107589- sk_filter_uncharge(sk, fp);
107590- else
107591- kfree(fp);
107592- return ERR_PTR(err);
107593- }
107594+ if (err)
107595+ return err;
107596
107597- /* Probe if we can JIT compile the filter and if so, do
107598- * the compilation of the filter.
107599- */
107600 bpf_jit_compile(fp);
107601-
107602- /* JIT compiler couldn't process this filter, so do the
107603- * internal BPF translation for the optimized interpreter.
107604- */
107605- if (!fp->jited)
107606- fp = __sk_migrate_filter(fp, sk);
107607-
107608- return fp;
107609+ return 0;
107610 }
107611
107612 /**
107613 * sk_unattached_filter_create - create an unattached filter
107614+ * @fprog: the filter program
107615 * @pfp: the unattached filter that is created
107616- * @fprog: the filter program
107617 *
107618 * Create a filter independent of any socket. We first run some
107619 * sanity checks on it to make sure it does not explode on us later.
107620@@ -1533,10 +671,11 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
107621 * a negative errno code is returned. On success the return is zero.
107622 */
107623 int sk_unattached_filter_create(struct sk_filter **pfp,
107624- struct sock_fprog_kern *fprog)
107625+ struct sock_fprog *fprog)
107626 {
107627- unsigned int fsize = sk_filter_proglen(fprog);
107628 struct sk_filter *fp;
107629+ unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
107630+ int err;
107631
107632 /* Make sure new filter is there and in the right amounts. */
107633 if (fprog->filter == NULL)
107634@@ -1545,26 +684,20 @@ int sk_unattached_filter_create(struct sk_filter **pfp,
107635 fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
107636 if (!fp)
107637 return -ENOMEM;
107638-
107639- memcpy(fp->insns, fprog->filter, fsize);
107640+ memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
107641
107642 atomic_set(&fp->refcnt, 1);
107643 fp->len = fprog->len;
107644- /* Since unattached filters are not copied back to user
107645- * space through sk_get_filter(), we do not need to hold
107646- * a copy here, and can spare us the work.
107647- */
107648- fp->orig_prog = NULL;
107649
107650- /* __sk_prepare_filter() already takes care of uncharging
107651- * memory in case something goes wrong.
107652- */
107653- fp = __sk_prepare_filter(fp, NULL);
107654- if (IS_ERR(fp))
107655- return PTR_ERR(fp);
107656+ err = __sk_prepare_filter(fp);
107657+ if (err)
107658+ goto free_mem;
107659
107660 *pfp = fp;
107661 return 0;
107662+free_mem:
107663+ kfree(fp);
107664+ return err;
107665 }
107666 EXPORT_SYMBOL_GPL(sk_unattached_filter_create);
107667
107668@@ -1587,7 +720,7 @@ EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);
107669 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
107670 {
107671 struct sk_filter *fp, *old_fp;
107672- unsigned int fsize = sk_filter_proglen(fprog);
107673+ unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
107674 unsigned int sk_fsize = sk_filter_size(fprog->len);
107675 int err;
107676
107677@@ -1601,7 +734,6 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
107678 fp = sock_kmalloc(sk, sk_fsize, GFP_KERNEL);
107679 if (!fp)
107680 return -ENOMEM;
107681-
107682 if (copy_from_user(fp->insns, fprog->filter, fsize)) {
107683 sock_kfree_s(sk, fp, sk_fsize);
107684 return -EFAULT;
107685@@ -1610,26 +742,18 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
107686 atomic_set(&fp->refcnt, 1);
107687 fp->len = fprog->len;
107688
107689- err = sk_store_orig_filter(fp, fprog);
107690+ err = __sk_prepare_filter(fp);
107691 if (err) {
107692 sk_filter_uncharge(sk, fp);
107693- return -ENOMEM;
107694+ return err;
107695 }
107696
107697- /* __sk_prepare_filter() already takes care of uncharging
107698- * memory in case something goes wrong.
107699- */
107700- fp = __sk_prepare_filter(fp, sk);
107701- if (IS_ERR(fp))
107702- return PTR_ERR(fp);
107703-
107704 old_fp = rcu_dereference_protected(sk->sk_filter,
107705 sock_owned_by_user(sk));
107706 rcu_assign_pointer(sk->sk_filter, fp);
107707
107708 if (old_fp)
107709 sk_filter_uncharge(sk, old_fp);
107710-
107711 return 0;
107712 }
107713 EXPORT_SYMBOL_GPL(sk_attach_filter);
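
sk_attach_filter() is the backend of the SO_ATTACH_FILTER socket option, so the usual userspace entry point looks like this (demo_attach is an invented wrapper; the option itself is the standard, documented API):

#include <sys/socket.h>
#include <linux/filter.h>

static int demo_attach(int fd, struct sock_filter *prog, unsigned short len)
{
	struct sock_fprog fprog = { .len = len, .filter = prog };

	/* EINVAL from here typically means sk_chk_filter() said no */
	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
			  &fprog, sizeof(fprog));
}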
107714@@ -1649,46 +773,116 @@ int sk_detach_filter(struct sock *sk)
107715 sk_filter_uncharge(sk, filter);
107716 ret = 0;
107717 }
107718-
107719 return ret;
107720 }
107721 EXPORT_SYMBOL_GPL(sk_detach_filter);
107722
107723-int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
107724- unsigned int len)
107725+void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
107726+{
107727+ static const u16 decodes[] = {
107728+ [BPF_S_ALU_ADD_K] = BPF_ALU|BPF_ADD|BPF_K,
107729+ [BPF_S_ALU_ADD_X] = BPF_ALU|BPF_ADD|BPF_X,
107730+ [BPF_S_ALU_SUB_K] = BPF_ALU|BPF_SUB|BPF_K,
107731+ [BPF_S_ALU_SUB_X] = BPF_ALU|BPF_SUB|BPF_X,
107732+ [BPF_S_ALU_MUL_K] = BPF_ALU|BPF_MUL|BPF_K,
107733+ [BPF_S_ALU_MUL_X] = BPF_ALU|BPF_MUL|BPF_X,
107734+ [BPF_S_ALU_DIV_X] = BPF_ALU|BPF_DIV|BPF_X,
107735+ [BPF_S_ALU_MOD_K] = BPF_ALU|BPF_MOD|BPF_K,
107736+ [BPF_S_ALU_MOD_X] = BPF_ALU|BPF_MOD|BPF_X,
107737+ [BPF_S_ALU_AND_K] = BPF_ALU|BPF_AND|BPF_K,
107738+ [BPF_S_ALU_AND_X] = BPF_ALU|BPF_AND|BPF_X,
107739+ [BPF_S_ALU_OR_K] = BPF_ALU|BPF_OR|BPF_K,
107740+ [BPF_S_ALU_OR_X] = BPF_ALU|BPF_OR|BPF_X,
107741+ [BPF_S_ALU_XOR_K] = BPF_ALU|BPF_XOR|BPF_K,
107742+ [BPF_S_ALU_XOR_X] = BPF_ALU|BPF_XOR|BPF_X,
107743+ [BPF_S_ALU_LSH_K] = BPF_ALU|BPF_LSH|BPF_K,
107744+ [BPF_S_ALU_LSH_X] = BPF_ALU|BPF_LSH|BPF_X,
107745+ [BPF_S_ALU_RSH_K] = BPF_ALU|BPF_RSH|BPF_K,
107746+ [BPF_S_ALU_RSH_X] = BPF_ALU|BPF_RSH|BPF_X,
107747+ [BPF_S_ALU_NEG] = BPF_ALU|BPF_NEG,
107748+ [BPF_S_LD_W_ABS] = BPF_LD|BPF_W|BPF_ABS,
107749+ [BPF_S_LD_H_ABS] = BPF_LD|BPF_H|BPF_ABS,
107750+ [BPF_S_LD_B_ABS] = BPF_LD|BPF_B|BPF_ABS,
107751+ [BPF_S_ANC_PROTOCOL] = BPF_LD|BPF_B|BPF_ABS,
107752+ [BPF_S_ANC_PKTTYPE] = BPF_LD|BPF_B|BPF_ABS,
107753+ [BPF_S_ANC_IFINDEX] = BPF_LD|BPF_B|BPF_ABS,
107754+ [BPF_S_ANC_NLATTR] = BPF_LD|BPF_B|BPF_ABS,
107755+ [BPF_S_ANC_NLATTR_NEST] = BPF_LD|BPF_B|BPF_ABS,
107756+ [BPF_S_ANC_MARK] = BPF_LD|BPF_B|BPF_ABS,
107757+ [BPF_S_ANC_QUEUE] = BPF_LD|BPF_B|BPF_ABS,
107758+ [BPF_S_ANC_HATYPE] = BPF_LD|BPF_B|BPF_ABS,
107759+ [BPF_S_ANC_RXHASH] = BPF_LD|BPF_B|BPF_ABS,
107760+ [BPF_S_ANC_CPU] = BPF_LD|BPF_B|BPF_ABS,
107761+ [BPF_S_ANC_ALU_XOR_X] = BPF_LD|BPF_B|BPF_ABS,
107762+ [BPF_S_ANC_SECCOMP_LD_W] = BPF_LD|BPF_B|BPF_ABS,
107763+ [BPF_S_ANC_VLAN_TAG] = BPF_LD|BPF_B|BPF_ABS,
107764+ [BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS,
107765+ [BPF_S_ANC_PAY_OFFSET] = BPF_LD|BPF_B|BPF_ABS,
107766+ [BPF_S_LD_W_LEN] = BPF_LD|BPF_W|BPF_LEN,
107767+ [BPF_S_LD_W_IND] = BPF_LD|BPF_W|BPF_IND,
107768+ [BPF_S_LD_H_IND] = BPF_LD|BPF_H|BPF_IND,
107769+ [BPF_S_LD_B_IND] = BPF_LD|BPF_B|BPF_IND,
107770+ [BPF_S_LD_IMM] = BPF_LD|BPF_IMM,
107771+ [BPF_S_LDX_W_LEN] = BPF_LDX|BPF_W|BPF_LEN,
107772+ [BPF_S_LDX_B_MSH] = BPF_LDX|BPF_B|BPF_MSH,
107773+ [BPF_S_LDX_IMM] = BPF_LDX|BPF_IMM,
107774+ [BPF_S_MISC_TAX] = BPF_MISC|BPF_TAX,
107775+ [BPF_S_MISC_TXA] = BPF_MISC|BPF_TXA,
107776+ [BPF_S_RET_K] = BPF_RET|BPF_K,
107777+ [BPF_S_RET_A] = BPF_RET|BPF_A,
107778+ [BPF_S_ALU_DIV_K] = BPF_ALU|BPF_DIV|BPF_K,
107779+ [BPF_S_LD_MEM] = BPF_LD|BPF_MEM,
107780+ [BPF_S_LDX_MEM] = BPF_LDX|BPF_MEM,
107781+ [BPF_S_ST] = BPF_ST,
107782+ [BPF_S_STX] = BPF_STX,
107783+ [BPF_S_JMP_JA] = BPF_JMP|BPF_JA,
107784+ [BPF_S_JMP_JEQ_K] = BPF_JMP|BPF_JEQ|BPF_K,
107785+ [BPF_S_JMP_JEQ_X] = BPF_JMP|BPF_JEQ|BPF_X,
107786+ [BPF_S_JMP_JGE_K] = BPF_JMP|BPF_JGE|BPF_K,
107787+ [BPF_S_JMP_JGE_X] = BPF_JMP|BPF_JGE|BPF_X,
107788+ [BPF_S_JMP_JGT_K] = BPF_JMP|BPF_JGT|BPF_K,
107789+ [BPF_S_JMP_JGT_X] = BPF_JMP|BPF_JGT|BPF_X,
107790+ [BPF_S_JMP_JSET_K] = BPF_JMP|BPF_JSET|BPF_K,
107791+ [BPF_S_JMP_JSET_X] = BPF_JMP|BPF_JSET|BPF_X,
107792+ };
107793+ u16 code;
107794+
107795+ code = filt->code;
107796+
107797+ to->code = decodes[code];
107798+ to->jt = filt->jt;
107799+ to->jf = filt->jf;
107800+ to->k = filt->k;
107801+}
107802+
107803+int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, unsigned int len)
107804 {
107805- struct sock_fprog_kern *fprog;
107806 struct sk_filter *filter;
107807- int ret = 0;
107808+ int i, ret;
107809
107810 lock_sock(sk);
107811 filter = rcu_dereference_protected(sk->sk_filter,
107812- sock_owned_by_user(sk));
107813+ sock_owned_by_user(sk));
107814+ ret = 0;
107815 if (!filter)
107816 goto out;
107817-
107818- /* We're copying the filter that has been originally attached,
107819- * so no conversion/decode needed anymore.
107820- */
107821- fprog = filter->orig_prog;
107822-
107823- ret = fprog->len;
107824+ ret = filter->len;
107825 if (!len)
107826- /* User space only enquires number of filter blocks. */
107827 goto out;
107828-
107829 ret = -EINVAL;
107830- if (len < fprog->len)
107831+ if (len < filter->len)
107832 goto out;
107833
107834 ret = -EFAULT;
107835- if (copy_to_user(ubuf, fprog->filter, sk_filter_proglen(fprog)))
107836- goto out;
107837+ for (i = 0; i < filter->len; i++) {
107838+ struct sock_filter fb;
107839
107840- /* Instead of bytes, the API requests to return the number
107841- * of filter blocks.
107842- */
107843- ret = fprog->len;
107844+ sk_decode_filter(&filter->insns[i], &fb);
107845+ if (copy_to_user(&ubuf[i], &fb, sizeof(fb)))
107846+ goto out;
107847+ }
107848+
107849+ ret = filter->len;
107850 out:
107851 release_sock(sk);
107852 return ret;
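
The length handshake implemented above, from the caller's side: a zero len only reports the block count, and a second call with enough room gets the decoded instructions. A kernel-side sketch with checks trimmed ('sk' and 'ubuf' assumed from context):

int n = sk_get_filter(sk, ubuf, 0);	/* 1st call: block count only */
if (n > 0)
	n = sk_get_filter(sk, ubuf, n);	/* 2nd call: decoded copy-out */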
107853diff --git a/net/core/flow.c b/net/core/flow.c
107854index a0348fd..6951c76 100644
107855--- a/net/core/flow.c
107856+++ b/net/core/flow.c
107857@@ -65,7 +65,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
107858 static int flow_entry_valid(struct flow_cache_entry *fle,
107859 struct netns_xfrm *xfrm)
107860 {
107861- if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
107862+ if (atomic_read_unchecked(&xfrm->flow_cache_genid) != fle->genid)
107863 return 0;
107864 if (fle->object && !fle->object->ops->check(fle->object))
107865 return 0;
107866@@ -242,7 +242,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
107867 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
107868 fcp->hash_count++;
107869 }
107870- } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
107871+ } else if (likely(fle->genid == atomic_read_unchecked(&net->xfrm.flow_cache_genid))) {
107872 flo = fle->object;
107873 if (!flo)
107874 goto ret_object;
107875@@ -263,7 +263,7 @@ nocache:
107876 }
107877 flo = resolver(net, key, family, dir, flo, ctx);
107878 if (fle) {
107879- fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
107880+ fle->genid = atomic_read_unchecked(&net->xfrm.flow_cache_genid);
107881 if (!IS_ERR(flo))
107882 fle->object = flo;
107883 else
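
The atomic_*_unchecked conversions in this hunk (and throughout the patch) opt individual counters out of grsecurity's atomic_t overflow protection where wraparound is harmless, as it is for this flow-cache generation id. The pattern, sketched with an invented counter (atomic_unchecked_t and its accessors are the API this patch itself introduces):

static atomic_unchecked_t demo_genid = ATOMIC_INIT(0);

static void demo_invalidate_all(void)
{
	atomic_inc_unchecked(&demo_genid);	/* wraparound is benign here */
}

static int demo_entry_valid(int entry_genid)
{
	return entry_genid == atomic_read_unchecked(&demo_genid);
}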
107884diff --git a/net/core/iovec.c b/net/core/iovec.c
107885index e1ec45a..e5c6f16 100644
107886--- a/net/core/iovec.c
107887+++ b/net/core/iovec.c
107888@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
107889 if (m->msg_name && m->msg_namelen) {
107890 if (mode == VERIFY_READ) {
107891 void __user *namep;
107892- namep = (void __user __force *) m->msg_name;
107893+ namep = (void __force_user *) m->msg_name;
107894 err = move_addr_to_kernel(namep, m->msg_namelen,
107895 address);
107896 if (err < 0)
107897@@ -55,7 +55,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
107898 }
107899
107900 size = m->msg_iovlen * sizeof(struct iovec);
107901- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
107902+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
107903 return -EFAULT;
107904
107905 m->msg_iov = iov;
107906diff --git a/net/core/neighbour.c b/net/core/neighbour.c
107907index ef31fef..8be66d9 100644
107908--- a/net/core/neighbour.c
107909+++ b/net/core/neighbour.c
107910@@ -2825,7 +2825,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
107911 void __user *buffer, size_t *lenp, loff_t *ppos)
107912 {
107913 int size, ret;
107914- struct ctl_table tmp = *ctl;
107915+ ctl_table_no_const tmp = *ctl;
107916
107917 tmp.extra1 = &zero;
107918 tmp.extra2 = &unres_qlen_max;
107919@@ -2887,7 +2887,7 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
107920 void __user *buffer,
107921 size_t *lenp, loff_t *ppos)
107922 {
107923- struct ctl_table tmp = *ctl;
107924+ ctl_table_no_const tmp = *ctl;
107925 int ret;
107926
107927 tmp.extra1 = &zero;
107928diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
107929index 2bf8329..2eb1423 100644
107930--- a/net/core/net-procfs.c
107931+++ b/net/core/net-procfs.c
107932@@ -79,7 +79,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
107933 struct rtnl_link_stats64 temp;
107934 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
107935
107936- seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
107937+ if (gr_proc_is_restricted())
107938+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
107939+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
107940+ dev->name, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
107941+ 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL);
107942+ else
107943+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
107944 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
107945 dev->name, stats->rx_bytes, stats->rx_packets,
107946 stats->rx_errors,
107947@@ -166,7 +172,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
107948 return 0;
107949 }
107950
107951-static const struct seq_operations dev_seq_ops = {
107952+const struct seq_operations dev_seq_ops = {
107953 .start = dev_seq_start,
107954 .next = dev_seq_next,
107955 .stop = dev_seq_stop,
107956@@ -196,7 +202,7 @@ static const struct seq_operations softnet_seq_ops = {
107957
107958 static int softnet_seq_open(struct inode *inode, struct file *file)
107959 {
107960- return seq_open(file, &softnet_seq_ops);
107961+ return seq_open_restrict(file, &softnet_seq_ops);
107962 }
107963
107964 static const struct file_operations softnet_seq_fops = {
107965@@ -283,8 +289,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
107966 else
107967 seq_printf(seq, "%04x", ntohs(pt->type));
107968
107969+#ifdef CONFIG_GRKERNSEC_HIDESYM
107970+ seq_printf(seq, " %-8s %pf\n",
107971+ pt->dev ? pt->dev->name : "", NULL);
107972+#else
107973 seq_printf(seq, " %-8s %pf\n",
107974 pt->dev ? pt->dev->name : "", pt->func);
107975+#endif
107976 }
107977
107978 return 0;
107979diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
107980index 1cac29e..fb482f3 100644
107981--- a/net/core/net-sysfs.c
107982+++ b/net/core/net-sysfs.c
107983@@ -259,7 +259,7 @@ static ssize_t carrier_changes_show(struct device *dev,
107984 {
107985 struct net_device *netdev = to_net_dev(dev);
107986 return sprintf(buf, fmt_dec,
107987- atomic_read(&netdev->carrier_changes));
107988+ atomic_read_unchecked(&netdev->carrier_changes));
107989 }
107990 static DEVICE_ATTR_RO(carrier_changes);
107991
107992diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
107993index 85b6269..fc77ea0 100644
107994--- a/net/core/net_namespace.c
107995+++ b/net/core/net_namespace.c
107996@@ -443,7 +443,7 @@ static int __register_pernet_operations(struct list_head *list,
107997 int error;
107998 LIST_HEAD(net_exit_list);
107999
108000- list_add_tail(&ops->list, list);
108001+ pax_list_add_tail((struct list_head *)&ops->list, list);
108002 if (ops->init || (ops->id && ops->size)) {
108003 for_each_net(net) {
108004 error = ops_init(ops, net);
108005@@ -456,7 +456,7 @@ static int __register_pernet_operations(struct list_head *list,
108006
108007 out_undo:
108008 /* If I have an error cleanup all namespaces I initialized */
108009- list_del(&ops->list);
108010+ pax_list_del((struct list_head *)&ops->list);
108011 ops_exit_list(ops, &net_exit_list);
108012 ops_free_list(ops, &net_exit_list);
108013 return error;
108014@@ -467,7 +467,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
108015 struct net *net;
108016 LIST_HEAD(net_exit_list);
108017
108018- list_del(&ops->list);
108019+ pax_list_del((struct list_head *)&ops->list);
108020 for_each_net(net)
108021 list_add_tail(&net->exit_list, &net_exit_list);
108022 ops_exit_list(ops, &net_exit_list);
108023@@ -601,7 +601,7 @@ int register_pernet_device(struct pernet_operations *ops)
108024 mutex_lock(&net_mutex);
108025 error = register_pernet_operations(&pernet_list, ops);
108026 if (!error && (first_device == &pernet_list))
108027- first_device = &ops->list;
108028+ first_device = (struct list_head *)&ops->list;
108029 mutex_unlock(&net_mutex);
108030 return error;
108031 }
108032diff --git a/net/core/netpoll.c b/net/core/netpoll.c
108033index e33937f..b2b4981 100644
108034--- a/net/core/netpoll.c
108035+++ b/net/core/netpoll.c
108036@@ -382,7 +382,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
108037 struct udphdr *udph;
108038 struct iphdr *iph;
108039 struct ethhdr *eth;
108040- static atomic_t ip_ident;
108041+ static atomic_unchecked_t ip_ident;
108042 struct ipv6hdr *ip6h;
108043
108044 udp_len = len + sizeof(*udph);
108045@@ -453,7 +453,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
108046 put_unaligned(0x45, (unsigned char *)iph);
108047 iph->tos = 0;
108048 put_unaligned(htons(ip_len), &(iph->tot_len));
108049- iph->id = htons(atomic_inc_return(&ip_ident));
108050+ iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
108051 iph->frag_off = 0;
108052 iph->ttl = 64;
108053 iph->protocol = IPPROTO_UDP;
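
atomic_unchecked_t and atomic_inc_return_unchecked() are PaX primitives: they behave like the vanilla atomics but are exempt from the REFCOUNT overflow plugin, which is the right call for the IP ident counter since it is supposed to wrap. A sketch of the intended wrap-around behaviour using standard C11 atomics as userspace stand-ins, not the kernel API:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* The ident generator is *meant* to wrap modulo 2^16; flagging that as
 * an overflow (as PaX REFCOUNT instrumentation would for a plain
 * atomic_t) would be a false positive, hence _unchecked in the patch. */
static atomic_uint ip_ident;

static uint16_t next_ident(void)
{
	return (uint16_t)(atomic_fetch_add(&ip_ident, 1) + 1);
}

int main(void)
{
	atomic_store(&ip_ident, 65534);
	printf("%u %u %u\n", next_ident(), next_ident(), next_ident());
	/* prints: 65535 0 1 -- the wrap is the expected behaviour */
	return 0;
}
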
108054diff --git a/net/core/pktgen.c b/net/core/pktgen.c
108055index fc17a9d..d4a3d88 100644
108056--- a/net/core/pktgen.c
108057+++ b/net/core/pktgen.c
108058@@ -3725,7 +3725,7 @@ static int __net_init pg_net_init(struct net *net)
108059 pn->net = net;
108060 INIT_LIST_HEAD(&pn->pktgen_threads);
108061 pn->pktgen_exiting = false;
108062- pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
108063+ pn->proc_dir = proc_mkdir_restrict(PG_PROC_DIR, pn->net->proc_net);
108064 if (!pn->proc_dir) {
108065 pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
108066 return -ENODEV;
108067diff --git a/net/core/ptp_classifier.c b/net/core/ptp_classifier.c
108068deleted file mode 100644
108069index d3027a7..0000000
108070--- a/net/core/ptp_classifier.c
108071+++ /dev/null
108072@@ -1,141 +0,0 @@
108073-/* PTP classifier
108074- *
108075- * This program is free software; you can redistribute it and/or
108076- * modify it under the terms of version 2 of the GNU General Public
108077- * License as published by the Free Software Foundation.
108078- *
108079- * This program is distributed in the hope that it will be useful, but
108080- * WITHOUT ANY WARRANTY; without even the implied warranty of
108081- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
108082- * General Public License for more details.
108083- */
108084-
108085-/* The below program is the bpf_asm (tools/net/) representation of
108086- * the opcode array in the ptp_filter structure.
108087- *
108088- * For convenience, this can easily be altered and reviewed with
108089- * bpf_asm and bpf_dbg, e.g. `./bpf_asm -c prog` where prog is a
108090- * simple file containing the below program:
108091- *
108092- * ldh [12] ; load ethertype
108093- *
108094- * ; PTP over UDP over IPv4 over Ethernet
108095- * test_ipv4:
108096- * jneq #0x800, test_ipv6 ; ETH_P_IP ?
108097- * ldb [23] ; load proto
108098- * jneq #17, drop_ipv4 ; IPPROTO_UDP ?
108099- * ldh [20] ; load frag offset field
108100- * jset #0x1fff, drop_ipv4 ; don't allow fragments
108101- * ldxb 4*([14]&0xf) ; load IP header len
108102- * ldh [x + 16] ; load UDP dst port
108103- * jneq #319, drop_ipv4 ; is port PTP_EV_PORT ?
108104- * ldh [x + 22] ; load payload
108105- * and #0xf ; mask PTP_CLASS_VMASK
108106- * or #0x10 ; PTP_CLASS_IPV4
108107- * ret a ; return PTP class
108108- * drop_ipv4: ret #0x0 ; PTP_CLASS_NONE
108109- *
108110- * ; PTP over UDP over IPv6 over Ethernet
108111- * test_ipv6:
108112- * jneq #0x86dd, test_8021q ; ETH_P_IPV6 ?
108113- * ldb [20] ; load proto
108114- * jneq #17, drop_ipv6 ; IPPROTO_UDP ?
108115- * ldh [56] ; load UDP dst port
108116- * jneq #319, drop_ipv6 ; is port PTP_EV_PORT ?
108117- * ldh [62] ; load payload
108118- * and #0xf ; mask PTP_CLASS_VMASK
108119- * or #0x20 ; PTP_CLASS_IPV6
108120- * ret a ; return PTP class
108121- * drop_ipv6: ret #0x0 ; PTP_CLASS_NONE
108122- *
108123- * ; PTP over 802.1Q over Ethernet
108124- * test_8021q:
108125- * jneq #0x8100, test_ieee1588 ; ETH_P_8021Q ?
108126- * ldh [16] ; load inner type
108127- * jneq #0x88f7, drop_ieee1588 ; ETH_P_1588 ?
108128- * ldb [18] ; load payload
108129- * and #0x8 ; as we don't have ports here, test
108130- * jneq #0x0, drop_ieee1588 ; for PTP_GEN_BIT and drop these
108131- * ldh [18] ; reload payload
108132- * and #0xf ; mask PTP_CLASS_VMASK
108133- * or #0x40 ; PTP_CLASS_V2_VLAN
108134- * ret a ; return PTP class
108135- *
108136- * ; PTP over Ethernet
108137- * test_ieee1588:
108138- * jneq #0x88f7, drop_ieee1588 ; ETH_P_1588 ?
108139- * ldb [14] ; load payload
108140- * and #0x8 ; as we don't have ports here, test
108141- * jneq #0x0, drop_ieee1588 ; for PTP_GEN_BIT and drop these
108142- * ldh [14] ; reload payload
108143- * and #0xf ; mask PTP_CLASS_VMASK
108144- * or #0x30 ; PTP_CLASS_L2
108145- * ret a ; return PTP class
108146- * drop_ieee1588: ret #0x0 ; PTP_CLASS_NONE
108147- */
108148-
108149-#include <linux/skbuff.h>
108150-#include <linux/filter.h>
108151-#include <linux/ptp_classify.h>
108152-
108153-static struct sk_filter *ptp_insns __read_mostly;
108154-
108155-unsigned int ptp_classify_raw(const struct sk_buff *skb)
108156-{
108157- return SK_RUN_FILTER(ptp_insns, skb);
108158-}
108159-EXPORT_SYMBOL_GPL(ptp_classify_raw);
108160-
108161-void __init ptp_classifier_init(void)
108162-{
108163- static struct sock_filter ptp_filter[] __initdata = {
108164- { 0x28, 0, 0, 0x0000000c },
108165- { 0x15, 0, 12, 0x00000800 },
108166- { 0x30, 0, 0, 0x00000017 },
108167- { 0x15, 0, 9, 0x00000011 },
108168- { 0x28, 0, 0, 0x00000014 },
108169- { 0x45, 7, 0, 0x00001fff },
108170- { 0xb1, 0, 0, 0x0000000e },
108171- { 0x48, 0, 0, 0x00000010 },
108172- { 0x15, 0, 4, 0x0000013f },
108173- { 0x48, 0, 0, 0x00000016 },
108174- { 0x54, 0, 0, 0x0000000f },
108175- { 0x44, 0, 0, 0x00000010 },
108176- { 0x16, 0, 0, 0x00000000 },
108177- { 0x06, 0, 0, 0x00000000 },
108178- { 0x15, 0, 9, 0x000086dd },
108179- { 0x30, 0, 0, 0x00000014 },
108180- { 0x15, 0, 6, 0x00000011 },
108181- { 0x28, 0, 0, 0x00000038 },
108182- { 0x15, 0, 4, 0x0000013f },
108183- { 0x28, 0, 0, 0x0000003e },
108184- { 0x54, 0, 0, 0x0000000f },
108185- { 0x44, 0, 0, 0x00000020 },
108186- { 0x16, 0, 0, 0x00000000 },
108187- { 0x06, 0, 0, 0x00000000 },
108188- { 0x15, 0, 9, 0x00008100 },
108189- { 0x28, 0, 0, 0x00000010 },
108190- { 0x15, 0, 15, 0x000088f7 },
108191- { 0x30, 0, 0, 0x00000012 },
108192- { 0x54, 0, 0, 0x00000008 },
108193- { 0x15, 0, 12, 0x00000000 },
108194- { 0x28, 0, 0, 0x00000012 },
108195- { 0x54, 0, 0, 0x0000000f },
108196- { 0x44, 0, 0, 0x00000040 },
108197- { 0x16, 0, 0, 0x00000000 },
108198- { 0x15, 0, 7, 0x000088f7 },
108199- { 0x30, 0, 0, 0x0000000e },
108200- { 0x54, 0, 0, 0x00000008 },
108201- { 0x15, 0, 4, 0x00000000 },
108202- { 0x28, 0, 0, 0x0000000e },
108203- { 0x54, 0, 0, 0x0000000f },
108204- { 0x44, 0, 0, 0x00000030 },
108205- { 0x16, 0, 0, 0x00000000 },
108206- { 0x06, 0, 0, 0x00000000 },
108207- };
108208- struct sock_fprog_kern ptp_prog = {
108209- .len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter,
108210- };
108211-
108212- BUG_ON(sk_unattached_filter_create(&ptp_insns, &ptp_prog));
108213-}
108214diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
108215index 1063996..0729c19 100644
108216--- a/net/core/rtnetlink.c
108217+++ b/net/core/rtnetlink.c
108218@@ -58,7 +58,7 @@ struct rtnl_link {
108219 rtnl_doit_func doit;
108220 rtnl_dumpit_func dumpit;
108221 rtnl_calcit_func calcit;
108222-};
108223+} __no_const;
108224
108225 static DEFINE_MUTEX(rtnl_mutex);
108226
108227@@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
108228 if (rtnl_link_ops_get(ops->kind))
108229 return -EEXIST;
108230
108231- if (!ops->dellink)
108232- ops->dellink = unregister_netdevice_queue;
108233+ if (!ops->dellink) {
108234+ pax_open_kernel();
108235+ *(void **)&ops->dellink = unregister_netdevice_queue;
108236+ pax_close_kernel();
108237+ }
108238
108239- list_add_tail(&ops->list, &link_ops);
108240+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
108241 return 0;
108242 }
108243 EXPORT_SYMBOL_GPL(__rtnl_link_register);
108244@@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
108245 for_each_net(net) {
108246 __rtnl_kill_links(net, ops);
108247 }
108248- list_del(&ops->list);
108249+ pax_list_del((struct list_head *)&ops->list);
108250 }
108251 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
108252
108253@@ -1008,7 +1011,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
108254 (dev->ifalias &&
108255 nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
108256 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
108257- atomic_read(&dev->carrier_changes)))
108258+ atomic_read_unchecked(&dev->carrier_changes)))
108259 goto nla_put_failure;
108260
108261 if (1) {
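
pax_open_kernel()/pax_close_kernel() briefly lift kernel write protection so that function-pointer members of structures the PaX constify plugin has made read-only (such as rtnl_link_ops here) can still be set at registration time. A userspace analogue of the idea using mprotect(2) -- illustrative only; the kernel mechanism toggles its own write protection rather than calling into the VM:

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	/* Stand-in for an ops structure living in a read-only section. */
	void (**dellink)(void) = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
				      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (dellink == MAP_FAILED)
		return 1;
	*dellink = NULL;
	mprotect(dellink, pagesz, PROT_READ);	/* "constify" it */

	/* pax_open_kernel() analogue: make it writable ... */
	mprotect(dellink, pagesz, PROT_READ | PROT_WRITE);
	*dellink = (void (*)(void))abort;	/* install default handler */
	/* ... pax_close_kernel() analogue: write-protect again. */
	mprotect(dellink, pagesz, PROT_READ);

	printf("installed %p\n", (void *)*dellink);
	return 0;
}
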
108262diff --git a/net/core/scm.c b/net/core/scm.c
108263index b442e7e..6f5b5a2 100644
108264--- a/net/core/scm.c
108265+++ b/net/core/scm.c
108266@@ -210,7 +210,7 @@ EXPORT_SYMBOL(__scm_send);
108267 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
108268 {
108269 struct cmsghdr __user *cm
108270- = (__force struct cmsghdr __user *)msg->msg_control;
108271+ = (struct cmsghdr __force_user *)msg->msg_control;
108272 struct cmsghdr cmhdr;
108273 int cmlen = CMSG_LEN(len);
108274 int err;
108275@@ -233,7 +233,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
108276 err = -EFAULT;
108277 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
108278 goto out;
108279- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
108280+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
108281 goto out;
108282 cmlen = CMSG_SPACE(len);
108283 if (msg->msg_controllen < cmlen)
108284@@ -249,7 +249,7 @@ EXPORT_SYMBOL(put_cmsg);
108285 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
108286 {
108287 struct cmsghdr __user *cm
108288- = (__force struct cmsghdr __user*)msg->msg_control;
108289+ = (struct cmsghdr __force_user *)msg->msg_control;
108290
108291 int fdmax = 0;
108292 int fdnum = scm->fp->count;
108293@@ -269,7 +269,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
108294 if (fdnum < fdmax)
108295 fdmax = fdnum;
108296
108297- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
108298+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
108299 i++, cmfptr++)
108300 {
108301 struct socket *sock;
108302diff --git a/net/core/skbuff.c b/net/core/skbuff.c
108303index 58ff88e..af9b458 100644
108304--- a/net/core/skbuff.c
108305+++ b/net/core/skbuff.c
108306@@ -2010,7 +2010,7 @@ EXPORT_SYMBOL(__skb_checksum);
108307 __wsum skb_checksum(const struct sk_buff *skb, int offset,
108308 int len, __wsum csum)
108309 {
108310- const struct skb_checksum_ops ops = {
108311+ static const struct skb_checksum_ops ops = {
108312 .update = csum_partial_ext,
108313 .combine = csum_block_add_ext,
108314 };
108315@@ -3233,13 +3233,15 @@ void __init skb_init(void)
108316 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
108317 sizeof(struct sk_buff),
108318 0,
108319- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
108320+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
108321+ SLAB_NO_SANITIZE,
108322 NULL);
108323 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
108324 (2*sizeof(struct sk_buff)) +
108325 sizeof(atomic_t),
108326 0,
108327- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
108328+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
108329+ SLAB_NO_SANITIZE,
108330 NULL);
108331 }
108332
108333diff --git a/net/core/sock.c b/net/core/sock.c
108334index 026e01f..f54f908 100644
108335--- a/net/core/sock.c
108336+++ b/net/core/sock.c
108337@@ -442,7 +442,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
108338 struct sk_buff_head *list = &sk->sk_receive_queue;
108339
108340 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
108341- atomic_inc(&sk->sk_drops);
108342+ atomic_inc_unchecked(&sk->sk_drops);
108343 trace_sock_rcvqueue_full(sk, skb);
108344 return -ENOMEM;
108345 }
108346@@ -452,7 +452,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
108347 return err;
108348
108349 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
108350- atomic_inc(&sk->sk_drops);
108351+ atomic_inc_unchecked(&sk->sk_drops);
108352 return -ENOBUFS;
108353 }
108354
108355@@ -472,7 +472,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
108356 skb_dst_force(skb);
108357
108358 spin_lock_irqsave(&list->lock, flags);
108359- skb->dropcount = atomic_read(&sk->sk_drops);
108360+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
108361 __skb_queue_tail(list, skb);
108362 spin_unlock_irqrestore(&list->lock, flags);
108363
108364@@ -492,7 +492,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
108365 skb->dev = NULL;
108366
108367 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
108368- atomic_inc(&sk->sk_drops);
108369+ atomic_inc_unchecked(&sk->sk_drops);
108370 goto discard_and_relse;
108371 }
108372 if (nested)
108373@@ -510,7 +510,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
108374 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
108375 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
108376 bh_unlock_sock(sk);
108377- atomic_inc(&sk->sk_drops);
108378+ atomic_inc_unchecked(&sk->sk_drops);
108379 goto discard_and_relse;
108380 }
108381
108382@@ -998,12 +998,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
108383 struct timeval tm;
108384 } v;
108385
108386- int lv = sizeof(int);
108387- int len;
108388+ unsigned int lv = sizeof(int);
108389+ unsigned int len;
108390
108391 if (get_user(len, optlen))
108392 return -EFAULT;
108393- if (len < 0)
108394+ if (len > INT_MAX)
108395 return -EINVAL;
108396
108397 memset(&v, 0, sizeof(v));
108398@@ -1155,11 +1155,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
108399
108400 case SO_PEERNAME:
108401 {
108402- char address[128];
108403+ char address[_K_SS_MAXSIZE];
108404
108405 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
108406 return -ENOTCONN;
108407- if (lv < len)
108408+ if (lv < len || sizeof address < len)
108409 return -EINVAL;
108410 if (copy_to_user(optval, address, len))
108411 return -EFAULT;
108412@@ -1240,7 +1240,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
108413
108414 if (len > lv)
108415 len = lv;
108416- if (copy_to_user(optval, &v, len))
108417+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
108418 return -EFAULT;
108419 lenout:
108420 if (put_user(len, optlen))
108421@@ -2375,7 +2375,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
108422 */
108423 smp_wmb();
108424 atomic_set(&sk->sk_refcnt, 1);
108425- atomic_set(&sk->sk_drops, 0);
108426+ atomic_set_unchecked(&sk->sk_drops, 0);
108427 }
108428 EXPORT_SYMBOL(sock_init_data);
108429
108430@@ -2503,6 +2503,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
108431 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
108432 int level, int type)
108433 {
108434+ struct sock_extended_err ee;
108435 struct sock_exterr_skb *serr;
108436 struct sk_buff *skb, *skb2;
108437 int copied, err;
108438@@ -2524,7 +2525,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
108439 sock_recv_timestamp(msg, sk, skb);
108440
108441 serr = SKB_EXT_ERR(skb);
108442- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
108443+ ee = serr->ee;
108444+ put_cmsg(msg, level, type, sizeof ee, &ee);
108445
108446 msg->msg_flags |= MSG_ERRQUEUE;
108447 err = copied;
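
Switching lv/len in sock_getsockopt() to unsigned int turns any negative userland length into a huge value that the new len > INT_MAX test rejects, and the added len > sizeof(v) guard bounds the final copy_to_user() by the kernel object's real size. The core of that check as a runnable sketch:

#include <limits.h>
#include <stdio.h>
#include <string.h>

/* Mirrors the patched checks: reject lengths that were negative as a
 * signed int, and never copy more than the backing object holds. */
static int bounded_copy(void *dst, unsigned int len,
			const void *src, size_t srcsz)
{
	if (len > INT_MAX)		/* was a negative signed value */
		return -1;
	if (len > srcsz)		/* larger than the kernel object */
		return -1;
	memcpy(dst, src, len);
	return 0;
}

int main(void)
{
	int v = 42, out = 0;
	int user_len = -4;		/* hostile getsockopt *optlen */
	int rc;

	printf("negative len: %d\n",
	       bounded_copy(&out, (unsigned int)user_len, &v, sizeof(v)));
	rc = bounded_copy(&out, sizeof(v), &v, sizeof(v));
	printf("sane len: %d, out=%d\n", rc, out);
	return 0;
}
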
108448diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
108449index a4216a4..773e3d7 100644
108450--- a/net/core/sock_diag.c
108451+++ b/net/core/sock_diag.c
108452@@ -9,26 +9,33 @@
108453 #include <linux/inet_diag.h>
108454 #include <linux/sock_diag.h>
108455
108456-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
108457+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
108458 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
108459 static DEFINE_MUTEX(sock_diag_table_mutex);
108460
108461 int sock_diag_check_cookie(void *sk, __u32 *cookie)
108462 {
108463+#ifndef CONFIG_GRKERNSEC_HIDESYM
108464 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
108465 cookie[1] != INET_DIAG_NOCOOKIE) &&
108466 ((u32)(unsigned long)sk != cookie[0] ||
108467 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
108468 return -ESTALE;
108469 else
108470+#endif
108471 return 0;
108472 }
108473 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
108474
108475 void sock_diag_save_cookie(void *sk, __u32 *cookie)
108476 {
108477+#ifdef CONFIG_GRKERNSEC_HIDESYM
108478+ cookie[0] = 0;
108479+ cookie[1] = 0;
108480+#else
108481 cookie[0] = (u32)(unsigned long)sk;
108482 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
108483+#endif
108484 }
108485 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
108486
108487@@ -52,10 +59,9 @@ EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);
108488 int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
108489 struct sk_buff *skb, int attrtype)
108490 {
108491- struct sock_fprog_kern *fprog;
108492- struct sk_filter *filter;
108493 struct nlattr *attr;
108494- unsigned int flen;
108495+ struct sk_filter *filter;
108496+ unsigned int len;
108497 int err = 0;
108498
108499 if (!may_report_filterinfo) {
108500@@ -64,20 +70,24 @@ int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
108501 }
108502
108503 rcu_read_lock();
108504+
108505 filter = rcu_dereference(sk->sk_filter);
108506- if (!filter)
108507- goto out;
108508+ len = filter ? filter->len * sizeof(struct sock_filter) : 0;
108509
108510- fprog = filter->orig_prog;
108511- flen = sk_filter_proglen(fprog);
108512-
108513- attr = nla_reserve(skb, attrtype, flen);
108514+ attr = nla_reserve(skb, attrtype, len);
108515 if (attr == NULL) {
108516 err = -EMSGSIZE;
108517 goto out;
108518 }
108519
108520- memcpy(nla_data(attr), fprog->filter, flen);
108521+ if (filter) {
108522+ struct sock_filter *fb = (struct sock_filter *)nla_data(attr);
108523+ int i;
108524+
108525+ for (i = 0; i < filter->len; i++, fb++)
108526+ sk_decode_filter(&filter->insns[i], fb);
108527+ }
108528+
108529 out:
108530 rcu_read_unlock();
108531 return err;
108532@@ -110,8 +120,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
108533 mutex_lock(&sock_diag_table_mutex);
108534 if (sock_diag_handlers[hndl->family])
108535 err = -EBUSY;
108536- else
108537+ else {
108538+ pax_open_kernel();
108539 sock_diag_handlers[hndl->family] = hndl;
108540+ pax_close_kernel();
108541+ }
108542 mutex_unlock(&sock_diag_table_mutex);
108543
108544 return err;
108545@@ -127,7 +140,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
108546
108547 mutex_lock(&sock_diag_table_mutex);
108548 BUG_ON(sock_diag_handlers[family] != hnld);
108549+ pax_open_kernel();
108550 sock_diag_handlers[family] = NULL;
108551+ pax_close_kernel();
108552 mutex_unlock(&sock_diag_table_mutex);
108553 }
108554 EXPORT_SYMBOL_GPL(sock_diag_unregister);
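
Under CONFIG_GRKERNSEC_HIDESYM the sock_diag cookie, which is derived from the struct sock address, is forced to zero on save and the staleness check is compiled out -- otherwise every inet_diag/ss user could read back kernel heap pointers. A sketch of the two paths, with HIDESYM modelled as a runtime flag (in the kernel it is a compile-time #ifdef):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static const bool hidesym = true;   /* CONFIG_GRKERNSEC_HIDESYM stand-in */

static void save_cookie(const void *sk, uint32_t cookie[2])
{
	if (hidesym) {
		cookie[0] = 0;
		cookie[1] = 0;
	} else {
		cookie[0] = (uint32_t)(uintptr_t)sk;
		cookie[1] = (uint32_t)(((uintptr_t)sk >> 31) >> 1);
	}
}

int main(void)
{
	int sk;				/* stand-in for a struct sock */
	uint32_t cookie[2];

	save_cookie(&sk, cookie);
	printf("cookie: %08x%08x\n", cookie[1], cookie[0]);
	return 0;
}
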
108555diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
108556index cf9cd13..8b56af3 100644
108557--- a/net/core/sysctl_net_core.c
108558+++ b/net/core/sysctl_net_core.c
108559@@ -32,7 +32,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
108560 {
108561 unsigned int orig_size, size;
108562 int ret, i;
108563- struct ctl_table tmp = {
108564+ ctl_table_no_const tmp = {
108565 .data = &size,
108566 .maxlen = sizeof(size),
108567 .mode = table->mode
108568@@ -200,7 +200,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
108569 void __user *buffer, size_t *lenp, loff_t *ppos)
108570 {
108571 char id[IFNAMSIZ];
108572- struct ctl_table tbl = {
108573+ ctl_table_no_const tbl = {
108574 .data = id,
108575 .maxlen = IFNAMSIZ,
108576 };
108577@@ -379,13 +379,12 @@ static struct ctl_table netns_core_table[] = {
108578
108579 static __net_init int sysctl_core_net_init(struct net *net)
108580 {
108581- struct ctl_table *tbl;
108582+ ctl_table_no_const *tbl = NULL;
108583
108584 net->core.sysctl_somaxconn = SOMAXCONN;
108585
108586- tbl = netns_core_table;
108587 if (!net_eq(net, &init_net)) {
108588- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
108589+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
108590 if (tbl == NULL)
108591 goto err_dup;
108592
108593@@ -395,17 +394,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
108594 if (net->user_ns != &init_user_ns) {
108595 tbl[0].procname = NULL;
108596 }
108597- }
108598-
108599- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
108600+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
108601+ } else
108602+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
108603 if (net->core.sysctl_hdr == NULL)
108604 goto err_reg;
108605
108606 return 0;
108607
108608 err_reg:
108609- if (tbl != netns_core_table)
108610- kfree(tbl);
108611+ kfree(tbl);
108612 err_dup:
108613 return -ENOMEM;
108614 }
108615@@ -420,7 +418,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
108616 kfree(tbl);
108617 }
108618
108619-static __net_initdata struct pernet_operations sysctl_core_ops = {
108620+static __net_initconst struct pernet_operations sysctl_core_ops = {
108621 .init = sysctl_core_net_init,
108622 .exit = sysctl_core_net_exit,
108623 };
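
The sysctl rework here is a recurring grsecurity pattern (repeated below for devinet, ip_fragment, route and others): the template table stays const, a ctl_table_no_const copy is kmemdup'd only for non-init namespaces, init_net registers the const template directly, and since kfree(NULL) is a no-op the error path can free unconditionally. A userspace rendering of the shape, with register_table() as a hypothetical stand-in for register_net_sysctl():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry { const char *procname; int data; };

static const struct entry template_tbl[] = {
	{ "somaxconn", 128 },
	{ NULL, 0 },			/* sentinel, like the kernel's { } */
};

static void register_table(const struct entry *tbl)
{
	for (; tbl->procname; tbl++)
		printf("%s = %d\n", tbl->procname, tbl->data);
}

static int init_ns(int is_init_ns)
{
	struct entry *tbl = NULL;

	if (!is_init_ns) {
		tbl = malloc(sizeof(template_tbl));
		if (!tbl)
			goto err;	/* free(NULL) below is a no-op */
		memcpy(tbl, template_tbl, sizeof(template_tbl));
		tbl[0].data = 4096;	/* per-namespace override */
		/* dup stays alive while registered, freed on ns exit */
		register_table(tbl);
	} else {
		register_table(template_tbl);	/* const stays const */
	}
	return 0;
err:
	free(tbl);
	return -1;
}

int main(void)
{
	int rc = init_ns(1);

	rc |= init_ns(0);
	return rc;
}
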
108624diff --git a/net/core/timestamping.c b/net/core/timestamping.c
108625index 6521dfd..661b5a4 100644
108626--- a/net/core/timestamping.c
108627+++ b/net/core/timestamping.c
108628@@ -23,11 +23,16 @@
108629 #include <linux/skbuff.h>
108630 #include <linux/export.h>
108631
108632+static struct sock_filter ptp_filter[] = {
108633+ PTP_FILTER
108634+};
108635+
108636 static unsigned int classify(const struct sk_buff *skb)
108637 {
108638- if (likely(skb->dev && skb->dev->phydev &&
108639+ if (likely(skb->dev &&
108640+ skb->dev->phydev &&
108641 skb->dev->phydev->drv))
108642- return ptp_classify_raw(skb);
108643+ return sk_run_filter(skb, ptp_filter);
108644 else
108645 return PTP_CLASS_NONE;
108646 }
108647@@ -55,13 +60,11 @@ void skb_clone_tx_timestamp(struct sk_buff *skb)
108648 if (likely(phydev->drv->txtstamp)) {
108649 if (!atomic_inc_not_zero(&sk->sk_refcnt))
108650 return;
108651-
108652 clone = skb_clone(skb, GFP_ATOMIC);
108653 if (!clone) {
108654 sock_put(sk);
108655 return;
108656 }
108657-
108658 clone->sk = sk;
108659 phydev->drv->txtstamp(phydev, clone, type);
108660 }
108661@@ -86,15 +89,12 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
108662 }
108663
108664 *skb_hwtstamps(skb) = *hwtstamps;
108665-
108666 serr = SKB_EXT_ERR(skb);
108667 memset(serr, 0, sizeof(*serr));
108668 serr->ee.ee_errno = ENOMSG;
108669 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
108670 skb->sk = NULL;
108671-
108672 err = sock_queue_err_skb(sk, skb);
108673-
108674 sock_put(sk);
108675 if (err)
108676 kfree_skb(skb);
108677@@ -132,3 +132,8 @@ bool skb_defer_rx_timestamp(struct sk_buff *skb)
108678 return false;
108679 }
108680 EXPORT_SYMBOL_GPL(skb_defer_rx_timestamp);
108681+
108682+void __init skb_timestamping_init(void)
108683+{
108684+ BUG_ON(sk_chk_filter(ptp_filter, ARRAY_SIZE(ptp_filter)));
108685+}
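
The timestamping revert goes back to a static classic-BPF ptp_filter that is checked once at boot with sk_chk_filter() and then run directly with sk_run_filter(). Userspace can exercise the same classic-BPF machinery via SO_ATTACH_FILTER, where the kernel performs the equivalent validation at attach time; a minimal accept-nothing filter (Linux-only):

#include <linux/filter.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	/* "ret #0": classic BPF program that drops every packet. */
	struct sock_filter insns[] = {
		{ 0x06, 0, 0, 0x00000000 },	/* BPF_RET | BPF_K, 0 */
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	/* The kernel validates the program here, just as sk_chk_filter()
	 * does for the in-kernel ptp_filter at init time. */
	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
		       &prog, sizeof(prog)) < 0) {
		perror("SO_ATTACH_FILTER");
		close(fd);
		return 1;
	}
	puts("classic BPF program accepted");
	close(fd);
	return 0;
}
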
108686diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
108687index ae011b4..d2d18bf 100644
108688--- a/net/decnet/af_decnet.c
108689+++ b/net/decnet/af_decnet.c
108690@@ -465,6 +465,7 @@ static struct proto dn_proto = {
108691 .sysctl_rmem = sysctl_decnet_rmem,
108692 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
108693 .obj_size = sizeof(struct dn_sock),
108694+ .slab_flags = SLAB_USERCOPY,
108695 };
108696
108697 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
108698diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
108699index 3b726f3..1af6368 100644
108700--- a/net/decnet/dn_dev.c
108701+++ b/net/decnet/dn_dev.c
108702@@ -200,7 +200,7 @@ static struct dn_dev_sysctl_table {
108703 .extra1 = &min_t3,
108704 .extra2 = &max_t3
108705 },
108706- {0}
108707+ { }
108708 },
108709 };
108710
108711diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
108712index 5325b54..a0d4d69 100644
108713--- a/net/decnet/sysctl_net_decnet.c
108714+++ b/net/decnet/sysctl_net_decnet.c
108715@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
108716
108717 if (len > *lenp) len = *lenp;
108718
108719- if (copy_to_user(buffer, addr, len))
108720+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
108721 return -EFAULT;
108722
108723 *lenp = len;
108724@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
108725
108726 if (len > *lenp) len = *lenp;
108727
108728- if (copy_to_user(buffer, devname, len))
108729+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
108730 return -EFAULT;
108731
108732 *lenp = len;
108733diff --git a/net/ieee802154/reassembly.c b/net/ieee802154/reassembly.c
108734index 6f1428c..9586b83 100644
108735--- a/net/ieee802154/reassembly.c
108736+++ b/net/ieee802154/reassembly.c
108737@@ -438,14 +438,13 @@ static struct ctl_table lowpan_frags_ctl_table[] = {
108738
108739 static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
108740 {
108741- struct ctl_table *table;
108742+ ctl_table_no_const *table = NULL;
108743 struct ctl_table_header *hdr;
108744 struct netns_ieee802154_lowpan *ieee802154_lowpan =
108745 net_ieee802154_lowpan(net);
108746
108747- table = lowpan_frags_ns_ctl_table;
108748 if (!net_eq(net, &init_net)) {
108749- table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
108750+ table = kmemdup(lowpan_frags_ns_ctl_table, sizeof(lowpan_frags_ns_ctl_table),
108751 GFP_KERNEL);
108752 if (table == NULL)
108753 goto err_alloc;
108754@@ -458,9 +457,9 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
108755 /* Don't export sysctls to unprivileged users */
108756 if (net->user_ns != &init_user_ns)
108757 table[0].procname = NULL;
108758- }
108759-
108760- hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
108761+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
108762+ } else
108763+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", lowpan_frags_ns_ctl_table);
108764 if (hdr == NULL)
108765 goto err_reg;
108766
108767@@ -468,8 +467,7 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
108768 return 0;
108769
108770 err_reg:
108771- if (!net_eq(net, &init_net))
108772- kfree(table);
108773+ kfree(table);
108774 err_alloc:
108775 return -ENOMEM;
108776 }
108777diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
108778index e944937..368fe78 100644
108779--- a/net/ipv4/devinet.c
108780+++ b/net/ipv4/devinet.c
108781@@ -1540,7 +1540,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
108782 idx = 0;
108783 head = &net->dev_index_head[h];
108784 rcu_read_lock();
108785- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
108786+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
108787 net->dev_base_seq;
108788 hlist_for_each_entry_rcu(dev, head, index_hlist) {
108789 if (idx < s_idx)
108790@@ -1858,7 +1858,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
108791 idx = 0;
108792 head = &net->dev_index_head[h];
108793 rcu_read_lock();
108794- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
108795+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
108796 net->dev_base_seq;
108797 hlist_for_each_entry_rcu(dev, head, index_hlist) {
108798 if (idx < s_idx)
108799@@ -2093,7 +2093,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
108800 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
108801 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
108802
108803-static struct devinet_sysctl_table {
108804+static const struct devinet_sysctl_table {
108805 struct ctl_table_header *sysctl_header;
108806 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
108807 } devinet_sysctl = {
108808@@ -2215,7 +2215,7 @@ static __net_init int devinet_init_net(struct net *net)
108809 int err;
108810 struct ipv4_devconf *all, *dflt;
108811 #ifdef CONFIG_SYSCTL
108812- struct ctl_table *tbl = ctl_forward_entry;
108813+ ctl_table_no_const *tbl = NULL;
108814 struct ctl_table_header *forw_hdr;
108815 #endif
108816
108817@@ -2233,7 +2233,7 @@ static __net_init int devinet_init_net(struct net *net)
108818 goto err_alloc_dflt;
108819
108820 #ifdef CONFIG_SYSCTL
108821- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
108822+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
108823 if (tbl == NULL)
108824 goto err_alloc_ctl;
108825
108826@@ -2253,7 +2253,10 @@ static __net_init int devinet_init_net(struct net *net)
108827 goto err_reg_dflt;
108828
108829 err = -ENOMEM;
108830- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
108831+ if (!net_eq(net, &init_net))
108832+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
108833+ else
108834+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
108835 if (forw_hdr == NULL)
108836 goto err_reg_ctl;
108837 net->ipv4.forw_hdr = forw_hdr;
108838@@ -2269,8 +2272,7 @@ err_reg_ctl:
108839 err_reg_dflt:
108840 __devinet_sysctl_unregister(all);
108841 err_reg_all:
108842- if (tbl != ctl_forward_entry)
108843- kfree(tbl);
108844+ kfree(tbl);
108845 err_alloc_ctl:
108846 #endif
108847 if (dflt != &ipv4_devconf_dflt)
108848diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
108849index 255aa99..45c78f8 100644
108850--- a/net/ipv4/fib_frontend.c
108851+++ b/net/ipv4/fib_frontend.c
108852@@ -1015,12 +1015,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
108853 #ifdef CONFIG_IP_ROUTE_MULTIPATH
108854 fib_sync_up(dev);
108855 #endif
108856- atomic_inc(&net->ipv4.dev_addr_genid);
108857+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
108858 rt_cache_flush(dev_net(dev));
108859 break;
108860 case NETDEV_DOWN:
108861 fib_del_ifaddr(ifa, NULL);
108862- atomic_inc(&net->ipv4.dev_addr_genid);
108863+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
108864 if (ifa->ifa_dev->ifa_list == NULL) {
108865 /* Last address was deleted from this interface.
108866 * Disable IP.
108867@@ -1058,7 +1058,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
108868 #ifdef CONFIG_IP_ROUTE_MULTIPATH
108869 fib_sync_up(dev);
108870 #endif
108871- atomic_inc(&net->ipv4.dev_addr_genid);
108872+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
108873 rt_cache_flush(net);
108874 break;
108875 case NETDEV_DOWN:
108876diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
108877index b10cd43a..22327f9 100644
108878--- a/net/ipv4/fib_semantics.c
108879+++ b/net/ipv4/fib_semantics.c
108880@@ -768,7 +768,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
108881 nh->nh_saddr = inet_select_addr(nh->nh_dev,
108882 nh->nh_gw,
108883 nh->nh_parent->fib_scope);
108884- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
108885+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
108886
108887 return nh->nh_saddr;
108888 }
108889diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
108890index 43116e8..e3e6159 100644
108891--- a/net/ipv4/inet_hashtables.c
108892+++ b/net/ipv4/inet_hashtables.c
108893@@ -18,6 +18,7 @@
108894 #include <linux/sched.h>
108895 #include <linux/slab.h>
108896 #include <linux/wait.h>
108897+#include <linux/security.h>
108898
108899 #include <net/inet_connection_sock.h>
108900 #include <net/inet_hashtables.h>
108901@@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk)
108902 return inet_ehashfn(net, laddr, lport, faddr, fport);
108903 }
108904
108905+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
108906+
108907 /*
108908 * Allocate and initialize a new local port bind bucket.
108909 * The bindhash mutex for snum's hash chain must be held here.
108910@@ -554,6 +557,8 @@ ok:
108911 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
108912 spin_unlock(&head->lock);
108913
108914+ gr_update_task_in_ip_table(current, inet_sk(sk));
108915+
108916 if (tw) {
108917 inet_twsk_deschedule(tw, death_row);
108918 while (twrefcnt) {
108919diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
108920index bd5f592..e80e605 100644
108921--- a/net/ipv4/inetpeer.c
108922+++ b/net/ipv4/inetpeer.c
108923@@ -482,7 +482,7 @@ relookup:
108924 if (p) {
108925 p->daddr = *daddr;
108926 atomic_set(&p->refcnt, 1);
108927- atomic_set(&p->rid, 0);
108928+ atomic_set_unchecked(&p->rid, 0);
108929 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
108930 p->rate_tokens = 0;
108931 /* 60*HZ is arbitrary, but chosen enough high so that the first
108932diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
108933index ed32313..3762abe 100644
108934--- a/net/ipv4/ip_fragment.c
108935+++ b/net/ipv4/ip_fragment.c
108936@@ -284,7 +284,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
108937 return 0;
108938
108939 start = qp->rid;
108940- end = atomic_inc_return(&peer->rid);
108941+ end = atomic_inc_return_unchecked(&peer->rid);
108942 qp->rid = end;
108943
108944 rc = qp->q.fragments && (end - start) > max;
108945@@ -761,12 +761,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
108946
108947 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
108948 {
108949- struct ctl_table *table;
108950+ ctl_table_no_const *table = NULL;
108951 struct ctl_table_header *hdr;
108952
108953- table = ip4_frags_ns_ctl_table;
108954 if (!net_eq(net, &init_net)) {
108955- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
108956+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
108957 if (table == NULL)
108958 goto err_alloc;
108959
108960@@ -777,9 +776,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
108961 /* Don't export sysctls to unprivileged users */
108962 if (net->user_ns != &init_user_ns)
108963 table[0].procname = NULL;
108964- }
108965+ hdr = register_net_sysctl(net, "net/ipv4", table);
108966+ } else
108967+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
108968
108969- hdr = register_net_sysctl(net, "net/ipv4", table);
108970 if (hdr == NULL)
108971 goto err_reg;
108972
108973@@ -787,8 +787,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
108974 return 0;
108975
108976 err_reg:
108977- if (!net_eq(net, &init_net))
108978- kfree(table);
108979+ kfree(table);
108980 err_alloc:
108981 return -ENOMEM;
108982 }
108983diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
108984index 9b84254..c776611 100644
108985--- a/net/ipv4/ip_gre.c
108986+++ b/net/ipv4/ip_gre.c
108987@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
108988 module_param(log_ecn_error, bool, 0644);
108989 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
108990
108991-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
108992+static struct rtnl_link_ops ipgre_link_ops;
108993 static int ipgre_tunnel_init(struct net_device *dev);
108994
108995 static int ipgre_net_id __read_mostly;
108996@@ -733,7 +733,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
108997 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
108998 };
108999
109000-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
109001+static struct rtnl_link_ops ipgre_link_ops = {
109002 .kind = "gre",
109003 .maxtype = IFLA_GRE_MAX,
109004 .policy = ipgre_policy,
109005@@ -747,7 +747,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
109006 .fill_info = ipgre_fill_info,
109007 };
109008
109009-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
109010+static struct rtnl_link_ops ipgre_tap_ops = {
109011 .kind = "gretap",
109012 .maxtype = IFLA_GRE_MAX,
109013 .policy = ipgre_policy,
109014diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
109015index 64741b9..6f334a2 100644
109016--- a/net/ipv4/ip_sockglue.c
109017+++ b/net/ipv4/ip_sockglue.c
109018@@ -1188,7 +1188,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
109019 len = min_t(unsigned int, len, opt->optlen);
109020 if (put_user(len, optlen))
109021 return -EFAULT;
109022- if (copy_to_user(optval, opt->__data, len))
109023+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
109024+ copy_to_user(optval, opt->__data, len))
109025 return -EFAULT;
109026 return 0;
109027 }
109028@@ -1319,7 +1320,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
109029 if (sk->sk_type != SOCK_STREAM)
109030 return -ENOPROTOOPT;
109031
109032- msg.msg_control = optval;
109033+ msg.msg_control = (void __force_kernel *)optval;
109034 msg.msg_controllen = len;
109035 msg.msg_flags = flags;
109036
109037diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
109038index b8960f3..0f025db 100644
109039--- a/net/ipv4/ip_vti.c
109040+++ b/net/ipv4/ip_vti.c
109041@@ -45,7 +45,7 @@
109042 #include <net/net_namespace.h>
109043 #include <net/netns/generic.h>
109044
109045-static struct rtnl_link_ops vti_link_ops __read_mostly;
109046+static struct rtnl_link_ops vti_link_ops;
109047
109048 static int vti_net_id __read_mostly;
109049 static int vti_tunnel_init(struct net_device *dev);
109050@@ -519,7 +519,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
109051 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
109052 };
109053
109054-static struct rtnl_link_ops vti_link_ops __read_mostly = {
109055+static struct rtnl_link_ops vti_link_ops = {
109056 .kind = "vti",
109057 .maxtype = IFLA_VTI_MAX,
109058 .policy = vti_policy,
109059diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
109060index b3e86ea..18ce98c 100644
109061--- a/net/ipv4/ipconfig.c
109062+++ b/net/ipv4/ipconfig.c
109063@@ -334,7 +334,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
109064
109065 mm_segment_t oldfs = get_fs();
109066 set_fs(get_ds());
109067- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
109068+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
109069 set_fs(oldfs);
109070 return res;
109071 }
109072@@ -345,7 +345,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
109073
109074 mm_segment_t oldfs = get_fs();
109075 set_fs(get_ds());
109076- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
109077+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
109078 set_fs(oldfs);
109079 return res;
109080 }
109081@@ -356,7 +356,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
109082
109083 mm_segment_t oldfs = get_fs();
109084 set_fs(get_ds());
109085- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
109086+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
109087 set_fs(oldfs);
109088 return res;
109089 }
109090diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
109091index 62eaa00..29b2dc2 100644
109092--- a/net/ipv4/ipip.c
109093+++ b/net/ipv4/ipip.c
109094@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
109095 static int ipip_net_id __read_mostly;
109096
109097 static int ipip_tunnel_init(struct net_device *dev);
109098-static struct rtnl_link_ops ipip_link_ops __read_mostly;
109099+static struct rtnl_link_ops ipip_link_ops;
109100
109101 static int ipip_err(struct sk_buff *skb, u32 info)
109102 {
109103@@ -409,7 +409,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
109104 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
109105 };
109106
109107-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
109108+static struct rtnl_link_ops ipip_link_ops = {
109109 .kind = "ipip",
109110 .maxtype = IFLA_IPTUN_MAX,
109111 .policy = ipip_policy,
109112diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
109113index f95b6f9..2ee2097 100644
109114--- a/net/ipv4/netfilter/arp_tables.c
109115+++ b/net/ipv4/netfilter/arp_tables.c
109116@@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
109117 #endif
109118
109119 static int get_info(struct net *net, void __user *user,
109120- const int *len, int compat)
109121+ int len, int compat)
109122 {
109123 char name[XT_TABLE_MAXNAMELEN];
109124 struct xt_table *t;
109125 int ret;
109126
109127- if (*len != sizeof(struct arpt_getinfo)) {
109128- duprintf("length %u != %Zu\n", *len,
109129+ if (len != sizeof(struct arpt_getinfo)) {
109130+ duprintf("length %u != %Zu\n", len,
109131 sizeof(struct arpt_getinfo));
109132 return -EINVAL;
109133 }
109134@@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user,
109135 info.size = private->size;
109136 strcpy(info.name, name);
109137
109138- if (copy_to_user(user, &info, *len) != 0)
109139+ if (copy_to_user(user, &info, len) != 0)
109140 ret = -EFAULT;
109141 else
109142 ret = 0;
109143@@ -1690,7 +1690,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
109144
109145 switch (cmd) {
109146 case ARPT_SO_GET_INFO:
109147- ret = get_info(sock_net(sk), user, len, 1);
109148+ ret = get_info(sock_net(sk), user, *len, 1);
109149 break;
109150 case ARPT_SO_GET_ENTRIES:
109151 ret = compat_get_entries(sock_net(sk), user, len);
109152@@ -1735,7 +1735,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
109153
109154 switch (cmd) {
109155 case ARPT_SO_GET_INFO:
109156- ret = get_info(sock_net(sk), user, len, 0);
109157+ ret = get_info(sock_net(sk), user, *len, 0);
109158 break;
109159
109160 case ARPT_SO_GET_ENTRIES:
109161diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
109162index 99e810f..3711b81 100644
109163--- a/net/ipv4/netfilter/ip_tables.c
109164+++ b/net/ipv4/netfilter/ip_tables.c
109165@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
109166 #endif
109167
109168 static int get_info(struct net *net, void __user *user,
109169- const int *len, int compat)
109170+ int len, int compat)
109171 {
109172 char name[XT_TABLE_MAXNAMELEN];
109173 struct xt_table *t;
109174 int ret;
109175
109176- if (*len != sizeof(struct ipt_getinfo)) {
109177- duprintf("length %u != %zu\n", *len,
109178+ if (len != sizeof(struct ipt_getinfo)) {
109179+ duprintf("length %u != %zu\n", len,
109180 sizeof(struct ipt_getinfo));
109181 return -EINVAL;
109182 }
109183@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user,
109184 info.size = private->size;
109185 strcpy(info.name, name);
109186
109187- if (copy_to_user(user, &info, *len) != 0)
109188+ if (copy_to_user(user, &info, len) != 0)
109189 ret = -EFAULT;
109190 else
109191 ret = 0;
109192@@ -1973,7 +1973,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
109193
109194 switch (cmd) {
109195 case IPT_SO_GET_INFO:
109196- ret = get_info(sock_net(sk), user, len, 1);
109197+ ret = get_info(sock_net(sk), user, *len, 1);
109198 break;
109199 case IPT_SO_GET_ENTRIES:
109200 ret = compat_get_entries(sock_net(sk), user, len);
109201@@ -2020,7 +2020,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
109202
109203 switch (cmd) {
109204 case IPT_SO_GET_INFO:
109205- ret = get_info(sock_net(sk), user, len, 0);
109206+ ret = get_info(sock_net(sk), user, *len, 0);
109207 break;
109208
109209 case IPT_SO_GET_ENTRIES:
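
get_info() in both arp_tables and ip_tables now takes the length by value: the caller reads *len from its own already-fetched copy exactly once, so get_info() can no longer re-read a userland int that the user may have raced to change between the validation and the copy_to_user() (a classic double-fetch). A sketch of the patched API shape:

#include <stdio.h>
#include <string.h>

struct getinfo { char name[32]; unsigned int size; };

/* After the patch: len arrives by value, fetched once by the caller,
 * so every check and copy below sees the same number. */
static int get_info(void *user, int len)
{
	struct getinfo info;

	if (len != (int)sizeof(info))
		return -22;			/* -EINVAL */
	memset(&info, 0, sizeof(info));
	strcpy(info.name, "filter");
	info.size = 1234;
	memcpy(user, &info, len);		/* copy_to_user stand-in */
	return 0;
}

int main(void)
{
	struct getinfo out;
	int user_len = sizeof(out);		/* fetched once, up front */

	printf("rc=%d\n", get_info(&out, user_len));
	printf("name=%s size=%u\n", out.name, out.size);
	return 0;
}
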
109210diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
109211index 2510c02..cfb34fa 100644
109212--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
109213+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
109214@@ -720,7 +720,7 @@ static int clusterip_net_init(struct net *net)
109215 spin_lock_init(&cn->lock);
109216
109217 #ifdef CONFIG_PROC_FS
109218- cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net);
109219+ cn->procdir = proc_mkdir_restrict("ipt_CLUSTERIP", net->proc_net);
109220 if (!cn->procdir) {
109221 pr_err("Unable to proc dir entry\n");
109222 return -ENOMEM;
109223diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
109224index 044a0dd..3399751 100644
109225--- a/net/ipv4/ping.c
109226+++ b/net/ipv4/ping.c
109227@@ -59,7 +59,7 @@ struct ping_table {
109228 };
109229
109230 static struct ping_table ping_table;
109231-struct pingv6_ops pingv6_ops;
109232+struct pingv6_ops *pingv6_ops;
109233 EXPORT_SYMBOL_GPL(pingv6_ops);
109234
109235 static u16 ping_port_rover;
109236@@ -348,7 +348,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
109237 return -ENODEV;
109238 }
109239 }
109240- has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
109241+ has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
109242 scoped);
109243 rcu_read_unlock();
109244
109245@@ -556,7 +556,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
109246 }
109247 #if IS_ENABLED(CONFIG_IPV6)
109248 } else if (skb->protocol == htons(ETH_P_IPV6)) {
109249- harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
109250+ harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
109251 #endif
109252 }
109253
109254@@ -574,7 +574,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
109255 info, (u8 *)icmph);
109256 #if IS_ENABLED(CONFIG_IPV6)
109257 } else if (family == AF_INET6) {
109258- pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
109259+ pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
109260 info, (u8 *)icmph);
109261 #endif
109262 }
109263@@ -858,7 +858,7 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
109264 return ip_recv_error(sk, msg, len, addr_len);
109265 #if IS_ENABLED(CONFIG_IPV6)
109266 } else if (family == AF_INET6) {
109267- return pingv6_ops.ipv6_recv_error(sk, msg, len,
109268+ return pingv6_ops->ipv6_recv_error(sk, msg, len,
109269 addr_len);
109270 #endif
109271 }
109272@@ -916,10 +916,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
109273 }
109274
109275 if (inet6_sk(sk)->rxopt.all)
109276- pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb);
109277+ pingv6_ops->ip6_datagram_recv_common_ctl(sk, msg, skb);
109278 if (skb->protocol == htons(ETH_P_IPV6) &&
109279 inet6_sk(sk)->rxopt.all)
109280- pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb);
109281+ pingv6_ops->ip6_datagram_recv_specific_ctl(sk, msg, skb);
109282 else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
109283 ip_cmsg_recv(msg, skb);
109284 #endif
109285@@ -1111,7 +1111,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
109286 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
109287 0, sock_i_ino(sp),
109288 atomic_read(&sp->sk_refcnt), sp,
109289- atomic_read(&sp->sk_drops));
109290+ atomic_read_unchecked(&sp->sk_drops));
109291 }
109292
109293 static int ping_v4_seq_show(struct seq_file *seq, void *v)
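
Turning pingv6_ops from a writable structure of function pointers into a single pointer means the IPv6 side registers itself by swinging one word instead of the core writing into a global ops table, and every call site goes through one indirection that can double as an "is IPv6 loaded" check. A sketch of that registration shape (the register/unregister steps here are assumed, not the kernel symbols):

#include <stdio.h>
#include <stddef.h>

struct pingv6_ops {
	int (*icmpv6_err_convert)(unsigned char type, unsigned char code,
				  int *err);
};

/* One pointer to swing at module load, instead of a writable struct
 * whose members get overwritten one by one. */
static struct pingv6_ops *pingv6_ops;

static int v6_err_convert(unsigned char type, unsigned char code, int *err)
{
	*err = -(int)code;
	return type > 128;
}

static struct pingv6_ops v6_impl = { .icmpv6_err_convert = v6_err_convert };

int main(void)
{
	int err = 0;

	pingv6_ops = &v6_impl;			/* "module" registration */
	if (pingv6_ops) {			/* call through the pointer */
		int harderr = pingv6_ops->icmpv6_err_convert(129, 3, &err);
		printf("harderr=%d err=%d\n", harderr, err);
	}
	pingv6_ops = NULL;			/* unregistration */
	return 0;
}
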
109294diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
109295index 2c65160..213ecdf 100644
109296--- a/net/ipv4/raw.c
109297+++ b/net/ipv4/raw.c
109298@@ -311,7 +311,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
109299 int raw_rcv(struct sock *sk, struct sk_buff *skb)
109300 {
109301 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
109302- atomic_inc(&sk->sk_drops);
109303+ atomic_inc_unchecked(&sk->sk_drops);
109304 kfree_skb(skb);
109305 return NET_RX_DROP;
109306 }
109307@@ -748,16 +748,20 @@ static int raw_init(struct sock *sk)
109308
109309 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
109310 {
109311+ struct icmp_filter filter;
109312+
109313 if (optlen > sizeof(struct icmp_filter))
109314 optlen = sizeof(struct icmp_filter);
109315- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
109316+ if (copy_from_user(&filter, optval, optlen))
109317 return -EFAULT;
109318+ raw_sk(sk)->filter = filter;
109319 return 0;
109320 }
109321
109322 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
109323 {
109324 int len, ret = -EFAULT;
109325+ struct icmp_filter filter;
109326
109327 if (get_user(len, optlen))
109328 goto out;
109329@@ -767,8 +771,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
109330 if (len > sizeof(struct icmp_filter))
109331 len = sizeof(struct icmp_filter);
109332 ret = -EFAULT;
109333- if (put_user(len, optlen) ||
109334- copy_to_user(optval, &raw_sk(sk)->filter, len))
109335+ filter = raw_sk(sk)->filter;
109336+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
109337 goto out;
109338 ret = 0;
109339 out: return ret;
109340@@ -997,7 +1001,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
109341 0, 0L, 0,
109342 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
109343 0, sock_i_ino(sp),
109344- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
109345+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
109346 }
109347
109348 static int raw_seq_show(struct seq_file *seq, void *v)
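
raw_seticmpfilter()/raw_geticmpfilter() are changed to copy through a local struct icmp_filter instead of operating on raw_sk(sk)->filter directly: the user copy then targets an object whose size the compiler (and PaX's size-overflow/usercopy checking) knows exactly, and a faulting copy_from_user can no longer leave the socket's filter half-written. The shape of the fix as a userspace sketch:

#include <stdio.h>
#include <string.h>

struct icmp_filter { unsigned int data; };
struct raw_sock { struct icmp_filter filter; };

/* copy_from_user stand-in; returns nonzero on a (simulated) fault. */
static int copy_from_user_sim(void *dst, const void *src, size_t n, int fault)
{
	if (fault)
		return 1;
	memcpy(dst, src, n);
	return 0;
}

static int set_filter(struct raw_sock *sk, const void *uptr,
		      size_t optlen, int fault)
{
	struct icmp_filter filter;	/* fixed-size bounce buffer */

	if (optlen > sizeof(filter))
		optlen = sizeof(filter);
	if (copy_from_user_sim(&filter, uptr, optlen, fault))
		return -14;		/* -EFAULT: sk->filter untouched */
	sk->filter = filter;		/* committed only on full success */
	return 0;
}

int main(void)
{
	struct raw_sock sk = { { 0 } };
	struct icmp_filter user = { 0xdeadbeef };

	set_filter(&sk, &user, sizeof(user), 1);
	printf("after fault: %x\n", sk.filter.data);	/* still 0 */
	set_filter(&sk, &user, sizeof(user), 0);
	printf("after success: %x\n", sk.filter.data);	/* deadbeef */
	return 0;
}
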
109349diff --git a/net/ipv4/route.c b/net/ipv4/route.c
109350index 1901998..a9a850a 100644
109351--- a/net/ipv4/route.c
109352+++ b/net/ipv4/route.c
109353@@ -228,7 +228,7 @@ static const struct seq_operations rt_cache_seq_ops = {
109354
109355 static int rt_cache_seq_open(struct inode *inode, struct file *file)
109356 {
109357- return seq_open(file, &rt_cache_seq_ops);
109358+ return seq_open_restrict(file, &rt_cache_seq_ops);
109359 }
109360
109361 static const struct file_operations rt_cache_seq_fops = {
109362@@ -319,7 +319,7 @@ static const struct seq_operations rt_cpu_seq_ops = {
109363
109364 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
109365 {
109366- return seq_open(file, &rt_cpu_seq_ops);
109367+ return seq_open_restrict(file, &rt_cpu_seq_ops);
109368 }
109369
109370 static const struct file_operations rt_cpu_seq_fops = {
109371@@ -357,7 +357,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)
109372
109373 static int rt_acct_proc_open(struct inode *inode, struct file *file)
109374 {
109375- return single_open(file, rt_acct_proc_show, NULL);
109376+ return single_open_restrict(file, rt_acct_proc_show, NULL);
109377 }
109378
109379 static const struct file_operations rt_acct_proc_fops = {
109380@@ -459,11 +459,11 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
109381
109382 #define IP_IDENTS_SZ 2048u
109383 struct ip_ident_bucket {
109384- atomic_t id;
109385+ atomic_unchecked_t id;
109386 u32 stamp32;
109387 };
109388
109389-static struct ip_ident_bucket *ip_idents __read_mostly;
109390+static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ] __read_mostly;
109391
109392 /* In order to protect privacy, we add a perturbation to identifiers
109393 * if one generator is seldom used. This makes hard for an attacker
109394@@ -479,7 +479,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
109395 if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
109396 delta = prandom_u32_max(now - old);
109397
109398- return atomic_add_return(segs + delta, &bucket->id) - segs;
109399+ return atomic_add_return_unchecked(segs + delta, &bucket->id) - segs;
109400 }
109401 EXPORT_SYMBOL(ip_idents_reserve);
109402
109403@@ -2625,34 +2625,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
109404 .maxlen = sizeof(int),
109405 .mode = 0200,
109406 .proc_handler = ipv4_sysctl_rtcache_flush,
109407+ .extra1 = &init_net,
109408 },
109409 { },
109410 };
109411
109412 static __net_init int sysctl_route_net_init(struct net *net)
109413 {
109414- struct ctl_table *tbl;
109415+ ctl_table_no_const *tbl = NULL;
109416
109417- tbl = ipv4_route_flush_table;
109418 if (!net_eq(net, &init_net)) {
109419- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
109420+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
109421 if (tbl == NULL)
109422 goto err_dup;
109423
109424 /* Don't export sysctls to unprivileged users */
109425 if (net->user_ns != &init_user_ns)
109426 tbl[0].procname = NULL;
109427- }
109428- tbl[0].extra1 = net;
109429+ tbl[0].extra1 = net;
109430+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
109431+ } else
109432+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
109433
109434- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
109435 if (net->ipv4.route_hdr == NULL)
109436 goto err_reg;
109437 return 0;
109438
109439 err_reg:
109440- if (tbl != ipv4_route_flush_table)
109441- kfree(tbl);
109442+ kfree(tbl);
109443 err_dup:
109444 return -ENOMEM;
109445 }
109446@@ -2675,8 +2675,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
109447
109448 static __net_init int rt_genid_init(struct net *net)
109449 {
109450- atomic_set(&net->ipv4.rt_genid, 0);
109451- atomic_set(&net->fnhe_genid, 0);
109452+ atomic_set_unchecked(&net->ipv4.rt_genid, 0);
109453+ atomic_set_unchecked(&net->fnhe_genid, 0);
109454 get_random_bytes(&net->ipv4.dev_addr_genid,
109455 sizeof(net->ipv4.dev_addr_genid));
109456 return 0;
109457@@ -2719,11 +2719,7 @@ int __init ip_rt_init(void)
109458 {
109459 int rc = 0;
109460
109461- ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
109462- if (!ip_idents)
109463- panic("IP: failed to allocate ip_idents\n");
109464-
109465- prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
109466+ prandom_bytes(ip_idents, sizeof(ip_idents));
109467
109468 #ifdef CONFIG_IP_ROUTE_CLASSID
109469 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
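
Replacing the kmalloc'd ip_idents with a statically sized array removes an allocation-failure panic() from early boot and lets sizeof(ip_idents) describe the whole table, which is what the simplified prandom_bytes() call relies on. The sizeof distinction the patch leans on:

#include <stdio.h>
#include <stdlib.h>

#define IP_IDENTS_SZ 2048u

struct ip_ident_bucket { unsigned int id; unsigned int stamp32; };

static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ];

int main(void)
{
	struct ip_ident_bucket *heap =
		malloc(IP_IDENTS_SZ * sizeof(*heap));

	/* For the array, sizeof gives the full table in bytes; for the
	 * pointer it gives only the pointer's own size -- hence the old
	 * code had to spell out IP_IDENTS_SZ * sizeof(*ip_idents). */
	printf("array: %zu bytes, pointer: %zu bytes\n",
	       sizeof(ip_idents), sizeof(heap));
	free(heap);
	return 0;
}
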
109470diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
109471index 79a007c..5023029 100644
109472--- a/net/ipv4/sysctl_net_ipv4.c
109473+++ b/net/ipv4/sysctl_net_ipv4.c
109474@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
109475 container_of(table->data, struct net, ipv4.ip_local_ports.range);
109476 int ret;
109477 int range[2];
109478- struct ctl_table tmp = {
109479+ ctl_table_no_const tmp = {
109480 .data = &range,
109481 .maxlen = sizeof(range),
109482 .mode = table->mode,
109483@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
109484 int ret;
109485 gid_t urange[2];
109486 kgid_t low, high;
109487- struct ctl_table tmp = {
109488+ ctl_table_no_const tmp = {
109489 .data = &urange,
109490 .maxlen = sizeof(urange),
109491 .mode = table->mode,
109492@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
109493 void __user *buffer, size_t *lenp, loff_t *ppos)
109494 {
109495 char val[TCP_CA_NAME_MAX];
109496- struct ctl_table tbl = {
109497+ ctl_table_no_const tbl = {
109498 .data = val,
109499 .maxlen = TCP_CA_NAME_MAX,
109500 };
109501@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
109502 void __user *buffer, size_t *lenp,
109503 loff_t *ppos)
109504 {
109505- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
109506+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
109507 int ret;
109508
109509 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
109510@@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
109511 void __user *buffer, size_t *lenp,
109512 loff_t *ppos)
109513 {
109514- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
109515+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
109516 int ret;
109517
109518 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
109519@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
109520 void __user *buffer, size_t *lenp,
109521 loff_t *ppos)
109522 {
109523- struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
109524+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
109525 struct tcp_fastopen_context *ctxt;
109526 int ret;
109527 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
109528@@ -857,13 +857,12 @@ static struct ctl_table ipv4_net_table[] = {
109529
109530 static __net_init int ipv4_sysctl_init_net(struct net *net)
109531 {
109532- struct ctl_table *table;
109533+ ctl_table_no_const *table = NULL;
109534
109535- table = ipv4_net_table;
109536 if (!net_eq(net, &init_net)) {
109537 int i;
109538
109539- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
109540+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
109541 if (table == NULL)
109542 goto err_alloc;
109543
109544@@ -872,7 +871,10 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
109545 table[i].data += (void *)net - (void *)&init_net;
109546 }
109547
109548- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
109549+ if (!net_eq(net, &init_net))
109550+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
109551+ else
109552+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
109553 if (net->ipv4.ipv4_hdr == NULL)
109554 goto err_reg;
109555
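
Every sysctl hunk in this file follows the same two-part recipe. Handler-local struct ctl_table temporaries become ctl_table_no_const, the writable typedef this patch introduces so the global tables themselves can be constified. And ipv4_sysctl_init_net splits registration: a non-init netns gets a writable kmemdup copy whose .data pointers are retargeted, while init_net registers the read-only template directly; initialising table to NULL lets the error path call kfree unconditionally, since kfree(NULL) is a no-op. The same shape recurs below in xfrm4_policy.c, nf_conntrack_reasm.c, reassembly.c and xfrm6_policy.c. A minimal sketch of the pattern, with hypothetical names:

static int example_value;	/* stand-in for an init_net-scoped field */

static struct ctl_table example_template[] = {
	{
		.procname	= "example_value",
		.data		= &example_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static int __net_init example_sysctl_init(struct net *net)
{
	struct ctl_table_header *hdr;
	ctl_table_no_const *table = NULL;	/* stays NULL for init_net */

	if (!net_eq(net, &init_net)) {
		table = kmemdup(example_template, sizeof(example_template),
				GFP_KERNEL);
		if (!table)
			return -ENOMEM;
		/* the real code retargets each table[i].data from init_net's
		 * fields to the matching fields of this netns here
		 */
		hdr = register_net_sysctl(net, "net/example", table);
	} else {
		/* init_net registers the read-only template directly */
		hdr = register_net_sysctl(net, "net/example", example_template);
	}
	if (!hdr) {
		kfree(table);	/* no-op when table is NULL */
		return -ENOMEM;
	}
	return 0;	/* the real code stores hdr in struct net */
}
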
109556diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
109557index 40639c2..dfc86b2 100644
109558--- a/net/ipv4/tcp_input.c
109559+++ b/net/ipv4/tcp_input.c
109560@@ -754,7 +754,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
109561 * without any lock. We want to make sure compiler wont store
109562 * intermediate values in this location.
109563 */
109564- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
109565+ ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
109566 sk->sk_max_pacing_rate);
109567 }
109568
109569@@ -4478,7 +4478,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
109570 * simplifies code)
109571 */
109572 static void
109573-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
109574+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
109575 struct sk_buff *head, struct sk_buff *tail,
109576 u32 start, u32 end)
109577 {
109578@@ -5536,6 +5536,7 @@ discard:
109579 tcp_paws_reject(&tp->rx_opt, 0))
109580 goto discard_and_undo;
109581
109582+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
109583 if (th->syn) {
109584 /* We see SYN without ACK. It is attempt of
109585 * simultaneous connect with crossed SYNs.
109586@@ -5586,6 +5587,7 @@ discard:
109587 goto discard;
109588 #endif
109589 }
109590+#endif
109591 /* "fifth, if neither of the SYN or RST bits is set then
109592 * drop the segment and return."
109593 */
109594@@ -5632,7 +5634,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
109595 goto discard;
109596
109597 if (th->syn) {
109598- if (th->fin)
109599+ if (th->fin || th->urg || th->psh)
109600 goto discard;
109601 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
109602 return 1;
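
Four distinct changes land in tcp_input.c. ACCESS_ONCE_RW is the writable counterpart this patch pairs with a constified ACCESS_ONCE. The __intentional_overflow(5,6) annotation tells the patch's size_overflow instrumentation that arithmetic on tcp_collapse's start and end sequence numbers may wrap by design, TCP sequence space being modulo 2^32. Under GRKERNSEC_NO_SIMULT_CONNECT, the SYN_SENT handling of a bare SYN is compiled out, disabling TCP simultaneous open. Finally, tcp_rcv_state_process now discards a connection-opening SYN that carries URG or PSH as well as one carrying FIN. A sketch of that stricter check:

#include <linux/tcp.h>

/* A SYN that opens a connection has no business carrying FIN, URG or
 * PSH; such segments are dropped before conn_request() ever sees them.
 */
static bool syn_flags_bogus(const struct tcphdr *th)
{
	return th->fin || th->urg || th->psh;
}
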
109603diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
109604index 77cccda..10122c4 100644
109605--- a/net/ipv4/tcp_ipv4.c
109606+++ b/net/ipv4/tcp_ipv4.c
109607@@ -91,6 +91,10 @@ int sysctl_tcp_low_latency __read_mostly;
109608 EXPORT_SYMBOL(sysctl_tcp_low_latency);
109609
109610
109611+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109612+extern int grsec_enable_blackhole;
109613+#endif
109614+
109615 #ifdef CONFIG_TCP_MD5SIG
109616 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
109617 __be32 daddr, __be32 saddr, const struct tcphdr *th);
109618@@ -1591,6 +1595,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
109619 return 0;
109620
109621 reset:
109622+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109623+ if (!grsec_enable_blackhole)
109624+#endif
109625 tcp_v4_send_reset(rsk, skb);
109626 discard:
109627 kfree_skb(skb);
109628@@ -1737,12 +1744,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
109629 TCP_SKB_CB(skb)->sacked = 0;
109630
109631 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
109632- if (!sk)
109633+ if (!sk) {
109634+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109635+ ret = 1;
109636+#endif
109637 goto no_tcp_socket;
109638-
109639+ }
109640 process:
109641- if (sk->sk_state == TCP_TIME_WAIT)
109642+ if (sk->sk_state == TCP_TIME_WAIT) {
109643+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109644+ ret = 2;
109645+#endif
109646 goto do_time_wait;
109647+ }
109648
109649 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
109650 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
109651@@ -1796,6 +1810,10 @@ csum_error:
109652 bad_packet:
109653 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
109654 } else {
109655+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109656+ if (!grsec_enable_blackhole || (ret == 1 &&
109657+ (skb->dev->flags & IFF_LOOPBACK)))
109658+#endif
109659 tcp_v4_send_reset(NULL, skb);
109660 }
109661
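
These tcp_ipv4.c hunks implement the TCP half of GRKERNSEC_BLACKHOLE: when grsec_enable_blackhole is set, unsolicited segments are dropped silently instead of answered with a RST. The ret markers record why the slow path was taken (1 for no socket found, 2 for TIME_WAIT), so the error path can still reset lookup failures arriving over loopback and keep local tooling working. The gate, as a standalone sketch:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

extern int grsec_enable_blackhole;

static bool blackhole_permits_reset(int ret, const struct sk_buff *skb)
{
	if (!grsec_enable_blackhole)
		return true;		/* feature off: send RSTs as usual */
	/* socket lookup failed (ret == 1) but the packet is loopback-local */
	return ret == 1 && (skb->dev->flags & IFF_LOOPBACK);
}
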
109662diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
109663index e68e0d4..0334263 100644
109664--- a/net/ipv4/tcp_minisocks.c
109665+++ b/net/ipv4/tcp_minisocks.c
109666@@ -27,6 +27,10 @@
109667 #include <net/inet_common.h>
109668 #include <net/xfrm.h>
109669
109670+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109671+extern int grsec_enable_blackhole;
109672+#endif
109673+
109674 int sysctl_tcp_syncookies __read_mostly = 1;
109675 EXPORT_SYMBOL(sysctl_tcp_syncookies);
109676
109677@@ -740,7 +744,10 @@ embryonic_reset:
109678 * avoid becoming vulnerable to outside attack aiming at
109679 * resetting legit local connections.
109680 */
109681- req->rsk_ops->send_reset(sk, skb);
109682+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109683+ if (!grsec_enable_blackhole)
109684+#endif
109685+ req->rsk_ops->send_reset(sk, skb);
109686 } else if (fastopen) { /* received a valid RST pkt */
109687 reqsk_fastopen_remove(sk, req, true);
109688 tcp_reset(sk);
109689diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
109690index 3b66610..bfbe23a 100644
109691--- a/net/ipv4/tcp_probe.c
109692+++ b/net/ipv4/tcp_probe.c
109693@@ -238,7 +238,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
109694 if (cnt + width >= len)
109695 break;
109696
109697- if (copy_to_user(buf + cnt, tbuf, width))
109698+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
109699 return -EFAULT;
109700 cnt += width;
109701 }
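
The tcp_probe hunk is a bounds hardening: width is a formatted-record length computed earlier, and the new test refuses the copy outright if it could ever exceed the on-stack tbuf, instead of trusting the surrounding length bookkeeping. The idiom in isolation:

#include <linux/uaccess.h>

/* Never let a computed length reach copy_to_user() without re-checking
 * it against the source buffer's real size.
 */
static ssize_t copy_bounded(char __user *dst, const void *src,
			    size_t srcsz, size_t width)
{
	if (width > srcsz || copy_to_user(dst, src, width))
		return -EFAULT;
	return width;
}
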
109702diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
109703index 286227a..c495a76 100644
109704--- a/net/ipv4/tcp_timer.c
109705+++ b/net/ipv4/tcp_timer.c
109706@@ -22,6 +22,10 @@
109707 #include <linux/gfp.h>
109708 #include <net/tcp.h>
109709
109710+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109711+extern int grsec_lastack_retries;
109712+#endif
109713+
109714 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
109715 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
109716 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
109717@@ -192,6 +196,13 @@ static int tcp_write_timeout(struct sock *sk)
109718 }
109719 }
109720
109721+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109722+ if ((sk->sk_state == TCP_LAST_ACK) &&
109723+ (grsec_lastack_retries > 0) &&
109724+ (grsec_lastack_retries < retry_until))
109725+ retry_until = grsec_lastack_retries;
109726+#endif
109727+
109728 if (retransmits_timed_out(sk, retry_until,
109729 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
109730 /* Has it gone just too far? */
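
grsec_lastack_retries, a sysctl-backed knob exported from the grsecurity code per the extern above, caps retransmissions for sockets in TCP_LAST_ACK, a state a peer that never ACKs the final FIN can otherwise park sockets in for the full retry budget. The clamp only ever lowers retry_until, and zero disables it. In isolation:

#include <net/tcp.h>

static int clamp_lastack(int retry_until, int lastack_retries, int sk_state)
{
	if (sk_state == TCP_LAST_ACK &&
	    lastack_retries > 0 && lastack_retries < retry_until)
		return lastack_retries;	/* shrink the budget, never grow it */
	return retry_until;
}
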
109731diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
109732index 7d5a866..4874211 100644
109733--- a/net/ipv4/udp.c
109734+++ b/net/ipv4/udp.c
109735@@ -87,6 +87,7 @@
109736 #include <linux/types.h>
109737 #include <linux/fcntl.h>
109738 #include <linux/module.h>
109739+#include <linux/security.h>
109740 #include <linux/socket.h>
109741 #include <linux/sockios.h>
109742 #include <linux/igmp.h>
109743@@ -113,6 +114,10 @@
109744 #include <net/busy_poll.h>
109745 #include "udp_impl.h"
109746
109747+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109748+extern int grsec_enable_blackhole;
109749+#endif
109750+
109751 struct udp_table udp_table __read_mostly;
109752 EXPORT_SYMBOL(udp_table);
109753
109754@@ -615,6 +620,9 @@ found:
109755 return s;
109756 }
109757
109758+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
109759+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
109760+
109761 /*
109762 * This routine is called by the ICMP module when it gets some
109763 * sort of error condition. If err < 0 then the socket should
109764@@ -952,9 +960,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
109765 dport = usin->sin_port;
109766 if (dport == 0)
109767 return -EINVAL;
109768+
109769+ err = gr_search_udp_sendmsg(sk, usin);
109770+ if (err)
109771+ return err;
109772 } else {
109773 if (sk->sk_state != TCP_ESTABLISHED)
109774 return -EDESTADDRREQ;
109775+
109776+ err = gr_search_udp_sendmsg(sk, NULL);
109777+ if (err)
109778+ return err;
109779+
109780 daddr = inet->inet_daddr;
109781 dport = inet->inet_dport;
109782 /* Open fast path for connected socket.
109783@@ -1202,7 +1219,7 @@ static unsigned int first_packet_length(struct sock *sk)
109784 IS_UDPLITE(sk));
109785 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
109786 IS_UDPLITE(sk));
109787- atomic_inc(&sk->sk_drops);
109788+ atomic_inc_unchecked(&sk->sk_drops);
109789 __skb_unlink(skb, rcvq);
109790 __skb_queue_tail(&list_kill, skb);
109791 }
109792@@ -1282,6 +1299,10 @@ try_again:
109793 if (!skb)
109794 goto out;
109795
109796+ err = gr_search_udp_recvmsg(sk, skb);
109797+ if (err)
109798+ goto out_free;
109799+
109800 ulen = skb->len - sizeof(struct udphdr);
109801 copied = len;
109802 if (copied > ulen)
109803@@ -1315,7 +1336,7 @@ try_again:
109804 if (unlikely(err)) {
109805 trace_kfree_skb(skb, udp_recvmsg);
109806 if (!peeked) {
109807- atomic_inc(&sk->sk_drops);
109808+ atomic_inc_unchecked(&sk->sk_drops);
109809 UDP_INC_STATS_USER(sock_net(sk),
109810 UDP_MIB_INERRORS, is_udplite);
109811 }
109812@@ -1612,7 +1633,7 @@ csum_error:
109813 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
109814 drop:
109815 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
109816- atomic_inc(&sk->sk_drops);
109817+ atomic_inc_unchecked(&sk->sk_drops);
109818 kfree_skb(skb);
109819 return -1;
109820 }
109821@@ -1631,7 +1652,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
109822 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
109823
109824 if (!skb1) {
109825- atomic_inc(&sk->sk_drops);
109826+ atomic_inc_unchecked(&sk->sk_drops);
109827 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
109828 IS_UDPLITE(sk));
109829 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
109830@@ -1817,6 +1838,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
109831 goto csum_error;
109832
109833 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
109834+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
109835+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
109836+#endif
109837 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
109838
109839 /*
109840@@ -2403,7 +2427,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
109841 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
109842 0, sock_i_ino(sp),
109843 atomic_read(&sp->sk_refcnt), sp,
109844- atomic_read(&sp->sk_drops));
109845+ atomic_read_unchecked(&sp->sk_drops));
109846 }
109847
109848 int udp4_seq_show(struct seq_file *seq, void *v)
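
udp.c gains the datagram RBAC hooks: gr_search_udp_sendmsg is consulted on both the explicit-address and connected send paths, and gr_search_udp_recvmsg vets each received skb before data is copied out, so policy can veto UDP traffic per socket. sk_drops becomes an unchecked counter for the same reason as the route.c generation counters. Hypothetical no-op stubs showing the hook contract (the real implementations ship in the grsecurity/ directory added elsewhere in this patch):

#include <linux/in.h>
#include <linux/skbuff.h>
#include <net/sock.h>

/* Return 0 to allow, or a negative errno that udp_sendmsg()/udp_recvmsg()
 * pass straight back to userspace.
 */
int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
{
	return 0;	/* addr may be NULL on the connected-socket path */
}

int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
{
	return 0;
}
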
109849diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
109850index 6156f68..d6ab46d 100644
109851--- a/net/ipv4/xfrm4_policy.c
109852+++ b/net/ipv4/xfrm4_policy.c
109853@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
109854 fl4->flowi4_tos = iph->tos;
109855 }
109856
109857-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
109858+static int xfrm4_garbage_collect(struct dst_ops *ops)
109859 {
109860 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
109861
109862- xfrm4_policy_afinfo.garbage_collect(net);
109863+ xfrm_garbage_collect_deferred(net);
109864 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
109865 }
109866
109867@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
109868
109869 static int __net_init xfrm4_net_init(struct net *net)
109870 {
109871- struct ctl_table *table;
109872+ ctl_table_no_const *table = NULL;
109873 struct ctl_table_header *hdr;
109874
109875- table = xfrm4_policy_table;
109876 if (!net_eq(net, &init_net)) {
109877- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
109878+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
109879 if (!table)
109880 goto err_alloc;
109881
109882 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
109883- }
109884-
109885- hdr = register_net_sysctl(net, "net/ipv4", table);
109886+ hdr = register_net_sysctl(net, "net/ipv4", table);
109887+ } else
109888+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
109889 if (!hdr)
109890 goto err_reg;
109891
109892@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
109893 return 0;
109894
109895 err_reg:
109896- if (!net_eq(net, &init_net))
109897- kfree(table);
109898+ kfree(table);
109899 err_alloc:
109900 return -ENOMEM;
109901 }
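
Besides repeating the sysctl-registration pattern described earlier, xfrm4_policy.c changes its garbage collector: xfrm4_garbage_collect loses its inline, since its address is stored in a dst_ops table, and it stops indirecting through xfrm4_policy_afinfo.garbage_collect, calling xfrm_garbage_collect_deferred directly. Presumably the afinfo structure is constified by this patch, so that hook can no longer be assigned at runtime. A sketch of the resulting shape, with hypothetical names:

#include <net/dst_ops.h>
#include <net/xfrm.h>

static int example_gc(struct dst_ops *ops);

static struct dst_ops example_dst_ops = {
	.family	= AF_INET,
	.gc	= example_gc,	/* address taken: cannot be inline */
};

static int example_gc(struct dst_ops *ops)
{
	struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);

	xfrm_garbage_collect_deferred(net);	/* direct call, no afinfo hook */
	return dst_entries_get_slow(ops) > ops->gc_thresh * 2;
}
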
109902diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
109903index 5667b30..2044f61 100644
109904--- a/net/ipv6/addrconf.c
109905+++ b/net/ipv6/addrconf.c
109906@@ -593,7 +593,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
109907 idx = 0;
109908 head = &net->dev_index_head[h];
109909 rcu_read_lock();
109910- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
109911+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
109912 net->dev_base_seq;
109913 hlist_for_each_entry_rcu(dev, head, index_hlist) {
109914 if (idx < s_idx)
109915@@ -2390,7 +2390,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
109916 p.iph.ihl = 5;
109917 p.iph.protocol = IPPROTO_IPV6;
109918 p.iph.ttl = 64;
109919- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
109920+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
109921
109922 if (ops->ndo_do_ioctl) {
109923 mm_segment_t oldfs = get_fs();
109924@@ -3516,16 +3516,23 @@ static const struct file_operations if6_fops = {
109925 .release = seq_release_net,
109926 };
109927
109928+extern void register_ipv6_seq_ops_addr(struct seq_operations *addr);
109929+extern void unregister_ipv6_seq_ops_addr(void);
109930+
109931 static int __net_init if6_proc_net_init(struct net *net)
109932 {
109933- if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
109934+ register_ipv6_seq_ops_addr(&if6_seq_ops);
109935+ if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops)) {
109936+ unregister_ipv6_seq_ops_addr();
109937 return -ENOMEM;
109938+ }
109939 return 0;
109940 }
109941
109942 static void __net_exit if6_proc_net_exit(struct net *net)
109943 {
109944 remove_proc_entry("if_inet6", net->proc_net);
109945+ unregister_ipv6_seq_ops_addr();
109946 }
109947
109948 static struct pernet_operations if6_proc_net_ops = {
109949@@ -4141,7 +4148,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
109950 s_ip_idx = ip_idx = cb->args[2];
109951
109952 rcu_read_lock();
109953- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
109954+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
109955 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
109956 idx = 0;
109957 head = &net->dev_index_head[h];
109958@@ -4741,11 +4748,8 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
109959
109960 rt = rt6_lookup(dev_net(dev), &ifp->peer_addr, NULL,
109961 dev->ifindex, 1);
109962- if (rt) {
109963- dst_hold(&rt->dst);
109964- if (ip6_del_rt(rt))
109965- dst_free(&rt->dst);
109966- }
109967+ if (rt && ip6_del_rt(rt))
109968+ dst_free(&rt->dst);
109969 }
109970 dst_hold(&ifp->rt->dst);
109971
109972@@ -4753,7 +4757,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
109973 dst_free(&ifp->rt->dst);
109974 break;
109975 }
109976- atomic_inc(&net->ipv6.dev_addr_genid);
109977+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
109978 rt_genid_bump_ipv6(net);
109979 }
109980
109981@@ -4774,7 +4778,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
109982 int *valp = ctl->data;
109983 int val = *valp;
109984 loff_t pos = *ppos;
109985- struct ctl_table lctl;
109986+ ctl_table_no_const lctl;
109987 int ret;
109988
109989 /*
109990@@ -4859,7 +4863,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
109991 int *valp = ctl->data;
109992 int val = *valp;
109993 loff_t pos = *ppos;
109994- struct ctl_table lctl;
109995+ ctl_table_no_const lctl;
109996 int ret;
109997
109998 /*
109999diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
110000index 7cb4392..dc96d28 100644
110001--- a/net/ipv6/af_inet6.c
110002+++ b/net/ipv6/af_inet6.c
110003@@ -765,7 +765,7 @@ static int __net_init inet6_net_init(struct net *net)
110004 net->ipv6.sysctl.bindv6only = 0;
110005 net->ipv6.sysctl.icmpv6_time = 1*HZ;
110006 net->ipv6.sysctl.flowlabel_consistency = 1;
110007- atomic_set(&net->ipv6.rt_genid, 0);
110008+ atomic_set_unchecked(&net->ipv6.rt_genid, 0);
110009
110010 err = ipv6_init_mibs(net);
110011 if (err)
110012diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
110013index c3bf2d2..1f00573 100644
110014--- a/net/ipv6/datagram.c
110015+++ b/net/ipv6/datagram.c
110016@@ -938,5 +938,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
110017 0,
110018 sock_i_ino(sp),
110019 atomic_read(&sp->sk_refcnt), sp,
110020- atomic_read(&sp->sk_drops));
110021+ atomic_read_unchecked(&sp->sk_drops));
110022 }
110023diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
110024index f6c84a6..9f2084e 100644
110025--- a/net/ipv6/icmp.c
110026+++ b/net/ipv6/icmp.c
110027@@ -990,7 +990,7 @@ static struct ctl_table ipv6_icmp_table_template[] = {
110028
110029 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
110030 {
110031- struct ctl_table *table;
110032+ ctl_table_no_const *table;
110033
110034 table = kmemdup(ipv6_icmp_table_template,
110035 sizeof(ipv6_icmp_table_template),
110036diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
110037index 3873181..220ad3f 100644
110038--- a/net/ipv6/ip6_gre.c
110039+++ b/net/ipv6/ip6_gre.c
110040@@ -71,8 +71,8 @@ struct ip6gre_net {
110041 struct net_device *fb_tunnel_dev;
110042 };
110043
110044-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
110045-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
110046+static struct rtnl_link_ops ip6gre_link_ops;
110047+static struct rtnl_link_ops ip6gre_tap_ops;
110048 static int ip6gre_tunnel_init(struct net_device *dev);
110049 static void ip6gre_tunnel_setup(struct net_device *dev);
110050 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
110051@@ -1280,7 +1280,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
110052 }
110053
110054
110055-static struct inet6_protocol ip6gre_protocol __read_mostly = {
110056+static struct inet6_protocol ip6gre_protocol = {
110057 .handler = ip6gre_rcv,
110058 .err_handler = ip6gre_err,
110059 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
110060@@ -1638,7 +1638,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
110061 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
110062 };
110063
110064-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
110065+static struct rtnl_link_ops ip6gre_link_ops = {
110066 .kind = "ip6gre",
110067 .maxtype = IFLA_GRE_MAX,
110068 .policy = ip6gre_policy,
110069@@ -1652,7 +1652,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
110070 .fill_info = ip6gre_fill_info,
110071 };
110072
110073-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
110074+static struct rtnl_link_ops ip6gre_tap_ops = {
110075 .kind = "ip6gretap",
110076 .maxtype = IFLA_GRE_MAX,
110077 .policy = ip6gre_policy,
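
ip6_gre.c, and ip6_tunnel.c, ip6_vti.c and sit.c below, drop __read_mostly from their rtnl_link_ops and inet6_protocol instances. The likely reason, an inference about this patch's memory protections rather than something it states: the read-mostly section gets write-protected after init, while registration writes into these objects themselves (rtnl_link_register() links ops->list into the global list), so they must stay in ordinary writable .data. Sketch:

#include <net/rtnetlink.h>

/* Registration mutates the ops object, so it cannot live in a section
 * that is made read-only after init.
 */
static struct rtnl_link_ops example_link_ops = {	/* deliberately no __read_mostly */
	.kind = "example",
};

static int __init example_init(void)
{
	return rtnl_link_register(&example_link_ops);	/* writes ops->list */
}
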
110078diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
110079index afa0824..04ba530 100644
110080--- a/net/ipv6/ip6_tunnel.c
110081+++ b/net/ipv6/ip6_tunnel.c
110082@@ -86,7 +86,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
110083
110084 static int ip6_tnl_dev_init(struct net_device *dev);
110085 static void ip6_tnl_dev_setup(struct net_device *dev);
110086-static struct rtnl_link_ops ip6_link_ops __read_mostly;
110087+static struct rtnl_link_ops ip6_link_ops;
110088
110089 static int ip6_tnl_net_id __read_mostly;
110090 struct ip6_tnl_net {
110091@@ -1708,7 +1708,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
110092 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
110093 };
110094
110095-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
110096+static struct rtnl_link_ops ip6_link_ops = {
110097 .kind = "ip6tnl",
110098 .maxtype = IFLA_IPTUN_MAX,
110099 .policy = ip6_tnl_policy,
110100diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
110101index 9aaa6bb..5c13e57 100644
110102--- a/net/ipv6/ip6_vti.c
110103+++ b/net/ipv6/ip6_vti.c
110104@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
110105
110106 static int vti6_dev_init(struct net_device *dev);
110107 static void vti6_dev_setup(struct net_device *dev);
110108-static struct rtnl_link_ops vti6_link_ops __read_mostly;
110109+static struct rtnl_link_ops vti6_link_ops;
110110
110111 static int vti6_net_id __read_mostly;
110112 struct vti6_net {
110113@@ -977,7 +977,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
110114 [IFLA_VTI_OKEY] = { .type = NLA_U32 },
110115 };
110116
110117-static struct rtnl_link_ops vti6_link_ops __read_mostly = {
110118+static struct rtnl_link_ops vti6_link_ops = {
110119 .kind = "vti6",
110120 .maxtype = IFLA_VTI_MAX,
110121 .policy = vti6_policy,
110122diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
110123index edb58af..78de133 100644
110124--- a/net/ipv6/ipv6_sockglue.c
110125+++ b/net/ipv6/ipv6_sockglue.c
110126@@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
110127 if (sk->sk_type != SOCK_STREAM)
110128 return -ENOPROTOOPT;
110129
110130- msg.msg_control = optval;
110131+ msg.msg_control = (void __force_kernel *)optval;
110132 msg.msg_controllen = len;
110133 msg.msg_flags = flags;
110134
110135diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
110136index e080fbb..412b3cf 100644
110137--- a/net/ipv6/netfilter/ip6_tables.c
110138+++ b/net/ipv6/netfilter/ip6_tables.c
110139@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
110140 #endif
110141
110142 static int get_info(struct net *net, void __user *user,
110143- const int *len, int compat)
110144+ int len, int compat)
110145 {
110146 char name[XT_TABLE_MAXNAMELEN];
110147 struct xt_table *t;
110148 int ret;
110149
110150- if (*len != sizeof(struct ip6t_getinfo)) {
110151- duprintf("length %u != %zu\n", *len,
110152+ if (len != sizeof(struct ip6t_getinfo)) {
110153+ duprintf("length %u != %zu\n", len,
110154 sizeof(struct ip6t_getinfo));
110155 return -EINVAL;
110156 }
110157@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user,
110158 info.size = private->size;
110159 strcpy(info.name, name);
110160
110161- if (copy_to_user(user, &info, *len) != 0)
110162+ if (copy_to_user(user, &info, len) != 0)
110163 ret = -EFAULT;
110164 else
110165 ret = 0;
110166@@ -1983,7 +1983,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
110167
110168 switch (cmd) {
110169 case IP6T_SO_GET_INFO:
110170- ret = get_info(sock_net(sk), user, len, 1);
110171+ ret = get_info(sock_net(sk), user, *len, 1);
110172 break;
110173 case IP6T_SO_GET_ENTRIES:
110174 ret = compat_get_entries(sock_net(sk), user, len);
110175@@ -2030,7 +2030,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
110176
110177 switch (cmd) {
110178 case IP6T_SO_GET_INFO:
110179- ret = get_info(sock_net(sk), user, len, 0);
110180+ ret = get_info(sock_net(sk), user, *len, 0);
110181 break;
110182
110183 case IP6T_SO_GET_ENTRIES:
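
get_info() now takes the user-supplied length by value, with both callers dereferencing *len once at the call site. The quantity validated against sizeof(struct ip6t_getinfo) and the quantity handed to copy_to_user() are then the same local scalar with no pointer in between, which also suits this patch's size_overflow tracking of plain length values (that motivation is an inference, not stated in the patch). The shape, with a stand-in structure:

#include <linux/uaccess.h>

struct example_getinfo { char name[32]; unsigned int size; };	/* stand-in */

static int get_info_checked(void __user *user, int len)
{
	struct example_getinfo info = { };

	if (len != sizeof(info))	/* validate the value itself, once */
		return -EINVAL;
	return copy_to_user(user, &info, len) ? -EFAULT : 0;
}
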
110184diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
110185index 0d5279f..89d9f6f 100644
110186--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
110187+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
110188@@ -90,12 +90,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
110189
110190 static int nf_ct_frag6_sysctl_register(struct net *net)
110191 {
110192- struct ctl_table *table;
110193+ ctl_table_no_const *table = NULL;
110194 struct ctl_table_header *hdr;
110195
110196- table = nf_ct_frag6_sysctl_table;
110197 if (!net_eq(net, &init_net)) {
110198- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
110199+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
110200 GFP_KERNEL);
110201 if (table == NULL)
110202 goto err_alloc;
110203@@ -103,9 +102,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
110204 table[0].data = &net->nf_frag.frags.timeout;
110205 table[1].data = &net->nf_frag.frags.low_thresh;
110206 table[2].data = &net->nf_frag.frags.high_thresh;
110207- }
110208-
110209- hdr = register_net_sysctl(net, "net/netfilter", table);
110210+ hdr = register_net_sysctl(net, "net/netfilter", table);
110211+ } else
110212+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
110213 if (hdr == NULL)
110214 goto err_reg;
110215
110216@@ -113,8 +112,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
110217 return 0;
110218
110219 err_reg:
110220- if (!net_eq(net, &init_net))
110221- kfree(table);
110222+ kfree(table);
110223 err_alloc:
110224 return -ENOMEM;
110225 }
110226diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
110227index 5b7a1ed..d9da205 100644
110228--- a/net/ipv6/ping.c
110229+++ b/net/ipv6/ping.c
110230@@ -240,6 +240,24 @@ static struct pernet_operations ping_v6_net_ops = {
110231 };
110232 #endif
110233
110234+static struct pingv6_ops real_pingv6_ops = {
110235+ .ipv6_recv_error = ipv6_recv_error,
110236+ .ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl,
110237+ .ip6_datagram_recv_specific_ctl = ip6_datagram_recv_specific_ctl,
110238+ .icmpv6_err_convert = icmpv6_err_convert,
110239+ .ipv6_icmp_error = ipv6_icmp_error,
110240+ .ipv6_chk_addr = ipv6_chk_addr,
110241+};
110242+
110243+static struct pingv6_ops dummy_pingv6_ops = {
110244+ .ipv6_recv_error = dummy_ipv6_recv_error,
110245+ .ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl,
110246+ .ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl,
110247+ .icmpv6_err_convert = dummy_icmpv6_err_convert,
110248+ .ipv6_icmp_error = dummy_ipv6_icmp_error,
110249+ .ipv6_chk_addr = dummy_ipv6_chk_addr,
110250+};
110251+
110252 int __init pingv6_init(void)
110253 {
110254 #ifdef CONFIG_PROC_FS
110255@@ -247,13 +265,7 @@ int __init pingv6_init(void)
110256 if (ret)
110257 return ret;
110258 #endif
110259- pingv6_ops.ipv6_recv_error = ipv6_recv_error;
110260- pingv6_ops.ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl;
110261- pingv6_ops.ip6_datagram_recv_specific_ctl =
110262- ip6_datagram_recv_specific_ctl;
110263- pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
110264- pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
110265- pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
110266+ pingv6_ops = &real_pingv6_ops;
110267 return inet6_register_protosw(&pingv6_protosw);
110268 }
110269
110270@@ -262,14 +274,9 @@ int __init pingv6_init(void)
110271 */
110272 void pingv6_exit(void)
110273 {
110274- pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
110275- pingv6_ops.ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl;
110276- pingv6_ops.ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl;
110277- pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
110278- pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
110279- pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
110280 #ifdef CONFIG_PROC_FS
110281 unregister_pernet_subsys(&ping_v6_net_ops);
110282 #endif
110283+ pingv6_ops = &dummy_pingv6_ops;
110284 inet6_unregister_protosw(&pingv6_protosw);
110285 }
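
Rather than poking six function pointers into a writable pingv6_ops structure at init and exit, ping.c now keeps two fully initialised ops tables and flips a single pointer between them; this presumes pingv6_ops is redeclared as a pointer on the ipv4 side, which these hunks require. One writable word replaces a permanently writable structure, so the tables themselves can be constified. The pattern, reduced to a generic sketch:

#include <linux/errno.h>

struct example_ops {
	int (*handler)(void);
};

static int real_handler(void)  { return 0; }
static int dummy_handler(void) { return -EAFNOSUPPORT; }

static const struct example_ops real_ops  = { .handler = real_handler };
static const struct example_ops dummy_ops = { .handler = dummy_handler };

/* the only writable word; both tables can be const */
static const struct example_ops *active_ops = &dummy_ops;

static void feature_init(void) { active_ops = &real_ops; }
static void feature_exit(void) { active_ops = &dummy_ops; }
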
110286diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
110287index 3317440..201764e 100644
110288--- a/net/ipv6/proc.c
110289+++ b/net/ipv6/proc.c
110290@@ -309,7 +309,7 @@ static int __net_init ipv6_proc_init_net(struct net *net)
110291 if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
110292 goto proc_snmp6_fail;
110293
110294- net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
110295+ net->mib.proc_net_devsnmp6 = proc_mkdir_restrict("dev_snmp6", net->proc_net);
110296 if (!net->mib.proc_net_devsnmp6)
110297 goto proc_dev_snmp6_fail;
110298 return 0;
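
proc_mkdir_restrict is a helper added elsewhere in this patch; the call sites here and in ipx_proc.c, irproc.c and llc_proc.c below use it as a drop-in replacement for proc_mkdir that applies the restricted directory mode configured by the GRKERNSEC proc options, keeping unprivileged users from browsing these per-protocol trees. Usage, assuming it mirrors proc_mkdir's signature as the call sites suggest:

#include <linux/proc_fs.h>
#include <net/net_namespace.h>

static struct proc_dir_entry *demo_dir;

static int __init demo_proc_init(void)
{
	/* same arguments as proc_mkdir(); the mode restriction happens inside */
	demo_dir = proc_mkdir_restrict("demo", init_net.proc_net);
	return demo_dir ? 0 : -ENOMEM;
}
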
110299diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
110300index b2dc60b..a6b6c10 100644
110301--- a/net/ipv6/raw.c
110302+++ b/net/ipv6/raw.c
110303@@ -388,7 +388,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
110304 {
110305 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
110306 skb_checksum_complete(skb)) {
110307- atomic_inc(&sk->sk_drops);
110308+ atomic_inc_unchecked(&sk->sk_drops);
110309 kfree_skb(skb);
110310 return NET_RX_DROP;
110311 }
110312@@ -416,7 +416,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
110313 struct raw6_sock *rp = raw6_sk(sk);
110314
110315 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
110316- atomic_inc(&sk->sk_drops);
110317+ atomic_inc_unchecked(&sk->sk_drops);
110318 kfree_skb(skb);
110319 return NET_RX_DROP;
110320 }
110321@@ -440,7 +440,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
110322
110323 if (inet->hdrincl) {
110324 if (skb_checksum_complete(skb)) {
110325- atomic_inc(&sk->sk_drops);
110326+ atomic_inc_unchecked(&sk->sk_drops);
110327 kfree_skb(skb);
110328 return NET_RX_DROP;
110329 }
110330@@ -610,7 +610,7 @@ out:
110331 return err;
110332 }
110333
110334-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
110335+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
110336 struct flowi6 *fl6, struct dst_entry **dstp,
110337 unsigned int flags)
110338 {
110339@@ -916,12 +916,15 @@ do_confirm:
110340 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
110341 char __user *optval, int optlen)
110342 {
110343+ struct icmp6_filter filter;
110344+
110345 switch (optname) {
110346 case ICMPV6_FILTER:
110347 if (optlen > sizeof(struct icmp6_filter))
110348 optlen = sizeof(struct icmp6_filter);
110349- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
110350+ if (copy_from_user(&filter, optval, optlen))
110351 return -EFAULT;
110352+ raw6_sk(sk)->filter = filter;
110353 return 0;
110354 default:
110355 return -ENOPROTOOPT;
110356@@ -934,6 +937,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
110357 char __user *optval, int __user *optlen)
110358 {
110359 int len;
110360+ struct icmp6_filter filter;
110361
110362 switch (optname) {
110363 case ICMPV6_FILTER:
110364@@ -945,7 +949,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
110365 len = sizeof(struct icmp6_filter);
110366 if (put_user(len, optlen))
110367 return -EFAULT;
110368- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
110369+ filter = raw6_sk(sk)->filter;
110370+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
110371 return -EFAULT;
110372 return 0;
110373 default:
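
Both ICMPv6 filter paths in raw.c now bounce through an exactly-typed stack object instead of copying user data directly into, or out of, the live socket structure, and the get path re-checks len against that object before copy_to_user. Copying to and from an object whose size the compiler knows lets usercopy size checking validate the transfer (an inference about intent), and a faulting copy can no longer leave raw6_sk(sk)->filter half-overwritten. The setsockopt side of the idiom:

#include <linux/icmpv6.h>
#include <linux/uaccess.h>

static int set_icmp6_filter(struct icmp6_filter *dst,
			    const char __user *optval, int optlen)
{
	struct icmp6_filter tmp = { };	/* zero-init: short optlen leaves
					 * deterministic trailing bytes */

	if (optlen > sizeof(tmp))
		optlen = sizeof(tmp);
	if (copy_from_user(&tmp, optval, optlen))
		return -EFAULT;
	*dst = tmp;	/* commit only after the copy fully succeeded */
	return 0;
}
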
110374diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
110375index cc85a9b..526a133 100644
110376--- a/net/ipv6/reassembly.c
110377+++ b/net/ipv6/reassembly.c
110378@@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
110379
110380 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
110381 {
110382- struct ctl_table *table;
110383+ ctl_table_no_const *table = NULL;
110384 struct ctl_table_header *hdr;
110385
110386- table = ip6_frags_ns_ctl_table;
110387 if (!net_eq(net, &init_net)) {
110388- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
110389+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
110390 if (table == NULL)
110391 goto err_alloc;
110392
110393@@ -642,9 +641,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
110394 /* Don't export sysctls to unprivileged users */
110395 if (net->user_ns != &init_user_ns)
110396 table[0].procname = NULL;
110397- }
110398+ hdr = register_net_sysctl(net, "net/ipv6", table);
110399+ } else
110400+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
110401
110402- hdr = register_net_sysctl(net, "net/ipv6", table);
110403 if (hdr == NULL)
110404 goto err_reg;
110405
110406@@ -652,8 +652,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
110407 return 0;
110408
110409 err_reg:
110410- if (!net_eq(net, &init_net))
110411- kfree(table);
110412+ kfree(table);
110413 err_alloc:
110414 return -ENOMEM;
110415 }
110416diff --git a/net/ipv6/route.c b/net/ipv6/route.c
110417index f23fbd2..7868241 100644
110418--- a/net/ipv6/route.c
110419+++ b/net/ipv6/route.c
110420@@ -2971,7 +2971,7 @@ struct ctl_table ipv6_route_table_template[] = {
110421
110422 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
110423 {
110424- struct ctl_table *table;
110425+ ctl_table_no_const *table;
110426
110427 table = kmemdup(ipv6_route_table_template,
110428 sizeof(ipv6_route_table_template),
110429diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
110430index 4f40817..54dcbef 100644
110431--- a/net/ipv6/sit.c
110432+++ b/net/ipv6/sit.c
110433@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
110434 static void ipip6_dev_free(struct net_device *dev);
110435 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
110436 __be32 *v4dst);
110437-static struct rtnl_link_ops sit_link_ops __read_mostly;
110438+static struct rtnl_link_ops sit_link_ops;
110439
110440 static int sit_net_id __read_mostly;
110441 struct sit_net {
110442@@ -1661,7 +1661,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
110443 unregister_netdevice_queue(dev, head);
110444 }
110445
110446-static struct rtnl_link_ops sit_link_ops __read_mostly = {
110447+static struct rtnl_link_ops sit_link_ops = {
110448 .kind = "sit",
110449 .maxtype = IFLA_IPTUN_MAX,
110450 .policy = ipip6_policy,
110451diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
110452index 058f3ec..dec973d 100644
110453--- a/net/ipv6/sysctl_net_ipv6.c
110454+++ b/net/ipv6/sysctl_net_ipv6.c
110455@@ -61,7 +61,7 @@ static struct ctl_table ipv6_rotable[] = {
110456
110457 static int __net_init ipv6_sysctl_net_init(struct net *net)
110458 {
110459- struct ctl_table *ipv6_table;
110460+ ctl_table_no_const *ipv6_table;
110461 struct ctl_table *ipv6_route_table;
110462 struct ctl_table *ipv6_icmp_table;
110463 int err;
110464diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
110465index 229239ad..ee2802f 100644
110466--- a/net/ipv6/tcp_ipv6.c
110467+++ b/net/ipv6/tcp_ipv6.c
110468@@ -102,6 +102,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
110469 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
110470 }
110471
110472+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
110473+extern int grsec_enable_blackhole;
110474+#endif
110475+
110476 static void tcp_v6_hash(struct sock *sk)
110477 {
110478 if (sk->sk_state != TCP_CLOSE) {
110479@@ -1424,6 +1428,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
110480 return 0;
110481
110482 reset:
110483+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
110484+ if (!grsec_enable_blackhole)
110485+#endif
110486 tcp_v6_send_reset(sk, skb);
110487 discard:
110488 if (opt_skb)
110489@@ -1508,12 +1515,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
110490 TCP_SKB_CB(skb)->sacked = 0;
110491
110492 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
110493- if (!sk)
110494+ if (!sk) {
110495+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
110496+ ret = 1;
110497+#endif
110498 goto no_tcp_socket;
110499+ }
110500
110501 process:
110502- if (sk->sk_state == TCP_TIME_WAIT)
110503+ if (sk->sk_state == TCP_TIME_WAIT) {
110504+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
110505+ ret = 2;
110506+#endif
110507 goto do_time_wait;
110508+ }
110509
110510 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
110511 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
110512@@ -1565,6 +1580,10 @@ csum_error:
110513 bad_packet:
110514 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
110515 } else {
110516+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
110517+ if (!grsec_enable_blackhole || (ret == 1 &&
110518+ (skb->dev->flags & IFF_LOOPBACK)))
110519+#endif
110520 tcp_v6_send_reset(NULL, skb);
110521 }
110522
110523diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
110524index 7092ff7..3fd0eb4 100644
110525--- a/net/ipv6/udp.c
110526+++ b/net/ipv6/udp.c
110527@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
110528 udp_ipv6_hash_secret + net_hash_mix(net));
110529 }
110530
110531+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
110532+extern int grsec_enable_blackhole;
110533+#endif
110534+
110535 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
110536 {
110537 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
110538@@ -435,7 +439,7 @@ try_again:
110539 if (unlikely(err)) {
110540 trace_kfree_skb(skb, udpv6_recvmsg);
110541 if (!peeked) {
110542- atomic_inc(&sk->sk_drops);
110543+ atomic_inc_unchecked(&sk->sk_drops);
110544 if (is_udp4)
110545 UDP_INC_STATS_USER(sock_net(sk),
110546 UDP_MIB_INERRORS,
110547@@ -698,7 +702,7 @@ csum_error:
110548 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
110549 drop:
110550 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
110551- atomic_inc(&sk->sk_drops);
110552+ atomic_inc_unchecked(&sk->sk_drops);
110553 kfree_skb(skb);
110554 return -1;
110555 }
110556@@ -754,7 +758,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
110557 if (likely(skb1 == NULL))
110558 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
110559 if (!skb1) {
110560- atomic_inc(&sk->sk_drops);
110561+ atomic_inc_unchecked(&sk->sk_drops);
110562 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
110563 IS_UDPLITE(sk));
110564 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
110565@@ -920,6 +924,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
110566 goto csum_error;
110567
110568 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
110569+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
110570+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
110571+#endif
110572 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
110573
110574 kfree_skb(skb);
110575diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
110576index 2a0bbda..d75ca57 100644
110577--- a/net/ipv6/xfrm6_policy.c
110578+++ b/net/ipv6/xfrm6_policy.c
110579@@ -212,11 +212,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
110580 }
110581 }
110582
110583-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
110584+static int xfrm6_garbage_collect(struct dst_ops *ops)
110585 {
110586 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
110587
110588- xfrm6_policy_afinfo.garbage_collect(net);
110589+ xfrm_garbage_collect_deferred(net);
110590 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
110591 }
110592
110593@@ -329,19 +329,19 @@ static struct ctl_table xfrm6_policy_table[] = {
110594
110595 static int __net_init xfrm6_net_init(struct net *net)
110596 {
110597- struct ctl_table *table;
110598+ ctl_table_no_const *table = NULL;
110599 struct ctl_table_header *hdr;
110600
110601- table = xfrm6_policy_table;
110602 if (!net_eq(net, &init_net)) {
110603- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
110604+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
110605 if (!table)
110606 goto err_alloc;
110607
110608 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
110609- }
110610+ hdr = register_net_sysctl(net, "net/ipv6", table);
110611+ } else
110612+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
110613
110614- hdr = register_net_sysctl(net, "net/ipv6", table);
110615 if (!hdr)
110616 goto err_reg;
110617
110618@@ -349,8 +349,7 @@ static int __net_init xfrm6_net_init(struct net *net)
110619 return 0;
110620
110621 err_reg:
110622- if (!net_eq(net, &init_net))
110623- kfree(table);
110624+ kfree(table);
110625 err_alloc:
110626 return -ENOMEM;
110627 }
110628diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
110629index e15c16a..7cf07aa 100644
110630--- a/net/ipx/ipx_proc.c
110631+++ b/net/ipx/ipx_proc.c
110632@@ -289,7 +289,7 @@ int __init ipx_proc_init(void)
110633 struct proc_dir_entry *p;
110634 int rc = -ENOMEM;
110635
110636- ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net);
110637+ ipx_proc_dir = proc_mkdir_restrict("ipx", init_net.proc_net);
110638
110639 if (!ipx_proc_dir)
110640 goto out;
110641diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
110642index 2ba8b97..6d33010 100644
110643--- a/net/irda/ircomm/ircomm_tty.c
110644+++ b/net/irda/ircomm/ircomm_tty.c
110645@@ -317,11 +317,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
110646 add_wait_queue(&port->open_wait, &wait);
110647
110648 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
110649- __FILE__, __LINE__, tty->driver->name, port->count);
110650+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
110651
110652 spin_lock_irqsave(&port->lock, flags);
110653 if (!tty_hung_up_p(filp))
110654- port->count--;
110655+ atomic_dec(&port->count);
110656 port->blocked_open++;
110657 spin_unlock_irqrestore(&port->lock, flags);
110658
110659@@ -356,7 +356,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
110660 }
110661
110662 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
110663- __FILE__, __LINE__, tty->driver->name, port->count);
110664+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
110665
110666 schedule();
110667 }
110668@@ -366,12 +366,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
110669
110670 spin_lock_irqsave(&port->lock, flags);
110671 if (!tty_hung_up_p(filp))
110672- port->count++;
110673+ atomic_inc(&port->count);
110674 port->blocked_open--;
110675 spin_unlock_irqrestore(&port->lock, flags);
110676
110677 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
110678- __FILE__, __LINE__, tty->driver->name, port->count);
110679+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
110680
110681 if (!retval)
110682 port->flags |= ASYNC_NORMAL_ACTIVE;
110683@@ -445,12 +445,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
110684
110685 /* ++ is not atomic, so this should be protected - Jean II */
110686 spin_lock_irqsave(&self->port.lock, flags);
110687- self->port.count++;
110688+ atomic_inc(&self->port.count);
110689 spin_unlock_irqrestore(&self->port.lock, flags);
110690 tty_port_tty_set(&self->port, tty);
110691
110692 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
110693- self->line, self->port.count);
110694+ self->line, atomic_read(&self->port.count));
110695
110696 /* Not really used by us, but lets do it anyway */
110697 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
110698@@ -987,7 +987,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
110699 tty_kref_put(port->tty);
110700 }
110701 port->tty = NULL;
110702- port->count = 0;
110703+ atomic_set(&port->count, 0);
110704 spin_unlock_irqrestore(&port->lock, flags);
110705
110706 wake_up_interruptible(&port->open_wait);
110707@@ -1344,7 +1344,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
110708 seq_putc(m, '\n');
110709
110710 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
110711- seq_printf(m, "Open count: %d\n", self->port.count);
110712+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
110713 seq_printf(m, "Max data size: %d\n", self->max_data_size);
110714 seq_printf(m, "Max header size: %d\n", self->max_header_size);
110715
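
All the port count manipulations in ircomm_tty.c switch to atomic ops; this assumes struct tty_port::count becomes atomic_t elsewhere in this patch, which every hunk above depends on. Most of these call sites already hold port->lock, so the conversion reads as consistency with the tree-wide counter hardening rather than a local race fix. The converted accesses, in a compact demo:

#include <linux/atomic.h>
#include <linux/printk.h>
#include <linux/tty.h>

/* Assumes the patched tty_port where ->count is atomic_t. */
static void port_count_demo(struct tty_port *port)
{
	atomic_inc(&port->count);			/* was: port->count++ */
	pr_debug("open_count=%d\n", atomic_read(&port->count));
	atomic_dec(&port->count);			/* was: port->count-- */
}
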
110716diff --git a/net/irda/irproc.c b/net/irda/irproc.c
110717index b9ac598..f88cc56 100644
110718--- a/net/irda/irproc.c
110719+++ b/net/irda/irproc.c
110720@@ -66,7 +66,7 @@ void __init irda_proc_register(void)
110721 {
110722 int i;
110723
110724- proc_irda = proc_mkdir("irda", init_net.proc_net);
110725+ proc_irda = proc_mkdir_restrict("irda", init_net.proc_net);
110726 if (proc_irda == NULL)
110727 return;
110728
110729diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
110730index 7a95fa4..57be196 100644
110731--- a/net/iucv/af_iucv.c
110732+++ b/net/iucv/af_iucv.c
110733@@ -686,10 +686,10 @@ static void __iucv_auto_name(struct iucv_sock *iucv)
110734 {
110735 char name[12];
110736
110737- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
110738+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
110739 while (__iucv_get_sock_by_name(name)) {
110740 sprintf(name, "%08x",
110741- atomic_inc_return(&iucv_sk_list.autobind_name));
110742+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
110743 }
110744 memcpy(iucv->src_name, name, 8);
110745 }
110746diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
110747index da78793..bdd78cf 100644
110748--- a/net/iucv/iucv.c
110749+++ b/net/iucv/iucv.c
110750@@ -702,7 +702,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
110751 return NOTIFY_OK;
110752 }
110753
110754-static struct notifier_block __refdata iucv_cpu_notifier = {
110755+static struct notifier_block iucv_cpu_notifier = {
110756 .notifier_call = iucv_cpu_notify,
110757 };
110758
110759diff --git a/net/key/af_key.c b/net/key/af_key.c
110760index ba2a2f9..b658bc3 100644
110761--- a/net/key/af_key.c
110762+++ b/net/key/af_key.c
110763@@ -3052,10 +3052,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
110764 static u32 get_acqseq(void)
110765 {
110766 u32 res;
110767- static atomic_t acqseq;
110768+ static atomic_unchecked_t acqseq;
110769
110770 do {
110771- res = atomic_inc_return(&acqseq);
110772+ res = atomic_inc_return_unchecked(&acqseq);
110773 } while (!res);
110774 return res;
110775 }
110776diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
110777index 76125c5..e474828 100644
110778--- a/net/l2tp/l2tp_eth.c
110779+++ b/net/l2tp/l2tp_eth.c
110780@@ -42,12 +42,12 @@ struct l2tp_eth {
110781 struct sock *tunnel_sock;
110782 struct l2tp_session *session;
110783 struct list_head list;
110784- atomic_long_t tx_bytes;
110785- atomic_long_t tx_packets;
110786- atomic_long_t tx_dropped;
110787- atomic_long_t rx_bytes;
110788- atomic_long_t rx_packets;
110789- atomic_long_t rx_errors;
110790+ atomic_long_unchecked_t tx_bytes;
110791+ atomic_long_unchecked_t tx_packets;
110792+ atomic_long_unchecked_t tx_dropped;
110793+ atomic_long_unchecked_t rx_bytes;
110794+ atomic_long_unchecked_t rx_packets;
110795+ atomic_long_unchecked_t rx_errors;
110796 };
110797
110798 /* via l2tp_session_priv() */
110799@@ -98,10 +98,10 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
110800 int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
110801
110802 if (likely(ret == NET_XMIT_SUCCESS)) {
110803- atomic_long_add(len, &priv->tx_bytes);
110804- atomic_long_inc(&priv->tx_packets);
110805+ atomic_long_add_unchecked(len, &priv->tx_bytes);
110806+ atomic_long_inc_unchecked(&priv->tx_packets);
110807 } else {
110808- atomic_long_inc(&priv->tx_dropped);
110809+ atomic_long_inc_unchecked(&priv->tx_dropped);
110810 }
110811 return NETDEV_TX_OK;
110812 }
110813@@ -111,12 +111,12 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
110814 {
110815 struct l2tp_eth *priv = netdev_priv(dev);
110816
110817- stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
110818- stats->tx_packets = atomic_long_read(&priv->tx_packets);
110819- stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
110820- stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
110821- stats->rx_packets = atomic_long_read(&priv->rx_packets);
110822- stats->rx_errors = atomic_long_read(&priv->rx_errors);
110823+ stats->tx_bytes = atomic_long_read_unchecked(&priv->tx_bytes);
110824+ stats->tx_packets = atomic_long_read_unchecked(&priv->tx_packets);
110825+ stats->tx_dropped = atomic_long_read_unchecked(&priv->tx_dropped);
110826+ stats->rx_bytes = atomic_long_read_unchecked(&priv->rx_bytes);
110827+ stats->rx_packets = atomic_long_read_unchecked(&priv->rx_packets);
110828+ stats->rx_errors = atomic_long_read_unchecked(&priv->rx_errors);
110829 return stats;
110830 }
110831
110832@@ -166,15 +166,15 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
110833 nf_reset(skb);
110834
110835 if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
110836- atomic_long_inc(&priv->rx_packets);
110837- atomic_long_add(data_len, &priv->rx_bytes);
110838+ atomic_long_inc_unchecked(&priv->rx_packets);
110839+ atomic_long_add_unchecked(data_len, &priv->rx_bytes);
110840 } else {
110841- atomic_long_inc(&priv->rx_errors);
110842+ atomic_long_inc_unchecked(&priv->rx_errors);
110843 }
110844 return;
110845
110846 error:
110847- atomic_long_inc(&priv->rx_errors);
110848+ atomic_long_inc_unchecked(&priv->rx_errors);
110849 kfree_skb(skb);
110850 }
110851
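
The l2tp_eth statistics move wholesale to the patch's atomic_long_unchecked_t. These are pure packet and byte counters whose wrap-around is harmless, so they are exempted from the overflow-trapping arithmetic the regular atomic_long ops gain under PAX_REFCOUNT; the unchecked ops exist only with this patch applied. Sketch:

#include <linux/atomic.h>

static atomic_long_unchecked_t demo_rx_packets;	/* wrap-around is benign */

static void demo_count_rx(void)
{
	atomic_long_inc_unchecked(&demo_rx_packets);
}
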
110852diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
110853index 13752d9..b704a93 100644
110854--- a/net/l2tp/l2tp_ppp.c
110855+++ b/net/l2tp/l2tp_ppp.c
110856@@ -755,7 +755,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
110857 /* If PMTU discovery was enabled, use the MTU that was discovered */
110858 dst = sk_dst_get(tunnel->sock);
110859 if (dst != NULL) {
110860- u32 pmtu = dst_mtu(__sk_dst_get(tunnel->sock));
110861+ u32 pmtu = dst_mtu(dst);
110862+
110863 if (pmtu != 0)
110864 session->mtu = session->mru = pmtu -
110865 PPPOL2TP_HEADER_OVERHEAD;
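
The one plain bug fix in this stretch: pppol2tp_connect takes a dst reference with sk_dst_get() and then, in the old code, immediately re-fetched the entry locklessly with __sk_dst_get() for the MTU read; the second fetch could observe a different entry than the one the reference pins. The new code reads the MTU from the dst it already holds. The corrected shape, self-contained:

#include <net/dst.h>
#include <net/sock.h>

static u32 tunnel_pmtu(struct sock *sk)
{
	struct dst_entry *dst = sk_dst_get(sk);	/* takes a reference */
	u32 pmtu = 0;

	if (dst) {
		pmtu = dst_mtu(dst);	/* same entry the reference pins */
		dst_release(dst);
	}
	return pmtu;
}
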
110866diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
110867index 1a3c7e0..80f8b0c 100644
110868--- a/net/llc/llc_proc.c
110869+++ b/net/llc/llc_proc.c
110870@@ -247,7 +247,7 @@ int __init llc_proc_init(void)
110871 int rc = -ENOMEM;
110872 struct proc_dir_entry *p;
110873
110874- llc_proc_dir = proc_mkdir("llc", init_net.proc_net);
110875+ llc_proc_dir = proc_mkdir_restrict("llc", init_net.proc_net);
110876 if (!llc_proc_dir)
110877 goto out;
110878
110879diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
110880index 592f4b1..efa7aa9 100644
110881--- a/net/mac80211/cfg.c
110882+++ b/net/mac80211/cfg.c
110883@@ -864,7 +864,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
110884 ret = ieee80211_vif_use_channel(sdata, chandef,
110885 IEEE80211_CHANCTX_EXCLUSIVE);
110886 }
110887- } else if (local->open_count == local->monitors) {
110888+ } else if (local_read(&local->open_count) == local->monitors) {
110889 local->_oper_chandef = *chandef;
110890 ieee80211_hw_config(local, 0);
110891 }
110892@@ -3574,7 +3574,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
110893 else
110894 local->probe_req_reg--;
110895
110896- if (!local->open_count)
110897+ if (!local_read(&local->open_count))
110898 break;
110899
110900 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
110901@@ -3723,8 +3723,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
110902 if (chanctx_conf) {
110903 *chandef = chanctx_conf->def;
110904 ret = 0;
110905- } else if (local->open_count > 0 &&
110906- local->open_count == local->monitors &&
110907+ } else if (local_read(&local->open_count) > 0 &&
110908+ local_read(&local->open_count) == local->monitors &&
110909 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
110910 if (local->use_chanctx)
110911 *chandef = local->monitor_chandef;
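
mac80211's local->open_count is converted from a plain int to a local_t: every read above and in iface.c, main.c, pm.c, rate.c and util.c below goes through local_read(), and the increment and decrement in iface.c become local_inc()/local_dec(). <asm/local.h>, which the ieee80211_i.h hunk below adds, provides single-word atomic long ops that are cheaper than a full atomic_t on most architectures. In miniature:

#include <asm/local.h>

static local_t open_count = LOCAL_INIT(0);

static void iface_up(void)   { local_inc(&open_count); }
static void iface_down(void) { local_dec(&open_count); }
static bool hw_running(void) { return local_read(&open_count) != 0; }
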
110912diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
110913index ac9836e..32613c1 100644
110914--- a/net/mac80211/ieee80211_i.h
110915+++ b/net/mac80211/ieee80211_i.h
110916@@ -28,6 +28,7 @@
110917 #include <net/ieee80211_radiotap.h>
110918 #include <net/cfg80211.h>
110919 #include <net/mac80211.h>
110920+#include <asm/local.h>
110921 #include "key.h"
110922 #include "sta_info.h"
110923 #include "debug.h"
110924@@ -1011,7 +1012,7 @@ struct ieee80211_local {
110925 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
110926 spinlock_t queue_stop_reason_lock;
110927
110928- int open_count;
110929+ local_t open_count;
110930 int monitors, cooked_mntrs;
110931 /* number of interfaces with corresponding FIF_ flags */
110932 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
110933diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
110934index 388b863..6575b55 100644
110935--- a/net/mac80211/iface.c
110936+++ b/net/mac80211/iface.c
110937@@ -531,7 +531,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
110938 break;
110939 }
110940
110941- if (local->open_count == 0) {
110942+ if (local_read(&local->open_count) == 0) {
110943 res = drv_start(local);
110944 if (res)
110945 goto err_del_bss;
110946@@ -578,7 +578,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
110947 res = drv_add_interface(local, sdata);
110948 if (res)
110949 goto err_stop;
110950- } else if (local->monitors == 0 && local->open_count == 0) {
110951+ } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
110952 res = ieee80211_add_virtual_monitor(local);
110953 if (res)
110954 goto err_stop;
110955@@ -687,7 +687,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
110956 atomic_inc(&local->iff_promiscs);
110957
110958 if (coming_up)
110959- local->open_count++;
110960+ local_inc(&local->open_count);
110961
110962 if (hw_reconf_flags)
110963 ieee80211_hw_config(local, hw_reconf_flags);
110964@@ -725,7 +725,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
110965 err_del_interface:
110966 drv_remove_interface(local, sdata);
110967 err_stop:
110968- if (!local->open_count)
110969+ if (!local_read(&local->open_count))
110970 drv_stop(local);
110971 err_del_bss:
110972 sdata->bss = NULL;
110973@@ -888,7 +888,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
110974 }
110975
110976 if (going_down)
110977- local->open_count--;
110978+ local_dec(&local->open_count);
110979
110980 switch (sdata->vif.type) {
110981 case NL80211_IFTYPE_AP_VLAN:
110982@@ -949,7 +949,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
110983 }
110984 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
110985
110986- if (local->open_count == 0)
110987+ if (local_read(&local->open_count) == 0)
110988 ieee80211_clear_tx_pending(local);
110989
110990 /*
110991@@ -989,7 +989,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
110992
110993 ieee80211_recalc_ps(local, -1);
110994
110995- if (local->open_count == 0) {
110996+ if (local_read(&local->open_count) == 0) {
110997 ieee80211_stop_device(local);
110998
110999 /* no reconfiguring after stop! */
111000@@ -1000,7 +1000,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
111001 ieee80211_configure_filter(local);
111002 ieee80211_hw_config(local, hw_reconf_flags);
111003
111004- if (local->monitors == local->open_count)
111005+ if (local->monitors == local_read(&local->open_count))
111006 ieee80211_add_virtual_monitor(local);
111007 }
111008
111009diff --git a/net/mac80211/main.c b/net/mac80211/main.c
111010index d17c26d..43d6bfb 100644
111011--- a/net/mac80211/main.c
111012+++ b/net/mac80211/main.c
111013@@ -174,7 +174,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
111014 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
111015 IEEE80211_CONF_CHANGE_POWER);
111016
111017- if (changed && local->open_count) {
111018+ if (changed && local_read(&local->open_count)) {
111019 ret = drv_config(local, changed);
111020 /*
111021 * Goal:
111022diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
111023index d478b88..8c8d157 100644
111024--- a/net/mac80211/pm.c
111025+++ b/net/mac80211/pm.c
111026@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
111027 struct ieee80211_sub_if_data *sdata;
111028 struct sta_info *sta;
111029
111030- if (!local->open_count)
111031+ if (!local_read(&local->open_count))
111032 goto suspend;
111033
111034 ieee80211_scan_cancel(local);
111035@@ -58,7 +58,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
111036 cancel_work_sync(&local->dynamic_ps_enable_work);
111037 del_timer_sync(&local->dynamic_ps_timer);
111038
111039- local->wowlan = wowlan && local->open_count;
111040+ local->wowlan = wowlan && local_read(&local->open_count);
111041 if (local->wowlan) {
111042 int err = drv_suspend(local, wowlan);
111043 if (err < 0) {
111044@@ -123,7 +123,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
111045 WARN_ON(!list_empty(&local->chanctx_list));
111046
111047 /* stop hardware - this must stop RX */
111048- if (local->open_count)
111049+ if (local_read(&local->open_count))
111050 ieee80211_stop_device(local);
111051
111052 suspend:
111053diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
111054index 8fdadfd..a4f72b8 100644
111055--- a/net/mac80211/rate.c
111056+++ b/net/mac80211/rate.c
111057@@ -720,7 +720,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
111058
111059 ASSERT_RTNL();
111060
111061- if (local->open_count)
111062+ if (local_read(&local->open_count))
111063 return -EBUSY;
111064
111065 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
111066diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
111067index 6ff1346..936ca9a 100644
111068--- a/net/mac80211/rc80211_pid_debugfs.c
111069+++ b/net/mac80211/rc80211_pid_debugfs.c
111070@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
111071
111072 spin_unlock_irqrestore(&events->lock, status);
111073
111074- if (copy_to_user(buf, pb, p))
111075+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
111076 return -EFAULT;
111077
111078 return p;
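
The one-line change above bounds a kernel-to-user copy: `p` is a length computed for the stack buffer `pb`, and if it could ever exceed `sizeof(pb)`, the unchecked `copy_to_user()` would read past the buffer and leak adjacent stack memory to userspace. A minimal sketch of the hardened pattern, with hypothetical names:

    #include <linux/kernel.h>
    #include <linux/uaccess.h>

    static ssize_t read_sketch(char __user *buf)
    {
            char pb[64];
            size_t p = scnprintf(pb, sizeof(pb), "state: %d\n", 42);

            /* reject impossible lengths before copy_to_user() can
             * walk off the end of pb */
            if (p > sizeof(pb) || copy_to_user(buf, pb, p))
                    return -EFAULT;
            return p;
    }
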
111079diff --git a/net/mac80211/util.c b/net/mac80211/util.c
111080index a6cda52..f3b6776 100644
111081--- a/net/mac80211/util.c
111082+++ b/net/mac80211/util.c
111083@@ -1548,7 +1548,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
111084 }
111085 #endif
111086 /* everything else happens only if HW was up & running */
111087- if (!local->open_count)
111088+ if (!local_read(&local->open_count))
111089 goto wake_up;
111090
111091 /*
111092@@ -1772,7 +1772,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
111093 local->in_reconfig = false;
111094 barrier();
111095
111096- if (local->monitors == local->open_count && local->monitors > 0)
111097+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
111098 ieee80211_add_virtual_monitor(local);
111099
111100 /*
111101diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
111102index e9410d1..77b6378 100644
111103--- a/net/netfilter/Kconfig
111104+++ b/net/netfilter/Kconfig
111105@@ -1081,6 +1081,16 @@ config NETFILTER_XT_MATCH_ESP
111106
111107 To compile it as a module, choose M here. If unsure, say N.
111108
111109+config NETFILTER_XT_MATCH_GRADM
111110+ tristate '"gradm" match support'
111111+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
111112+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
111113+ ---help---
111114+ The gradm match allows matching on whether grsecurity RBAC is enabled.
111115+ It is useful when iptables rules are applied early during boot to
111116+ prevent connections to the machine (except from a trusted host)
111117+ while the RBAC system is still disabled.
111118+
111119 config NETFILTER_XT_MATCH_HASHLIMIT
111120 tristate '"hashlimit" match support'
111121 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
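
The entry above only wires up the build system; the match itself is added as net/netfilter/xt_gradm.c at the end of this patch, and the companion userspace library ships with gradm rather than with the kernel. The kernel side reduces to a single predicate on RBAC state, roughly as follows (a sketch whose shape is assumed from the file added below; gr_acl_is_enabled() is the RBAC state query grsecurity uses elsewhere):

    static bool
    gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
    {
            const struct xt_gradm_mtinfo *info = par->matchinfo;
            bool retval = false;

            if (gr_acl_is_enabled())
                    retval = true;
            /* invflags lets a rule match on RBAC being disabled instead */
            return retval ^ info->invflags;
    }
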
111122diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
111123index bffdad7..f9317d1 100644
111124--- a/net/netfilter/Makefile
111125+++ b/net/netfilter/Makefile
111126@@ -133,6 +133,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
111127 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
111128 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
111129 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
111130+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
111131 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
111132 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
111133 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
111134diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
111135index ec8114f..6b2bfba 100644
111136--- a/net/netfilter/ipset/ip_set_core.c
111137+++ b/net/netfilter/ipset/ip_set_core.c
111138@@ -1921,7 +1921,7 @@ done:
111139 return ret;
111140 }
111141
111142-static struct nf_sockopt_ops so_set __read_mostly = {
111143+static struct nf_sockopt_ops so_set = {
111144 .pf = PF_INET,
111145 .get_optmin = SO_IP_SET,
111146 .get_optmax = SO_IP_SET + 1,
111147diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
111148index 610e19c..08d0c3f 100644
111149--- a/net/netfilter/ipvs/ip_vs_conn.c
111150+++ b/net/netfilter/ipvs/ip_vs_conn.c
111151@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
111152 /* Increase the refcnt counter of the dest */
111153 ip_vs_dest_hold(dest);
111154
111155- conn_flags = atomic_read(&dest->conn_flags);
111156+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
111157 if (cp->protocol != IPPROTO_UDP)
111158 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
111159 flags = cp->flags;
111160@@ -899,7 +899,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
111161
111162 cp->control = NULL;
111163 atomic_set(&cp->n_control, 0);
111164- atomic_set(&cp->in_pkts, 0);
111165+ atomic_set_unchecked(&cp->in_pkts, 0);
111166
111167 cp->packet_xmit = NULL;
111168 cp->app = NULL;
111169@@ -1187,7 +1187,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
111170
111171 /* Don't drop the entry if its number of incoming packets is not
111172 located in [0, 8] */
111173- i = atomic_read(&cp->in_pkts);
111174+ i = atomic_read_unchecked(&cp->in_pkts);
111175 if (i > 8 || i < 0) return 0;
111176
111177 if (!todrop_rate[i]) return 0;
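
This and the remaining IPVS hunks (ip_vs_core.c, ip_vs_ctl.c, ip_vs_sync.c, ip_vs_xmit.c), plus the nfnetlink_log global_seq below, switch statistics-style counters from atomic_t to atomic_unchecked_t. Under PaX REFCOUNT every ordinary atomic_t operation traps on overflow; counters such as in_pkts that may legitimately wrap are opted out through the _unchecked variants. On a configuration without REFCOUNT the variants collapse to the plain operations, roughly (a fallback sketch; the real definitions live in the headers patched earlier in this diff):

    #ifndef CONFIG_PAX_REFCOUNT
    typedef atomic_t atomic_unchecked_t;

    #define atomic_read_unchecked(v)                atomic_read(v)
    #define atomic_set_unchecked(v, i)              atomic_set((v), (i))
    #define atomic_inc_unchecked(v)                 atomic_inc(v)
    #define atomic_add_return_unchecked(i, v)       atomic_add_return((i), (v))
    #define atomic_inc_return_unchecked(v)          atomic_inc_return(v)
    #endif
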
111178diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
111179index e683675..67cb16b 100644
111180--- a/net/netfilter/ipvs/ip_vs_core.c
111181+++ b/net/netfilter/ipvs/ip_vs_core.c
111182@@ -567,7 +567,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
111183 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
111184 /* do not touch skb anymore */
111185
111186- atomic_inc(&cp->in_pkts);
111187+ atomic_inc_unchecked(&cp->in_pkts);
111188 ip_vs_conn_put(cp);
111189 return ret;
111190 }
111191@@ -1711,7 +1711,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
111192 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
111193 pkts = sysctl_sync_threshold(ipvs);
111194 else
111195- pkts = atomic_add_return(1, &cp->in_pkts);
111196+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
111197
111198 if (ipvs->sync_state & IP_VS_STATE_MASTER)
111199 ip_vs_sync_conn(net, cp, pkts);
111200diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
111201index 581a658..910e112 100644
111202--- a/net/netfilter/ipvs/ip_vs_ctl.c
111203+++ b/net/netfilter/ipvs/ip_vs_ctl.c
111204@@ -794,7 +794,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
111205 */
111206 ip_vs_rs_hash(ipvs, dest);
111207 }
111208- atomic_set(&dest->conn_flags, conn_flags);
111209+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
111210
111211 /* bind the service */
111212 old_svc = rcu_dereference_protected(dest->svc, 1);
111213@@ -1654,7 +1654,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
111214 * align with netns init in ip_vs_control_net_init()
111215 */
111216
111217-static struct ctl_table vs_vars[] = {
111218+static ctl_table_no_const vs_vars[] __read_only = {
111219 {
111220 .procname = "amemthresh",
111221 .maxlen = sizeof(int),
111222@@ -2075,7 +2075,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
111223 " %-7s %-6d %-10d %-10d\n",
111224 &dest->addr.in6,
111225 ntohs(dest->port),
111226- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
111227+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
111228 atomic_read(&dest->weight),
111229 atomic_read(&dest->activeconns),
111230 atomic_read(&dest->inactconns));
111231@@ -2086,7 +2086,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
111232 "%-7s %-6d %-10d %-10d\n",
111233 ntohl(dest->addr.ip),
111234 ntohs(dest->port),
111235- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
111236+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
111237 atomic_read(&dest->weight),
111238 atomic_read(&dest->activeconns),
111239 atomic_read(&dest->inactconns));
111240@@ -2564,7 +2564,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
111241
111242 entry.addr = dest->addr.ip;
111243 entry.port = dest->port;
111244- entry.conn_flags = atomic_read(&dest->conn_flags);
111245+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
111246 entry.weight = atomic_read(&dest->weight);
111247 entry.u_threshold = dest->u_threshold;
111248 entry.l_threshold = dest->l_threshold;
111249@@ -3107,7 +3107,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
111250 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
111251 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
111252 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
111253- (atomic_read(&dest->conn_flags) &
111254+ (atomic_read_unchecked(&dest->conn_flags) &
111255 IP_VS_CONN_F_FWD_MASK)) ||
111256 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
111257 atomic_read(&dest->weight)) ||
111258@@ -3697,7 +3697,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
111259 {
111260 int idx;
111261 struct netns_ipvs *ipvs = net_ipvs(net);
111262- struct ctl_table *tbl;
111263+ ctl_table_no_const *tbl;
111264
111265 atomic_set(&ipvs->dropentry, 0);
111266 spin_lock_init(&ipvs->dropentry_lock);
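
The ctl_table changes here and in the conntrack files below follow one pattern: PaX constification moves struct ctl_table instances into read-only memory after init, so tables that must still be written at runtime (typically templates duplicated per network namespace so each copy's .data can point at per-netns variables) are declared with the writable ctl_table_no_const typedef instead. A sketch of the idea (the typedef mirrors how this patch defines it in sysctl.h; the init function and table are hypothetical):

    typedef struct ctl_table __no_const ctl_table_no_const;

    static struct ctl_table template_table[] = {
            { .procname = "example" },
            { }                              /* terminator */
    };

    static int sysctl_init_sketch(struct net *net)
    {
            ctl_table_no_const *table;

            /* the heap copy is legitimately writable, so each netns
             * can redirect .data at its own tunables */
            table = kmemdup(template_table, sizeof(template_table),
                            GFP_KERNEL);
            if (!table)
                    return -ENOMEM;
            return 0;
    }
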
111267diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
111268index 547ff33..c8c8117 100644
111269--- a/net/netfilter/ipvs/ip_vs_lblc.c
111270+++ b/net/netfilter/ipvs/ip_vs_lblc.c
111271@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
111272 * IPVS LBLC sysctl table
111273 */
111274 #ifdef CONFIG_SYSCTL
111275-static struct ctl_table vs_vars_table[] = {
111276+static ctl_table_no_const vs_vars_table[] __read_only = {
111277 {
111278 .procname = "lblc_expiration",
111279 .data = NULL,
111280diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
111281index 3f21a2f..a112e85 100644
111282--- a/net/netfilter/ipvs/ip_vs_lblcr.c
111283+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
111284@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
111285 * IPVS LBLCR sysctl table
111286 */
111287
111288-static struct ctl_table vs_vars_table[] = {
111289+static ctl_table_no_const vs_vars_table[] __read_only = {
111290 {
111291 .procname = "lblcr_expiration",
111292 .data = NULL,
111293diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
111294index db80126..ef7110e 100644
111295--- a/net/netfilter/ipvs/ip_vs_sync.c
111296+++ b/net/netfilter/ipvs/ip_vs_sync.c
111297@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
111298 cp = cp->control;
111299 if (cp) {
111300 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
111301- pkts = atomic_add_return(1, &cp->in_pkts);
111302+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
111303 else
111304 pkts = sysctl_sync_threshold(ipvs);
111305 ip_vs_sync_conn(net, cp->control, pkts);
111306@@ -771,7 +771,7 @@ control:
111307 if (!cp)
111308 return;
111309 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
111310- pkts = atomic_add_return(1, &cp->in_pkts);
111311+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
111312 else
111313 pkts = sysctl_sync_threshold(ipvs);
111314 goto sloop;
111315@@ -895,7 +895,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
111316
111317 if (opt)
111318 memcpy(&cp->in_seq, opt, sizeof(*opt));
111319- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
111320+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
111321 cp->state = state;
111322 cp->old_state = cp->state;
111323 /*
111324diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
111325index 73ba1cc..1adfc7a 100644
111326--- a/net/netfilter/ipvs/ip_vs_xmit.c
111327+++ b/net/netfilter/ipvs/ip_vs_xmit.c
111328@@ -1102,7 +1102,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
111329 else
111330 rc = NF_ACCEPT;
111331 /* do not touch skb anymore */
111332- atomic_inc(&cp->in_pkts);
111333+ atomic_inc_unchecked(&cp->in_pkts);
111334 goto out;
111335 }
111336
111337@@ -1194,7 +1194,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
111338 else
111339 rc = NF_ACCEPT;
111340 /* do not touch skb anymore */
111341- atomic_inc(&cp->in_pkts);
111342+ atomic_inc_unchecked(&cp->in_pkts);
111343 goto out;
111344 }
111345
111346diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
111347index a4b5e2a..13b1de3 100644
111348--- a/net/netfilter/nf_conntrack_acct.c
111349+++ b/net/netfilter/nf_conntrack_acct.c
111350@@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
111351 #ifdef CONFIG_SYSCTL
111352 static int nf_conntrack_acct_init_sysctl(struct net *net)
111353 {
111354- struct ctl_table *table;
111355+ ctl_table_no_const *table;
111356
111357 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
111358 GFP_KERNEL);
111359diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
111360index 1f4f954..e364ad7 100644
111361--- a/net/netfilter/nf_conntrack_core.c
111362+++ b/net/netfilter/nf_conntrack_core.c
111363@@ -1789,6 +1789,10 @@ void nf_conntrack_init_end(void)
111364 #define DYING_NULLS_VAL ((1<<30)+1)
111365 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
111366
111367+#ifdef CONFIG_GRKERNSEC_HIDESYM
111368+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
111369+#endif
111370+
111371 int nf_conntrack_init_net(struct net *net)
111372 {
111373 int ret = -ENOMEM;
111374@@ -1814,7 +1818,11 @@ int nf_conntrack_init_net(struct net *net)
111375 if (!net->ct.stat)
111376 goto err_pcpu_lists;
111377
111378+#ifdef CONFIG_GRKERNSEC_HIDESYM
111379+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08x", atomic_inc_return_unchecked(&conntrack_cache_id));
111380+#else
111381 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
111382+#endif
111383 if (!net->ct.slabname)
111384 goto err_slabname;
111385
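
The slabname hunk closes a kernel-address infoleak: naming the per-netns conntrack cache "nf_conntrack_%p" embeds the struct net pointer in a string visible through /proc/slabinfo. Under GRKERNSEC_HIDESYM the name is derived from a monotonically increasing id instead. Reduced to its core, the pattern is (a sketch with a generic counter):

    static atomic_t cache_id = ATOMIC_INIT(0);

    static char *pointer_free_name_sketch(void)
    {
            /* unique yet pointer-free: nothing about kernel memory
             * layout leaks via the cache name */
            return kasprintf(GFP_KERNEL, "nf_conntrack_%08x",
                             atomic_inc_return(&cache_id));
    }
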
111386diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
111387index 1df1761..ce8b88a 100644
111388--- a/net/netfilter/nf_conntrack_ecache.c
111389+++ b/net/netfilter/nf_conntrack_ecache.c
111390@@ -188,7 +188,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
111391 #ifdef CONFIG_SYSCTL
111392 static int nf_conntrack_event_init_sysctl(struct net *net)
111393 {
111394- struct ctl_table *table;
111395+ ctl_table_no_const *table;
111396
111397 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
111398 GFP_KERNEL);
111399diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
111400index 5b3eae7..dd4b8fe 100644
111401--- a/net/netfilter/nf_conntrack_helper.c
111402+++ b/net/netfilter/nf_conntrack_helper.c
111403@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
111404
111405 static int nf_conntrack_helper_init_sysctl(struct net *net)
111406 {
111407- struct ctl_table *table;
111408+ ctl_table_no_const *table;
111409
111410 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
111411 GFP_KERNEL);
111412diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
111413index b65d586..beec902 100644
111414--- a/net/netfilter/nf_conntrack_proto.c
111415+++ b/net/netfilter/nf_conntrack_proto.c
111416@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
111417
111418 static void
111419 nf_ct_unregister_sysctl(struct ctl_table_header **header,
111420- struct ctl_table **table,
111421+ ctl_table_no_const **table,
111422 unsigned int users)
111423 {
111424 if (users > 0)
111425diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
111426index f641751..d3c5b51 100644
111427--- a/net/netfilter/nf_conntrack_standalone.c
111428+++ b/net/netfilter/nf_conntrack_standalone.c
111429@@ -471,7 +471,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
111430
111431 static int nf_conntrack_standalone_init_sysctl(struct net *net)
111432 {
111433- struct ctl_table *table;
111434+ ctl_table_no_const *table;
111435
111436 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
111437 GFP_KERNEL);
111438diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
111439index 7a394df..bd91a8a 100644
111440--- a/net/netfilter/nf_conntrack_timestamp.c
111441+++ b/net/netfilter/nf_conntrack_timestamp.c
111442@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
111443 #ifdef CONFIG_SYSCTL
111444 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
111445 {
111446- struct ctl_table *table;
111447+ ctl_table_no_const *table;
111448
111449 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
111450 GFP_KERNEL);
111451diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
111452index 85296d4..8becdec 100644
111453--- a/net/netfilter/nf_log.c
111454+++ b/net/netfilter/nf_log.c
111455@@ -243,7 +243,7 @@ static const struct file_operations nflog_file_ops = {
111456
111457 #ifdef CONFIG_SYSCTL
111458 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
111459-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
111460+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
111461
111462 static int nf_log_proc_dostring(struct ctl_table *table, int write,
111463 void __user *buffer, size_t *lenp, loff_t *ppos)
111464@@ -274,14 +274,16 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
111465 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
111466 mutex_unlock(&nf_log_mutex);
111467 } else {
111468+ ctl_table_no_const nf_log_table = *table;
111469+
111470 mutex_lock(&nf_log_mutex);
111471 logger = rcu_dereference_protected(net->nf.nf_loggers[tindex],
111472 lockdep_is_held(&nf_log_mutex));
111473 if (!logger)
111474- table->data = "NONE";
111475+ nf_log_table.data = "NONE";
111476 else
111477- table->data = logger->name;
111478- r = proc_dostring(table, write, buffer, lenp, ppos);
111479+ nf_log_table.data = logger->name;
111480+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
111481 mutex_unlock(&nf_log_mutex);
111482 }
111483
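
With nf_log_sysctl_table marked __read_only above, the read path of nf_log_proc_dostring() can no longer point table->data at the logger name in place. The rewrite copies the table entry to the stack, mutates only the copy, and hands that to proc_dostring(). In isolation the pattern is (a sketch with a hypothetical handler):

    static int dostring_on_copy_sketch(struct ctl_table *table, int write,
                                       void __user *buffer, size_t *lenp,
                                       loff_t *ppos)
    {
            ctl_table_no_const tmp = *table;  /* writable stack copy */

            tmp.data = "NONE";                /* only the copy mutates */
            return proc_dostring(&tmp, write, buffer, lenp, ppos);
    }
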
111484diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
111485index f042ae5..30ea486 100644
111486--- a/net/netfilter/nf_sockopt.c
111487+++ b/net/netfilter/nf_sockopt.c
111488@@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
111489 }
111490 }
111491
111492- list_add(&reg->list, &nf_sockopts);
111493+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
111494 out:
111495 mutex_unlock(&nf_sockopt_mutex);
111496 return ret;
111497@@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
111498 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
111499 {
111500 mutex_lock(&nf_sockopt_mutex);
111501- list_del(&reg->list);
111502+ pax_list_del((struct list_head *)&reg->list);
111503 mutex_unlock(&nf_sockopt_mutex);
111504 }
111505 EXPORT_SYMBOL(nf_unregister_sockopt);
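
nf_sockopts links struct nf_sockopt_ops nodes that PaX constification places in read-only memory (which is also why ip_set's so_set dropped __read_mostly earlier: the explicit section attribute would clash with the read-only placement). Plain list_add()/list_del() would fault while writing the nodes' link pointers, so the pax_list_* wrappers briefly lift write protection around the splice. Conceptually (a sketch, assuming the pax_open_kernel()/pax_close_kernel() toggles this patch uses throughout):

    static inline void pax_list_add_sketch(struct list_head *new,
                                           struct list_head *head)
    {
            pax_open_kernel();      /* temporarily make the node writable */
            list_add(new, head);
            pax_close_kernel();     /* restore write protection */
    }
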
111506diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
111507index d292c8d..9f1e166 100644
111508--- a/net/netfilter/nfnetlink_log.c
111509+++ b/net/netfilter/nfnetlink_log.c
111510@@ -79,7 +79,7 @@ static int nfnl_log_net_id __read_mostly;
111511 struct nfnl_log_net {
111512 spinlock_t instances_lock;
111513 struct hlist_head instance_table[INSTANCE_BUCKETS];
111514- atomic_t global_seq;
111515+ atomic_unchecked_t global_seq;
111516 };
111517
111518 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
111519@@ -561,7 +561,7 @@ __build_packet_message(struct nfnl_log_net *log,
111520 /* global sequence number */
111521 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
111522 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
111523- htonl(atomic_inc_return(&log->global_seq))))
111524+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
111525 goto nla_put_failure;
111526
111527 if (data_len) {
111528diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
111529index 1840989..6895744 100644
111530--- a/net/netfilter/nft_compat.c
111531+++ b/net/netfilter/nft_compat.c
111532@@ -225,7 +225,7 @@ target_dump_info(struct sk_buff *skb, const struct xt_target *t, const void *in)
111533 /* We want to reuse existing compat_to_user */
111534 old_fs = get_fs();
111535 set_fs(KERNEL_DS);
111536- t->compat_to_user(out, in);
111537+ t->compat_to_user((void __force_user *)out, in);
111538 set_fs(old_fs);
111539 ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), out);
111540 kfree(out);
111541@@ -421,7 +421,7 @@ match_dump_info(struct sk_buff *skb, const struct xt_match *m, const void *in)
111542 /* We want to reuse existing compat_to_user */
111543 old_fs = get_fs();
111544 set_fs(KERNEL_DS);
111545- m->compat_to_user(out, in);
111546+ m->compat_to_user((void __force_user *)out, in);
111547 set_fs(old_fs);
111548 ret = nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(m->matchsize), out);
111549 kfree(out);
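
The nft_compat hunks run a compat_to_user() callback on a kernel buffer after set_fs(KERNEL_DS), deliberately treating kernel memory as the "user" destination. grsecurity keeps the user and kernel pointer spaces distinct for sparse, so the intentional crossing is spelled out with a __force_user cast rather than silently accepted. The annotation itself is only checker-visible, roughly (assumed from the compiler.h changes earlier in this patch):

    #ifdef __CHECKER__                      /* sparse run */
    # define __force_user   __force __user
    #else
    # define __force_user
    #endif

With that in place, (void __force_user *)out documents that passing the kmalloc'd buffer where a __user pointer is expected is safe here because KERNEL_DS is in effect.
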
111550diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c
111551index bbffdbda..12d4da8 100644
111552--- a/net/netfilter/xt_bpf.c
111553+++ b/net/netfilter/xt_bpf.c
111554@@ -23,11 +23,10 @@ MODULE_ALIAS("ip6t_bpf");
111555 static int bpf_mt_check(const struct xt_mtchk_param *par)
111556 {
111557 struct xt_bpf_info *info = par->matchinfo;
111558- struct sock_fprog_kern program;
111559+ struct sock_fprog program;
111560
111561 program.len = info->bpf_program_num_elem;
111562- program.filter = info->bpf_program;
111563-
111564+ program.filter = (struct sock_filter __user *) info->bpf_program;
111565 if (sk_unattached_filter_create(&info->filter, &program)) {
111566 pr_info("bpf: check failed: parse error\n");
111567 return -EINVAL;
111568diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
111569new file mode 100644
111570index 0000000..c566332
111571--- /dev/null
111572+++ b/net/netfilter/xt_gradm.c
111573@@ -0,0 +1,51 @@
111574+/*
111575+ * gradm match for netfilter
111576